I was trying to deploy my program to servers, but find the following code break under CUDA 9.x, it only works for CUDA 10.0.
Versions of CUDA related Julia packages are fine tuned to be same, and the generated device codes are same.
Is this a platform related issue? How can I fix it?
function measure!(reg::GPUReg{B, T}) where {B, T}
regm = reg |> rank3 # returns a tensor of order 3
pl = dropdims(mapreduce(abs2, +, regm, dims=2), dims=2)
pl_cpu = pl |> Matrix
res = CuArray(map(ib->_measure(view(pl_cpu, :, ib), 1)[], 1:B))
gpu_call(regm, (regm, res, pl)) do state, regm, res, pl
k,i,j = @cartesianidx regm
@inbounds rind = res[j] + 1
@inbounds regm[rind,i,j] = k==rind ? regm[rind,i,j]/sqrt(pl[rind, j]) : T(0)
return
end
res
end
@device_code_warntype
throws the following error
ERROR: GPU compilation of #11(CuArrays.CuKernelState, CuDeviceArray{Complex{Float64},3,CUDAnative.AS.Global}, CuDeviceArray{Int64,1,CUDAnative.AS.Global}, CuDeviceArray{Float64,2,CUDAnative.AS.Global}) failed
KernelError: recursion is currently not supported
Try inspecting the generated code with any of the @device_code_... macros.
Stacktrace:
[1] #IOBuffer#300 at iobuffer.jl:112
[2] print_to_string at strings/io.jl:112
[3] #IOBuffer#299 at iobuffer.jl:91
[4] #IOBuffer#300 at iobuffer.jl:112
[5] print_to_string at strings/io.jl:112
[6] throw_complex_domainerror at math.jl:31
[7] #11 at /home/jgliu/.julia/dev/CuYao/src/GPUReg.jl:57
Stacktrace:
[1] (::getfield(CUDAnative, Symbol("#hook_emit_function#58")){CUDAnative.CompilerContext,Array{Core.MethodInstance,1}})(::Core.MethodInstance, ::Core.CodeInfo, ::UInt64) at /home/jgliu/.julia/packages/CUDAnative/jRnXY/src/compiler/irgen.jl:97
[2] irgen(::CUDAnative.CompilerContext) at /home/jgliu/.julia/packages/CUDAnative/jRnXY/src/compiler/irgen.jl:133
[3] #compile#78(::Bool, ::Function, ::CUDAnative.CompilerContext) at ./logging.jl:308
[4] compile at /home/jgliu/.julia/packages/CUDAnative/jRnXY/src/compiler/driver.jl:49 [inlined]
[5] #compile#77(::Base.Iterators.Pairs{Union{},Union{},Tuple{},NamedTuple{(),Tuple{}}}, ::Function, ::CUDAdrv.CuDevice, ::Any, ::Any) at /home/jgliu/.julia/packages/CUDAnative/jRnXY/src/compiler/driver.jl:28
[6] compile at /home/jgliu/.julia/packages/CUDAnative/jRnXY/src/compiler/driver.jl:16 [inlined]
[7] macro expansion at /home/jgliu/.julia/packages/CUDAnative/jRnXY/src/execution.jl:259 [inlined]
[8] #cufunction#89(::Base.Iterators.Pairs{Union{},Union{},Tuple{},NamedTuple{(),Tuple{}}}, ::typeof(cufunction), ::getfield(CuYao, Symbol("##11#13")){Complex{Float64}}, ::Type{Tuple{CuArrays.CuKernelState,CuDeviceArray{Complex{Float64},3,CUDAnative.AS.Global},CuDeviceArray{Int64,1,CUDAnative.AS.Global},CuDeviceArray{Float64,2,CUDAnative.AS.Global}}}) at /home/jgliu/.julia/packages/CUDAnative/jRnXY/src/execution.jl:234
[9] cufunction(::Function, ::Type) at /home/jgliu/.julia/packages/CUDAnative/jRnXY/src/execution.jl:234
[10] macro expansion at /home/jgliu/.julia/packages/CUDAnative/jRnXY/src/execution.jl:202 [inlined]
[11] macro expansion at ./gcutils.jl:87 [inlined]
[12] macro expansion at /home/jgliu/.julia/packages/CUDAnative/jRnXY/src/execution.jl:199 [inlined]
[13] _gpu_call(::CuArrays.CuArrayBackend, ::Function, ::CuArray{Complex{Float64},3}, ::Tuple{CuArray{Complex{Float64},3},CuArray{Int64,1},CuArray{Float64,2}}, ::Tuple{Tuple{Int64},Tuple{Int64}}) at /home/jgliu/.julia/dev/CuArrays/src/gpuarray_interface.jl:59
[14] gpu_call(::Function, ::CuArray{Complex{Float64},3}, ::Tuple{CuArray{Complex{Float64},3},CuArray{Int64,1},CuArray{Float64,2}}, ::Int64) at /home/jgliu/.julia/dev/GPUArrays/src/abstract_gpu_interface.jl:151
[15] gpu_call at /home/jgliu/.julia/dev/GPUArrays/src/abstract_gpu_interface.jl:128 [inlined]
[16] measure!(::DefaultRegister{1000,Complex{Float64},CuArray{Complex{Float64},2}}) at /home/jgliu/.julia/dev/CuYao/src/GPUReg.jl:56
[17] top-level scope at /home/jgliu/.julia/packages/CUDAnative/jRnXY/src/reflection.jl:153 [inlined]
[18] top-level scope at ./none:0
this line is related to the error.
@inbounds regm[rind,i,j] = k==rind ? regm[rind,i,j]/sqrt(pl[rind, j]) : T(0)
Should I post an issue on github?