Thank you very much for your answer! The full backtrace is:
ERROR: MethodError: no method matching unsafe_convert(::Type{Ptr{Float32}}, ::CUDA.CuPtr{Float32})
Closest candidates are:
unsafe_convert(::Type{CUDA.CuRef{T}}, ::Any) where T at ~/.julia/packages/CUDA/35NC6/src/pointer.jl:208
unsafe_convert(::Type{CUDA.PtrOrCuPtr{T}}, ::Any) where T at ~/.julia/packages/CUDA/35NC6/src/pointer.jl:118
unsafe_convert(::Type{<:Union{CUDA.CuArrayPtr, CUDA.CuPtr, Ptr}}, ::CUDA.Mem.AbstractBuffer) at ~/.julia/packages/CUDA/35NC6/lib/cudadrv/memory.jl:33
...
Stacktrace:
[1] gemv!(trans::Char, alpha::Float32, A::CUDA.CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, X::Vector{Float32}, beta::Float32, Y::Vector{Float32})
@ LinearAlgebra.BLAS /usr/share/julia/stdlib/v1.8/LinearAlgebra/src/blas.jl:666
[2] gemv!(y::Vector{Float32}, tA::Char, A::CUDA.CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, x::Vector{Float32}, α::Bool, β::Bool)
@ LinearAlgebra /usr/share/julia/stdlib/v1.8/LinearAlgebra/src/matmul.jl:503
[3] mul!
@ /usr/share/julia/stdlib/v1.8/LinearAlgebra/src/matmul.jl:65 [inlined]
[4] mul!
@ /usr/share/julia/stdlib/v1.8/LinearAlgebra/src/matmul.jl:276 [inlined]
[5] *
@ /usr/share/julia/stdlib/v1.8/LinearAlgebra/src/matmul.jl:52 [inlined]
[6] *
@ /usr/share/julia/stdlib/v1.8/LinearAlgebra/src/matmul.jl:119 [inlined]
[7] (::ChainRules.var"#1457#1460"{Adjoint{Float32, Vector{Float32}}, CUDA.CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, ChainRulesCore.ProjectTo{Adjoint, NamedTuple{(:parent,), Tuple{ChainRulesCore.ProjectTo{AbstractArray, NamedTuple{(:element, :axes), Tuple{ChainRulesCore.ProjectTo{Float32, NamedTuple{(), Tuple{}}}, Tuple{Base.OneTo{Int64}}}}}}}}})()
@ ChainRules ~/.julia/packages/ChainRules/pEOSw/src/rulesets/Base/arraymath.jl:36
[8] unthunk
@ ~/.julia/packages/ChainRulesCore/zoCjl/src/tangent_types/thunks.jl:204 [inlined]
[9] wrap_chainrules_output
@ ~/.julia/packages/Zygote/WOy6z/src/compiler/chainrules.jl:110 [inlined]
[10] map
@ ./tuple.jl:223 [inlined]
[11] wrap_chainrules_output
@ ~/.julia/packages/Zygote/WOy6z/src/compiler/chainrules.jl:111 [inlined]
[12] (::Zygote.ZBack{ChainRules.var"#times_pullback#1459"{Adjoint{Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, CUDA.CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, ChainRulesCore.ProjectTo{AbstractArray, NamedTuple{(:element, :axes), Tuple{ChainRulesCore.ProjectTo{Float32, NamedTuple{(), Tuple{}}}, Tuple{Base.OneTo{Int64}, Base.OneTo{Int64}}}}}, ChainRulesCore.ProjectTo{Adjoint, NamedTuple{(:parent,), Tuple{ChainRulesCore.ProjectTo{AbstractArray, NamedTuple{(:element, :axes), Tuple{ChainRulesCore.ProjectTo{Float32, NamedTuple{(), Tuple{}}}, Tuple{Base.OneTo{Int64}}}}}}}}}})(dy::Adjoint{Float32, Vector{Float32}})
@ Zygote ~/.julia/packages/Zygote/WOy6z/src/compiler/chainrules.jl:211
[13] Pullback
@ ~/Datos/github/ISL/src/CustomLossFunction.jl:667 [inlined]
[14] (::Zygote.Pullback{Tuple{ISL.var"#376#381"{HyperParamsSlicedISL, CUDA.CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, Vector{CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}}, Chain{Tuple{Dense{typeof(identity), CUDA.CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, BatchNorm{typeof(relu), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, var"#5#7", ConvTranspose{2, 4, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, BatchNorm{typeof(relu), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, ConvTranspose{2, 4, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, BatchNorm{typeof(relu), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, ConvTranspose{2, 4, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, typeof(Flux.flatten), var"#6#8"}}}, Any})(Δ::Float32)
@ Zygote ~/.julia/packages/Zygote/WOy6z/src/compiler/interface2.jl:0
[15] (::Zygote.var"#75#76"{Zygote.Pullback{Tuple{ISL.var"#376#381"{HyperParamsSlicedISL, CUDA.CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, Vector{CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}}, Chain{Tuple{Dense{typeof(identity), CUDA.CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, BatchNorm{typeof(relu), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, var"#5#7", ConvTranspose{2, 4, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, BatchNorm{typeof(relu), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, ConvTranspose{2, 4, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, BatchNorm{typeof(relu), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, ConvTranspose{2, 4, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, typeof(Flux.flatten), var"#6#8"}}}, Any}})(Δ::Float32)
@ Zygote ~/.julia/packages/Zygote/WOy6z/src/compiler/interface.jl:45
[16] withgradient(f::Function, args::Chain{Tuple{Dense{typeof(identity), CUDA.CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, BatchNorm{typeof(relu), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, var"#5#7", ConvTranspose{2, 4, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, BatchNorm{typeof(relu), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, ConvTranspose{2, 4, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, BatchNorm{typeof(relu), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, ConvTranspose{2, 4, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, typeof(Flux.flatten), var"#6#8"}})
@ Zygote ~/.julia/packages/Zygote/WOy6z/src/compiler/interface.jl:162
[17] macro expansion
@ ~/Datos/github/ISL/src/CustomLossFunction.jl:657 [inlined]
[18] macro expansion
@ ~/.julia/packages/ProgressMeter/vnCY0/src/ProgressMeter.jl:957 [inlined]
[19] sliced_invariant_statistical_loss_optimized_2(nn_model::Chain{Tuple{Dense{typeof(identity), CUDA.CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, BatchNorm{typeof(relu), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, var"#5#7", ConvTranspose{2, 4, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, BatchNorm{typeof(relu), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, ConvTranspose{2, 4, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, BatchNorm{typeof(relu), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, ConvTranspose{2, 4, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, typeof(Flux.flatten), var"#6#8"}}, loader::DataLoader{MLUtils.MappedData{:auto, typeof(gpu), Matrix{Float32}}, Random._GLOBAL_RNG, Val{nothing}}, hparams::HyperParamsSlicedISL)
@ ISL ~/Datos/github/ISL/src/CustomLossFunction.jl:655
[20] macro expansion
@ ~/Datos/github/ISL/examples/Sliced_ISL/MNIST_sliced.jl:166 [inlined]
[21] top-level scope
@ ~/.julia/packages/ProgressMeter/vnCY0/src/ProgressMeter.jl:957
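Reading the argument types in frames [1]–[2], `BLAS.gemv!` is being handed a `CUDA.CuArray` matrix `A` together with plain CPU `Vector{Float32}` arguments `X` and `Y`, and frames [7] and [12] show the CPU vector arriving as the cotangent `dy::Adjoint{Float32, Vector{Float32}}` inside the ChainRules `*` pullback for the loss at CustomLossFunction.jl:667. So this looks like a CPU array being mixed with GPU data somewhere in that computation. Below is a minimal sketch of the kind of mismatch that produces this exact `unsafe_convert` MethodError; it assumes a working CUDA.jl setup, and the variable names are purely illustrative, not taken from `CustomLossFunction.jl`.

```julia
# Minimal sketch (assumes CUDA.jl is functional); names are hypothetical.
using CUDA, LinearAlgebra

A = CUDA.rand(Float32, 4, 3)   # GPU matrix, like the CuArray in frame [1]
ω = rand(Float32, 4)           # CPU Vector{Float32}, the mismatched operand

# Mixing a CPU vector with a GPU matrix falls through the generic
# LinearAlgebra gemv! path into BLAS.gemv!, which cannot convert the
# CuArray's CuPtr{Float32} to a host Ptr{Float32} -- the same MethodError
# as in the trace above.
# ω' * A    # --> ERROR: MethodError: no method matching unsafe_convert(...)

# Keeping every operand on the device avoids the CPU BLAS fallback:
ω_gpu = cu(ω)
ω_gpu' * A                     # dispatches to CUBLAS and runs on the GPU
```

If that diagnosis is right, the fix would be to make sure every array that enters the loss (including any projection/slicing vectors built outside the model) is moved to the GPU with `gpu`/`cu` before it is multiplied with the model's `CuArray`s.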