Hi, when I try to train a NeuralODE with a DiscreteCallback using `sensealg=InterpolatingAdjoint(autojacvec=EnzymeVJP(), checkpointing=true)`,
I get:
Enzyme execution failed.
Enzyme: unhandled augmented forward for jl_f_finalizer
Stacktrace:
[1] finalizer
@ ./gcutils.jl:87
[2] _
@ ~/.julia/packages/CUDA/Tl08O/src/array.jl:83
[3] CuArray
@ ~/.julia/packages/CUDA/Tl08O/src/array.jl:79
[4] derive
@ ~/.julia/packages/CUDA/Tl08O/src/array.jl:799
[5] unsafe_contiguous_view
@ ~/.julia/packages/GPUArrays/qt4ax/src/host/base.jl:319
[6] unsafe_view
@ ~/.julia/packages/GPUArrays/qt4ax/src/host/base.jl:314
[7] view
@ ~/.julia/packages/GPUArrays/qt4ax/src/host/base.jl:310
[8] maybeview
@ ./views.jl:148
[9] macro expansion
@ ~/.julia/packages/ComponentArrays/xO4hy/src/array_interface.jl:0
[10] _getindex
@ ~/.julia/packages/ComponentArrays/xO4hy/src/array_interface.jl:119
[11] getproperty
@ ~/.julia/packages/ComponentArrays/xO4hy/src/namedtuple_interface.jl:14
[12] macro expansion
@ ~/.julia/packages/Lux/PsW4M/src/layers/containers.jl:0
[13] applychain
@ ~/.julia/packages/Lux/PsW4M/src/layers/containers.jl:520
Stacktrace:
[1] finalizer
@ ./gcutils.jl:87 [inlined]
[2] _
@ ~/.julia/packages/CUDA/Tl08O/src/array.jl:83 [inlined]
[3] CuArray
@ ~/.julia/packages/CUDA/Tl08O/src/array.jl:79 [inlined]
[4] derive
@ ~/.julia/packages/CUDA/Tl08O/src/array.jl:799 [inlined]
[5] unsafe_contiguous_view
@ ~/.julia/packages/GPUArrays/qt4ax/src/host/base.jl:319 [inlined]
[6] unsafe_view
@ ~/.julia/packages/GPUArrays/qt4ax/src/host/base.jl:314 [inlined]
[7] view
@ ~/.julia/packages/GPUArrays/qt4ax/src/host/base.jl:310 [inlined]
[8] maybeview
@ ./views.jl:148 [inlined]
[9] macro expansion
@ ~/.julia/packages/ComponentArrays/xO4hy/src/array_interface.jl:0 [inlined]
[10] _getindex
@ ~/.julia/packages/ComponentArrays/xO4hy/src/array_interface.jl:119 [inlined]
[11] getproperty
@ ~/.julia/packages/ComponentArrays/xO4hy/src/namedtuple_interface.jl:14 [inlined]
[12] macro expansion
@ ~/.julia/packages/Lux/PsW4M/src/layers/containers.jl:0 [inlined]
[13] applychain
@ ~/.julia/packages/Lux/PsW4M/src/layers/containers.jl:520
[14] Chain
@ ~/.julia/packages/Lux/PsW4M/src/layers/containers.jl:518 [inlined]
[15] apply
@ ~/.julia/packages/LuxCore/yzx6E/src/LuxCore.jl:171 [inlined]
[16] dudt
@ ./In[92]:24 [inlined]
[17] dudt
@ ./In[92]:20 [inlined]
[18] ODEFunction
@ ~/.julia/packages/SciMLBase/Q1klk/src/scimlfunctions.jl:2335 [inlined]
[19] #138
@ ~/.julia/packages/SciMLSensitivity/se3y4/src/adjoint_common.jl:490 [inlined]
[20] diffejulia__138_128700_inner_1wrap
@ ~/.julia/packages/SciMLSensitivity/se3y4/src/adjoint_common.jl:0
[21] macro expansion
@ ~/.julia/packages/Enzyme/XGb4o/src/compiler.jl:7049 [inlined]
[22] enzyme_call
@ ~/.julia/packages/Enzyme/XGb4o/src/compiler.jl:6658 [inlined]
[23] CombinedAdjointThunk
@ ~/.julia/packages/Enzyme/XGb4o/src/compiler.jl:6535 [inlined]
[24] autodiff
@ ~/.julia/packages/Enzyme/XGb4o/src/Enzyme.jl:320 [inlined]
[25] _vecjacobian!(dλ::CuArray{Float32, 1, CUDA.DeviceMemory}, y::CuArray{Float32, 1, CUDA.DeviceMemory}, λ::CuArray{Float32, 1, CUDA.DeviceMemory}, p::ComponentVector{Float32, CuArray{Float32, 1, CUDA.DeviceMemory}, Tuple{Axis{(layer_1 = ViewAxis(1:80, Axis(weight = ViewAxis(1:64, ShapedAxis((16, 4))), bias = ViewAxis(65:80, ShapedAxis((16, 1))))), layer_2 = ViewAxis(81:352, Axis(weight = ViewAxis(1:256, ShapedAxis((16, 16))), bias = ViewAxis(257:272, ShapedAxis((16, 1))))), layer_3 = ViewAxis(353:420, Axis(weight = ViewAxis(1:64, ShapedAxis((4, 16))), bias = ViewAxis(65:68, ShapedAxis((4, 1))))))}}}, t::Float32, S::SciMLSensitivity.ODEInterpolatingAdjointSensitivityFunction{SciMLSensitivity.AdjointDiffCache{Nothing, SciMLSensitivity.var"#138#142"{ODEFunction{false, SciMLBase.FullSpecialize, var"#dudt#52"{@NamedTuple{layer_1::@NamedTuple{}, layer_2::@NamedTuple{}, layer_3::@NamedTuple{}}, var"#dudt#51#53"{NeuralODE{Chain{@NamedTuple{layer_1::Dense{true, typeof(tanh_fast), typeof(glorot_uniform), typeof(zeros32)}, layer_2::Dense{true, typeof(tanh_fast), typeof(glorot_uniform), typeof(zeros32)}, layer_3::Dense{true, typeof(identity), typeof(glorot_uniform), typeof(zeros32)}}, Nothing}, Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, Tuple{Float32, Float32}, LuxCUDADevice{Nothing}, @Kwargs{sensealg::InterpolatingAdjoint{0, true, Val{:central}, EnzymeVJP}}}}}, LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, Nothing, Nothing, Nothing}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Tuple{CuArray{Float32, 1, CUDA.DeviceMemory}, ComponentVector{Float32, CuArray{Float32, 1, CUDA.DeviceMemory}, Tuple{Axis{(layer_1 = ViewAxis(1:80, Axis(weight = ViewAxis(1:64, ShapedAxis((16, 4))), bias = ViewAxis(65:80, ShapedAxis((16, 1))))), layer_2 = ViewAxis(81:352, Axis(weight = 
ViewAxis(1:256, ShapedAxis((16, 16))), bias = ViewAxis(257:272, ShapedAxis((16, 1))))), layer_3 = ViewAxis(353:420, Axis(weight = ViewAxis(1:64, ShapedAxis((4, 16))), bias = ViewAxis(65:68, ShapedAxis((4, 1))))))}}}, CuArray{Float32, 1, CUDA.DeviceMemory}, CuArray{Float32, 1, CUDA.DeviceMemory}, CuArray{Float32, 1, CUDA.DeviceMemory}, SciMLSensitivity.var"#138#142"{ODEFunction{false, SciMLBase.FullSpecialize, var"#dudt#52"{@NamedTuple{layer_1::@NamedTuple{}, layer_2::@NamedTuple{}, layer_3::@NamedTuple{}}, var"#dudt#51#53"{NeuralODE{Chain{@NamedTuple{layer_1::Dense{true, typeof(tanh_fast), typeof(glorot_uniform), typeof(zeros32)}, layer_2::Dense{true, typeof(tanh_fast), typeof(glorot_uniform), typeof(zeros32)}, layer_3::Dense{true, typeof(identity), typeof(glorot_uniform), typeof(zeros32)}}, Nothing}, Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, Tuple{Float32, Float32}, LuxCUDADevice{Nothing}, @Kwargs{sensealg::InterpolatingAdjoint{0, true, Val{:central}, EnzymeVJP}}}}}, LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, Nothing, Nothing, Nothing}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Base.OneTo{Int64}, UnitRange{Int64}, LinearAlgebra.UniformScaling{Bool}}, InterpolatingAdjoint{0, true, Val{:central}, EnzymeVJP}, CuArray{Float32, 1, CUDA.DeviceMemory}, ODESolution{Float32, 2, Vector{CuArray{Float32, 1, CUDA.DeviceMemory}}, Nothing, Nothing, Vector{Float32}, Vector{Vector{CuArray{Float32, 1, CUDA.DeviceMemory}}}, Nothing, ODEProblem{CuArray{Float32, 1, CUDA.DeviceMemory}, Tuple{Float32, Float32}, false, ComponentVector{Float32, CuArray{Float32, 1, CUDA.DeviceMemory}, Tuple{Axis{(layer_1 = ViewAxis(1:80, Axis(weight = ViewAxis(1:64, ShapedAxis((16, 4))), bias = ViewAxis(65:80, ShapedAxis((16, 1))))), layer_2 = ViewAxis(81:352, Axis(weight = ViewAxis(1:256, ShapedAxis((16, 
16))), bias = ViewAxis(257:272, ShapedAxis((16, 1))))), layer_3 = ViewAxis(353:420, Axis(weight = ViewAxis(1:64, ShapedAxis((4, 16))), bias = ViewAxis(65:68, ShapedAxis((4, 1))))))}}}, ODEFunction{false, SciMLBase.FullSpecialize, var"#dudt#52"{@NamedTuple{layer_1::@NamedTuple{}, layer_2::@NamedTuple{}, layer_3::@NamedTuple{}}, var"#dudt#51#53"{NeuralODE{Chain{@NamedTuple{layer_1::Dense{true, typeof(tanh_fast), typeof(glorot_uniform), typeof(zeros32)}, layer_2::Dense{true, typeof(tanh_fast), typeof(glorot_uniform), typeof(zeros32)}, layer_3::Dense{true, typeof(identity), typeof(glorot_uniform), typeof(zeros32)}}, Nothing}, Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, Tuple{Float32, Float32}, LuxCUDADevice{Nothing}, @Kwargs{sensealg::InterpolatingAdjoint{0, true, Val{:central}, EnzymeVJP}}}}}, LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, Nothing, Nothing, Nothing}, @Kwargs{callback::CallbackSet{Tuple{}, Tuple{DiscreteCallback{DiffEqCallbacks.var"#109#113", SciMLSensitivity.TrackedAffect{Float32, CuArray{Float32, 1, CUDA.DeviceMemory}, ComponentVector{Float32, CuArray{Float32, 1, CUDA.DeviceMemory}, Tuple{Axis{(layer_1 = ViewAxis(1:80, Axis(weight = ViewAxis(1:64, ShapedAxis((16, 4))), bias = ViewAxis(65:80, ShapedAxis((16, 1))))), layer_2 = ViewAxis(81:352, Axis(weight = ViewAxis(1:256, ShapedAxis((16, 16))), bias = ViewAxis(257:272, ShapedAxis((16, 1))))), layer_3 = ViewAxis(353:420, Axis(weight = ViewAxis(1:64, ShapedAxis((4, 16))), bias = ViewAxis(65:68, ShapedAxis((4, 1))))))}}}, DiffEqCallbacks.var"#110#114"{typeof(affect!)}, Nothing, Int64}, DiffEqCallbacks.var"#111#115"{typeof(SciMLBase.INITIALIZE_DEFAULT), Bool, typeof(affect!)}, typeof(SciMLBase.FINALIZE_DEFAULT)}}}}, SciMLBase.StandardODEProblem}, Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), 
typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, OrdinaryDiffEq.InterpolationData{ODEFunction{false, SciMLBase.FullSpecialize, var"#dudt#52"{@NamedTuple{layer_1::@NamedTuple{}, layer_2::@NamedTuple{}, layer_3::@NamedTuple{}}, var"#dudt#51#53"{NeuralODE{Chain{@NamedTuple{layer_1::Dense{true, typeof(tanh_fast), typeof(glorot_uniform), typeof(zeros32)}, layer_2::Dense{true, typeof(tanh_fast), typeof(glorot_uniform), typeof(zeros32)}, layer_3::Dense{true, typeof(identity), typeof(glorot_uniform), typeof(zeros32)}}, Nothing}, Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, Tuple{Float32, Float32}, LuxCUDADevice{Nothing}, @Kwargs{sensealg::InterpolatingAdjoint{0, true, Val{:central}, EnzymeVJP}}}}}, LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, Nothing, Nothing, Nothing}, Vector{CuArray{Float32, 1, CUDA.DeviceMemory}}, Vector{Float32}, Vector{Vector{CuArray{Float32, 1, CUDA.DeviceMemory}}}, Nothing, OrdinaryDiffEq.Tsit5ConstantCache, Nothing}, SciMLBase.DEStats, Nothing, Nothing, Nothing}, SciMLSensitivity.CheckpointSolution{ODESolution{Float32, 2, Vector{CuArray{Float32, 1, CUDA.DeviceMemory}}, Nothing, Nothing, Vector{Float32}, Vector{Vector{CuArray{Float32, 1, CUDA.DeviceMemory}}}, Nothing, ODEProblem{CuArray{Float32, 1, CUDA.DeviceMemory}, Tuple{Float32, Float32}, false, ComponentVector{Float32, CuArray{Float32, 1, CUDA.DeviceMemory}, Tuple{Axis{(layer_1 = ViewAxis(1:80, Axis(weight = ViewAxis(1:64, ShapedAxis((16, 4))), bias = ViewAxis(65:80, ShapedAxis((16, 1))))), layer_2 = ViewAxis(81:352, Axis(weight = ViewAxis(1:256, ShapedAxis((16, 16))), bias = ViewAxis(257:272, ShapedAxis((16, 1))))), layer_3 = ViewAxis(353:420, Axis(weight = ViewAxis(1:64, ShapedAxis((4, 16))), bias = ViewAxis(65:68, ShapedAxis((4, 1))))))}}}, ODEFunction{false, SciMLBase.FullSpecialize, 
var"#dudt#52"{@NamedTuple{layer_1::@NamedTuple{}, layer_2::@NamedTuple{}, layer_3::@NamedTuple{}}, var"#dudt#51#53"{NeuralODE{Chain{@NamedTuple{layer_1::Dense{true, typeof(tanh_fast), typeof(glorot_uniform), typeof(zeros32)}, layer_2::Dense{true, typeof(tanh_fast), typeof(glorot_uniform), typeof(zeros32)}, layer_3::Dense{true, typeof(identity), typeof(glorot_uniform), typeof(zeros32)}}, Nothing}, Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, Tuple{Float32, Float32}, LuxCUDADevice{Nothing}, @Kwargs{sensealg::InterpolatingAdjoint{0, true, Val{:central}, EnzymeVJP}}}}}, LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, Nothing, Nothing, Nothing}, @Kwargs{callback::CallbackSet{Tuple{}, Tuple{DiscreteCallback{DiffEqCallbacks.var"#109#113", SciMLSensitivity.TrackedAffect{Float32, CuArray{Float32, 1, CUDA.DeviceMemory}, ComponentVector{Float32, CuArray{Float32, 1, CUDA.DeviceMemory}, Tuple{Axis{(layer_1 = ViewAxis(1:80, Axis(weight = ViewAxis(1:64, ShapedAxis((16, 4))), bias = ViewAxis(65:80, ShapedAxis((16, 1))))), layer_2 = ViewAxis(81:352, Axis(weight = ViewAxis(1:256, ShapedAxis((16, 16))), bias = ViewAxis(257:272, ShapedAxis((16, 1))))), layer_3 = ViewAxis(353:420, Axis(weight = ViewAxis(1:64, ShapedAxis((4, 16))), bias = ViewAxis(65:68, ShapedAxis((4, 1))))))}}}, DiffEqCallbacks.var"#110#114"{typeof(affect!)}, Nothing, Int64}, DiffEqCallbacks.var"#111#115"{typeof(SciMLBase.INITIALIZE_DEFAULT), Bool, typeof(affect!)}, typeof(SciMLBase.FINALIZE_DEFAULT)}}}}, SciMLBase.StandardODEProblem}, Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, OrdinaryDiffEq.InterpolationData{ODEFunction{false, SciMLBase.FullSpecialize, var"#dudt#52"{@NamedTuple{layer_1::@NamedTuple{}, layer_2::@NamedTuple{}, layer_3::@NamedTuple{}}, 
var"#dudt#51#53"{NeuralODE{Chain{@NamedTuple{layer_1::Dense{true, typeof(tanh_fast), typeof(glorot_uniform), typeof(zeros32)}, layer_2::Dense{true, typeof(tanh_fast), typeof(glorot_uniform), typeof(zeros32)}, layer_3::Dense{true, typeof(identity), typeof(glorot_uniform), typeof(zeros32)}}, Nothing}, Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, Tuple{Float32, Float32}, LuxCUDADevice{Nothing}, @Kwargs{sensealg::InterpolatingAdjoint{0, true, Val{:central}, EnzymeVJP}}}}}, LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, Nothing, Nothing, Nothing}, Vector{CuArray{Float32, 1, CUDA.DeviceMemory}}, Vector{Float32}, Vector{Vector{CuArray{Float32, 1, CUDA.DeviceMemory}}}, Nothing, OrdinaryDiffEq.Tsit5ConstantCache, Nothing}, SciMLBase.DEStats, Nothing, Nothing, Nothing}, Vector{Tuple{Float32, Float32}}, @NamedTuple{reltol::Float64, abstol::Float64}, Nothing}, ODEProblem{CuArray{Float32, 1, CUDA.DeviceMemory}, Tuple{Float32, Float32}, false, ComponentVector{Float32, CuArray{Float32, 1, CUDA.DeviceMemory}, Tuple{Axis{(layer_1 = ViewAxis(1:80, Axis(weight = ViewAxis(1:64, ShapedAxis((16, 4))), bias = ViewAxis(65:80, ShapedAxis((16, 1))))), layer_2 = ViewAxis(81:352, Axis(weight = ViewAxis(1:256, ShapedAxis((16, 16))), bias = ViewAxis(257:272, ShapedAxis((16, 1))))), layer_3 = ViewAxis(353:420, Axis(weight = ViewAxis(1:64, ShapedAxis((4, 16))), bias = ViewAxis(65:68, ShapedAxis((4, 1))))))}}}, ODEFunction{false, SciMLBase.FullSpecialize, var"#dudt#52"{@NamedTuple{layer_1::@NamedTuple{}, layer_2::@NamedTuple{}, layer_3::@NamedTuple{}}, var"#dudt#51#53"{NeuralODE{Chain{@NamedTuple{layer_1::Dense{true, typeof(tanh_fast), typeof(glorot_uniform), typeof(zeros32)}, layer_2::Dense{true, typeof(tanh_fast), typeof(glorot_uniform), typeof(zeros32)}, layer_3::Dense{true, typeof(identity), 
typeof(glorot_uniform), typeof(zeros32)}}, Nothing}, Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, Tuple{Float32, Float32}, LuxCUDADevice{Nothing}, @Kwargs{sensealg::InterpolatingAdjoint{0, true, Val{:central}, EnzymeVJP}}}}}, LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, Nothing, Nothing, Nothing}, @Kwargs{callback::CallbackSet{Tuple{}, Tuple{DiscreteCallback{DiffEqCallbacks.var"#109#113", SciMLSensitivity.TrackedAffect{Float32, CuArray{Float32, 1, CUDA.DeviceMemory}, ComponentVector{Float32, CuArray{Float32, 1, CUDA.DeviceMemory}, Tuple{Axis{(layer_1 = ViewAxis(1:80, Axis(weight = ViewAxis(1:64, ShapedAxis((16, 4))), bias = ViewAxis(65:80, ShapedAxis((16, 1))))), layer_2 = ViewAxis(81:352, Axis(weight = ViewAxis(1:256, ShapedAxis((16, 16))), bias = ViewAxis(257:272, ShapedAxis((16, 1))))), layer_3 = ViewAxis(353:420, Axis(weight = ViewAxis(1:64, ShapedAxis((4, 16))), bias = ViewAxis(65:68, ShapedAxis((4, 1))))))}}}, DiffEqCallbacks.var"#110#114"{typeof(affect!)}, Nothing, Int64}, DiffEqCallbacks.var"#111#115"{typeof(SciMLBase.INITIALIZE_DEFAULT), Bool, typeof(affect!)}, typeof(SciMLBase.FINALIZE_DEFAULT)}}}}, SciMLBase.StandardODEProblem}, ODEFunction{false, SciMLBase.FullSpecialize, var"#dudt#52"{@NamedTuple{layer_1::@NamedTuple{}, layer_2::@NamedTuple{}, layer_3::@NamedTuple{}}, var"#dudt#51#53"{NeuralODE{Chain{@NamedTuple{layer_1::Dense{true, typeof(tanh_fast), typeof(glorot_uniform), typeof(zeros32)}, layer_2::Dense{true, typeof(tanh_fast), typeof(glorot_uniform), typeof(zeros32)}, layer_3::Dense{true, typeof(identity), typeof(glorot_uniform), typeof(zeros32)}}, Nothing}, Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, Tuple{Float32, Float32}, LuxCUDADevice{Nothing}, @Kwargs{sensealg::InterpolatingAdjoint{0, true, 
Val{:central}, EnzymeVJP}}}}}, LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing, Nothing, Nothing, Nothing}}, isautojacvec::EnzymeVJP, dgrad::CuArray{Float32, 1, CUDA.DeviceMemory}, dy::Nothing, W::Nothing)
@ SciMLSensitivity ~/.julia/packages/SciMLSensitivity/se3y4/src/derivative_wrappers.jl:728
...
Has anyone encountered this error before?