OK, I've made a little progress… on to the next error message.
The BoundsError seems to have come from a problem in my definition of the Turing model. I changed its last three lines to the following and no longer get that error:
for i = 1:size(predicted, 1)
    data[i, :] ~ MvNormal(predicted[i, :], σ)
end
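For reference, here is a self-contained toy version of the same row-wise likelihood pattern (made-up names, no ODE involved), just to show the structure I'm now using:

using Turing, Distributions

# toy stand-in: in my real model `predicted` is the output of the ODE forward model
@model function rowwise_demo(data)
    μ ~ Normal(0.0, 1.0)
    σ ~ LogNormal(0.0, 1.0)
    predicted = fill(μ, size(data))      # dummy "prediction", one value per data point
    for i = 1:size(predicted, 1)
        data[i, :] ~ MvNormal(predicted[i, :], σ)   # one MvNormal per row, as above
    end
end

chain = sample(rowwise_demo(randn(5, 3)), NUTS(0.65), 100)

In my actual model, predicted comes from the ODE forward model, and that is where the trouble seems to start.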
But now I get the following:
ode.p[1] = -1.1
ode.p[1] = -0.5425958656908638
ode.p[1] = -0.5425958656908638
ode.p[1] = Dual{ForwardDiff.Tag{Turing.Core.var"#f#7"{DynamicPPL.TypedVarInfo{NamedTuple{(:a11, :σ), Tuple{DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:a11, Tuple{}}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:a11, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, Tuple{}}, Int64}, Vector{LogNormal{Float64}}, Vector{AbstractPPL.VarName{:σ, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, DynamicPPL.Model{var"#12#13", (:data, :params, :opt), (), (), Tuple{Matrix{Float64}, Vector{Any}, options}, Tuple{}}, DynamicPPL.Sampler{NUTS{Turing.Core.ForwardDiffAD{40}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext}, Float64}}(-0.5425958656908638,1.0,0.0)
ERROR: TypeError: in typeassert, expected Float64, got a value of type ForwardDiff.Dual{Nothing, Float64, 2}
Stacktrace:
[1] setindex!(A::Matrix{Float64}, x::ForwardDiff.Dual{ForwardDiff.Tag{Turing.Core.var"#f#7"{DynamicPPL.TypedVarInfo{NamedTuple{(:a11, :σ), Tuple{DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:a11, Tuple{}}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:a11, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, Tuple{}}, Int64}, Vector{LogNormal{Float64}}, Vector{AbstractPPL.VarName{:σ, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, DynamicPPL.Model{var"#12#13", (:data, :params, :opt), (), (), Tuple{Matrix{Float64}, Vector{Any}, options}, Tuple{}}, DynamicPPL.Sampler{NUTS{Turing.Core.ForwardDiffAD{40}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext}, Float64}, Float64, 2}, i1::Int64)
@ Base ./array.jl:903
[2] _unsafe_copyto!(dest::Matrix{Float64}, doffs::Int64, src::Matrix{ForwardDiff.Dual{ForwardDiff.Tag{Turing.Core.var"#f#7"{DynamicPPL.TypedVarInfo{NamedTuple{(:a11, :σ), Tuple{DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:a11, Tuple{}}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:a11, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, Tuple{}}, Int64}, Vector{LogNormal{Float64}}, Vector{AbstractPPL.VarName{:σ, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, DynamicPPL.Model{var"#12#13", (:data, :params, :opt), (), (), Tuple{Matrix{Float64}, Vector{Any}, options}, Tuple{}}, DynamicPPL.Sampler{NUTS{Turing.Core.ForwardDiffAD{40}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext}, Float64}, Float64, 2}}, soffs::Int64, n::Int64)
@ Base ./array.jl:253
[3] unsafe_copyto!
@ ./array.jl:307 [inlined]
[4] _copyto_impl!
@ ./array.jl:331 [inlined]
[5] copyto!
@ ./array.jl:317 [inlined]
[6] copyto!
@ ./array.jl:343 [inlined]
[7] copyto_axcheck!(dest::Matrix{Float64}, src::Matrix{ForwardDiff.Dual{ForwardDiff.Tag{Turing.Core.var"#f#7"{DynamicPPL.TypedVarInfo{NamedTuple{(:a11, :σ), Tuple{DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:a11, Tuple{}}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:a11, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, Tuple{}}, Int64}, Vector{LogNormal{Float64}}, Vector{AbstractPPL.VarName{:σ, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, DynamicPPL.Model{var"#12#13", (:data, :params, :opt), (), (), Tuple{Matrix{Float64}, Vector{Any}, options}, Tuple{}}, DynamicPPL.Sampler{NUTS{Turing.Core.ForwardDiffAD{40}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext}, Float64}, Float64, 2}})
@ Base ./abstractarray.jl:1104
[8] Array
@ ./array.jl:563 [inlined]
[9] convert(#unused#::Type{Matrix{Float64}}, a::Matrix{ForwardDiff.Dual{ForwardDiff.Tag{Turing.Core.var"#f#7"{DynamicPPL.TypedVarInfo{NamedTuple{(:a11, :σ), Tuple{DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:a11, Tuple{}}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:a11, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, Tuple{}}, Int64}, Vector{LogNormal{Float64}}, Vector{AbstractPPL.VarName{:σ, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, DynamicPPL.Model{var"#12#13", (:data, :params, :opt), (), (), Tuple{Matrix{Float64}, Vector{Any}, options}, Tuple{}}, DynamicPPL.Sampler{NUTS{Turing.Core.ForwardDiffAD{40}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext}, Float64}, Float64, 2}})
@ Base ./array.jl:554
[10] setproperty!(x::OrdinaryDiffEq.ODEIntegrator{CompositeAlgorithm{Tuple{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}}, OrdinaryDiffEq.AutoSwitchCache{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}, Rational{Int64}, Int64}}, false, Matrix{Float64}, Nothing, Float64, Vector{Any}, Float64, Float64, Float64, Vector{Matrix{Float64}}, OrdinaryDiffEq.ODECompositeSolution{Float64, 3, Vector{Matrix{Float64}}, Nothing, Nothing, Vector{Float64}, Vector{Vector{Matrix{Float64}}}, ODEProblem{Matrix{Float64}, Tuple{Float64, Float64}, false, Vector{Any}, ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, CompositeAlgorithm{Tuple{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}}, OrdinaryDiffEq.AutoSwitchCache{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}, Rational{Int64}, Int64}}, OrdinaryDiffEq.CompositeInterpolationData{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Vector{Matrix{Float64}}, Vector{Float64}, Vector{Vector{Matrix{Float64}}}, OrdinaryDiffEq.CompositeCache{Tuple{OrdinaryDiffEq.Tsit5ConstantCache{Float64, Float64}, OrdinaryDiffEq.Rosenbrock23ConstantCache{Float64, SciMLBase.TimeDerivativeWrapper{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Matrix{Float64}, Vector{Any}}, SciMLBase.UDerivativeWrapper{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Float64, Vector{Any}}, Matrix{Float64}, LU{Float64, Matrix{Float64}}, DefaultLinSolve}}, OrdinaryDiffEq.AutoSwitchCache{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}, Rational{Int64}, Int64}}}, DiffEqBase.DEStats}, ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, OrdinaryDiffEq.CompositeCache{Tuple{OrdinaryDiffEq.Tsit5ConstantCache{Float64, Float64}, OrdinaryDiffEq.Rosenbrock23ConstantCache{Float64, SciMLBase.TimeDerivativeWrapper{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Matrix{Float64}, Vector{Any}}, SciMLBase.UDerivativeWrapper{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Float64, Vector{Any}}, Matrix{Float64}, LU{Float64, Matrix{Float64}}, DefaultLinSolve}}, OrdinaryDiffEq.AutoSwitchCache{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}, Rational{Int64}, Int64}}, OrdinaryDiffEq.DEOptions{Float64, Float64, Float64, Float64, PIController{Float64}, typeof(DiffEqBase.ODE_DEFAULT_NORM), typeof(opnorm), Nothing, CallbackSet{Tuple{}, Tuple{}}, typeof(DiffEqBase.ODE_DEFAULT_ISOUTOFDOMAIN), 
typeof(DiffEqBase.ODE_DEFAULT_PROG_MESSAGE), typeof(DiffEqBase.ODE_DEFAULT_UNSTABLE_CHECK), DataStructures.BinaryMinHeap{Float64}, DataStructures.BinaryMinHeap{Float64}, Nothing, Nothing, Int64, StepRangeLen{Float64, Base.TwicePrecision{Float64}, Base.TwicePrecision{Float64}, Int64}, StepRangeLen{Float64, Base.TwicePrecision{Float64}, Base.TwicePrecision{Float64}, Int64}, Tuple{}}, Matrix{Float64}, Float64, Nothing, OrdinaryDiffEq.DefaultInit}, f::Symbol, v::Matrix{ForwardDiff.Dual{ForwardDiff.Tag{Turing.Core.var"#f#7"{DynamicPPL.TypedVarInfo{NamedTuple{(:a11, :σ), Tuple{DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:a11, Tuple{}}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:a11, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, Tuple{}}, Int64}, Vector{LogNormal{Float64}}, Vector{AbstractPPL.VarName{:σ, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, DynamicPPL.Model{var"#12#13", (:data, :params, :opt), (), (), Tuple{Matrix{Float64}, Vector{Any}, options}, Tuple{}}, DynamicPPL.Sampler{NUTS{Turing.Core.ForwardDiffAD{40}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext}, Float64}, Float64, 2}})
@ Base ./Base.jl:43
[11] initialize!(integrator::OrdinaryDiffEq.ODEIntegrator{CompositeAlgorithm{Tuple{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}}, OrdinaryDiffEq.AutoSwitchCache{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}, Rational{Int64}, Int64}}, false, Matrix{Float64}, Nothing, Float64, Vector{Any}, Float64, Float64, Float64, Vector{Matrix{Float64}}, OrdinaryDiffEq.ODECompositeSolution{Float64, 3, Vector{Matrix{Float64}}, Nothing, Nothing, Vector{Float64}, Vector{Vector{Matrix{Float64}}}, ODEProblem{Matrix{Float64}, Tuple{Float64, Float64}, false, Vector{Any}, ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, CompositeAlgorithm{Tuple{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}}, OrdinaryDiffEq.AutoSwitchCache{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}, Rational{Int64}, Int64}}, OrdinaryDiffEq.CompositeInterpolationData{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Vector{Matrix{Float64}}, Vector{Float64}, Vector{Vector{Matrix{Float64}}}, OrdinaryDiffEq.CompositeCache{Tuple{OrdinaryDiffEq.Tsit5ConstantCache{Float64, Float64}, OrdinaryDiffEq.Rosenbrock23ConstantCache{Float64, SciMLBase.TimeDerivativeWrapper{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Matrix{Float64}, Vector{Any}}, SciMLBase.UDerivativeWrapper{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Float64, Vector{Any}}, Matrix{Float64}, LU{Float64, Matrix{Float64}}, DefaultLinSolve}}, OrdinaryDiffEq.AutoSwitchCache{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}, Rational{Int64}, Int64}}}, DiffEqBase.DEStats}, ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, OrdinaryDiffEq.CompositeCache{Tuple{OrdinaryDiffEq.Tsit5ConstantCache{Float64, Float64}, OrdinaryDiffEq.Rosenbrock23ConstantCache{Float64, SciMLBase.TimeDerivativeWrapper{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Matrix{Float64}, Vector{Any}}, SciMLBase.UDerivativeWrapper{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Float64, Vector{Any}}, Matrix{Float64}, LU{Float64, Matrix{Float64}}, DefaultLinSolve}}, OrdinaryDiffEq.AutoSwitchCache{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}, Rational{Int64}, Int64}}, OrdinaryDiffEq.DEOptions{Float64, Float64, Float64, Float64, PIController{Float64}, typeof(DiffEqBase.ODE_DEFAULT_NORM), typeof(opnorm), Nothing, CallbackSet{Tuple{}, Tuple{}}, typeof(DiffEqBase.ODE_DEFAULT_ISOUTOFDOMAIN), 
typeof(DiffEqBase.ODE_DEFAULT_PROG_MESSAGE), typeof(DiffEqBase.ODE_DEFAULT_UNSTABLE_CHECK), DataStructures.BinaryMinHeap{Float64}, DataStructures.BinaryMinHeap{Float64}, Nothing, Nothing, Int64, StepRangeLen{Float64, Base.TwicePrecision{Float64}, Base.TwicePrecision{Float64}, Int64}, StepRangeLen{Float64, Base.TwicePrecision{Float64}, Base.TwicePrecision{Float64}, Int64}, Tuple{}}, Matrix{Float64}, Float64, Nothing, OrdinaryDiffEq.DefaultInit}, cache::OrdinaryDiffEq.Tsit5ConstantCache{Float64, Float64})
@ OrdinaryDiffEq ~/.julia/packages/OrdinaryDiffEq/2ZBfC/src/perform_step/low_order_rk_perform_step.jl:565
[12] initialize!(integrator::OrdinaryDiffEq.ODEIntegrator{CompositeAlgorithm{Tuple{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}}, OrdinaryDiffEq.AutoSwitchCache{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}, Rational{Int64}, Int64}}, false, Matrix{Float64}, Nothing, Float64, Vector{Any}, Float64, Float64, Float64, Vector{Matrix{Float64}}, OrdinaryDiffEq.ODECompositeSolution{Float64, 3, Vector{Matrix{Float64}}, Nothing, Nothing, Vector{Float64}, Vector{Vector{Matrix{Float64}}}, ODEProblem{Matrix{Float64}, Tuple{Float64, Float64}, false, Vector{Any}, ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, CompositeAlgorithm{Tuple{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}}, OrdinaryDiffEq.AutoSwitchCache{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}, Rational{Int64}, Int64}}, OrdinaryDiffEq.CompositeInterpolationData{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Vector{Matrix{Float64}}, Vector{Float64}, Vector{Vector{Matrix{Float64}}}, OrdinaryDiffEq.CompositeCache{Tuple{OrdinaryDiffEq.Tsit5ConstantCache{Float64, Float64}, OrdinaryDiffEq.Rosenbrock23ConstantCache{Float64, SciMLBase.TimeDerivativeWrapper{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Matrix{Float64}, Vector{Any}}, SciMLBase.UDerivativeWrapper{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Float64, Vector{Any}}, Matrix{Float64}, LU{Float64, Matrix{Float64}}, DefaultLinSolve}}, OrdinaryDiffEq.AutoSwitchCache{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}, Rational{Int64}, Int64}}}, DiffEqBase.DEStats}, ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, OrdinaryDiffEq.CompositeCache{Tuple{OrdinaryDiffEq.Tsit5ConstantCache{Float64, Float64}, OrdinaryDiffEq.Rosenbrock23ConstantCache{Float64, SciMLBase.TimeDerivativeWrapper{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Matrix{Float64}, Vector{Any}}, SciMLBase.UDerivativeWrapper{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Float64, Vector{Any}}, Matrix{Float64}, LU{Float64, Matrix{Float64}}, DefaultLinSolve}}, OrdinaryDiffEq.AutoSwitchCache{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}, Rational{Int64}, Int64}}, OrdinaryDiffEq.DEOptions{Float64, Float64, Float64, Float64, PIController{Float64}, typeof(DiffEqBase.ODE_DEFAULT_NORM), typeof(opnorm), Nothing, CallbackSet{Tuple{}, Tuple{}}, typeof(DiffEqBase.ODE_DEFAULT_ISOUTOFDOMAIN), 
typeof(DiffEqBase.ODE_DEFAULT_PROG_MESSAGE), typeof(DiffEqBase.ODE_DEFAULT_UNSTABLE_CHECK), DataStructures.BinaryMinHeap{Float64}, DataStructures.BinaryMinHeap{Float64}, Nothing, Nothing, Int64, StepRangeLen{Float64, Base.TwicePrecision{Float64}, Base.TwicePrecision{Float64}, Int64}, StepRangeLen{Float64, Base.TwicePrecision{Float64}, Base.TwicePrecision{Float64}, Int64}, Tuple{}}, Matrix{Float64}, Float64, Nothing, OrdinaryDiffEq.DefaultInit}, cache::OrdinaryDiffEq.CompositeCache{Tuple{OrdinaryDiffEq.Tsit5ConstantCache{Float64, Float64}, OrdinaryDiffEq.Rosenbrock23ConstantCache{Float64, SciMLBase.TimeDerivativeWrapper{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Matrix{Float64}, Vector{Any}}, SciMLBase.UDerivativeWrapper{ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Float64, Vector{Any}}, Matrix{Float64}, LU{Float64, Matrix{Float64}}, DefaultLinSolve}}, OrdinaryDiffEq.AutoSwitchCache{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}, Rational{Int64}, Int64}})
@ OrdinaryDiffEq ~/.julia/packages/OrdinaryDiffEq/2ZBfC/src/perform_step/composite_perform_step.jl:39
[13] __init(prob::ODEProblem{Matrix{Float64}, Tuple{Float64, Float64}, false, Vector{Any}, ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, alg::CompositeAlgorithm{Tuple{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}}, AutoSwitch{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}, Rational{Int64}, Int64}}, timeseries_init::Tuple{}, ts_init::Tuple{}, ks_init::Tuple{}, recompile::Type{Val{true}}; saveat::StepRangeLen{Float64, Base.TwicePrecision{Float64}, Base.TwicePrecision{Float64}, Int64}, tstops::StepRangeLen{Float64, Base.TwicePrecision{Float64}, Base.TwicePrecision{Float64}, Int64}, d_discontinuities::Tuple{}, save_idxs::Nothing, save_everystep::Bool, save_on::Bool, save_start::Bool, save_end::Nothing, callback::Nothing, dense::Bool, calck::Bool, dt::Float64, dtmin::Nothing, dtmax::Float64, force_dtmin::Bool, adaptive::Bool, gamma::Rational{Int64}, abstol::Nothing, reltol::Nothing, qmin::Rational{Int64}, qmax::Int64, qsteady_min::Int64, qsteady_max::Int64, beta1::Nothing, beta2::Nothing, qoldinit::Rational{Int64}, controller::Nothing, fullnormalize::Bool, failfactor::Int64, maxiters::Int64, internalnorm::typeof(DiffEqBase.ODE_DEFAULT_NORM), internalopnorm::typeof(opnorm), isoutofdomain::typeof(DiffEqBase.ODE_DEFAULT_ISOUTOFDOMAIN), unstable_check::typeof(DiffEqBase.ODE_DEFAULT_UNSTABLE_CHECK), verbose::Bool, timeseries_errors::Bool, dense_errors::Bool, advance_to_tstop::Bool, stop_at_next_tstop::Bool, initialize_save::Bool, progress::Bool, progress_steps::Int64, progress_name::String, progress_message::typeof(DiffEqBase.ODE_DEFAULT_PROG_MESSAGE), userdata::Nothing, allow_extrapolation::Bool, initialize_integrator::Bool, alias_u0::Bool, alias_du0::Bool, initializealg::OrdinaryDiffEq.DefaultInit, kwargs::Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
@ OrdinaryDiffEq ~/.julia/packages/OrdinaryDiffEq/2ZBfC/src/solve.jl:455
[14] #__solve#408
@ ~/.julia/packages/OrdinaryDiffEq/2ZBfC/src/solve.jl:4 [inlined]
[15] #solve_call#56
@ ~/.julia/packages/DiffEqBase/Nyo4y/src/solve.jl:61 [inlined]
[16] solve_up(prob::ODEProblem{Matrix{Float64}, Tuple{Float64, Float64}, false, Vector{Any}, ODEFunction{false, typeof(f!), UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, sensealg::Nothing, u0::Matrix{Float64}, p::Vector{Any}, args::CompositeAlgorithm{Tuple{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}}, AutoSwitch{Tsit5, Rosenbrock23{0, true, DefaultLinSolve, DataType}, Rational{Int64}, Int64}}; kwargs::Base.Pairs{Symbol, Any, Tuple{Symbol, Symbol, Symbol}, NamedTuple{(:saveat, :tstops, :dt), Tuple{StepRangeLen{Float64, Base.TwicePrecision{Float64}, Base.TwicePrecision{Float64}, Int64}, StepRangeLen{Float64, Base.TwicePrecision{Float64}, Base.TwicePrecision{Float64}, Int64}, Float64}}})
@ DiffEqBase ~/.julia/packages/DiffEqBase/Nyo4y/src/solve.jl:82
[17] #solve#57
@ ~/.julia/packages/DiffEqBase/Nyo4y/src/solve.jl:70 [inlined]
[18] forward_model(params::Vector{Any}, states::Vector{Matrix{Float64}}, opt::options)
@ Main ~/Desktop/projects/lDCM/src/test.jl:97
[19] #12
@ ~/Desktop/projects/lDCM/src/test.jl:160 [inlined]
[20] (::var"#12#13")(__rng__::Random._GLOBAL_RNG, __model__::DynamicPPL.Model{var"#12#13", (:data, :params, :opt), (), (), Tuple{Matrix{Float64}, Vector{Any}, options}, Tuple{}}, __varinfo__::DynamicPPL.TypedVarInfo{NamedTuple{(:a11, :σ), Tuple{DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:a11, Tuple{}}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:a11, Tuple{}}}, Vector{ForwardDiff.Dual{ForwardDiff.Tag{Turing.Core.var"#f#7"{DynamicPPL.TypedVarInfo{NamedTuple{(:a11, :σ), Tuple{DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:a11, Tuple{}}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:a11, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, Tuple{}}, Int64}, Vector{LogNormal{Float64}}, Vector{AbstractPPL.VarName{:σ, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, DynamicPPL.Model{var"#12#13", (:data, :params, :opt), (), (), Tuple{Matrix{Float64}, Vector{Any}, options}, Tuple{}}, DynamicPPL.Sampler{NUTS{Turing.Core.ForwardDiffAD{40}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext}, Float64}, Float64, 2}}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, Tuple{}}, Int64}, Vector{LogNormal{Float64}}, Vector{AbstractPPL.VarName{:σ, Tuple{}}}, Vector{ForwardDiff.Dual{ForwardDiff.Tag{Turing.Core.var"#f#7"{DynamicPPL.TypedVarInfo{NamedTuple{(:a11, :σ), Tuple{DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:a11, Tuple{}}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:a11, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, Tuple{}}, Int64}, Vector{LogNormal{Float64}}, Vector{AbstractPPL.VarName{:σ, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, DynamicPPL.Model{var"#12#13", (:data, :params, :opt), (), (), Tuple{Matrix{Float64}, Vector{Any}, options}, Tuple{}}, DynamicPPL.Sampler{NUTS{Turing.Core.ForwardDiffAD{40}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext}, Float64}, Float64, 2}}, Vector{Set{DynamicPPL.Selector}}}}}, ForwardDiff.Dual{ForwardDiff.Tag{Turing.Core.var"#f#7"{DynamicPPL.TypedVarInfo{NamedTuple{(:a11, :σ), Tuple{DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:a11, Tuple{}}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:a11, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, Tuple{}}, Int64}, Vector{LogNormal{Float64}}, Vector{AbstractPPL.VarName{:σ, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, DynamicPPL.Model{var"#12#13", (:data, :params, :opt), (), (), Tuple{Matrix{Float64}, Vector{Any}, options}, Tuple{}}, DynamicPPL.Sampler{NUTS{Turing.Core.ForwardDiffAD{40}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext}, Float64}, Float64, 2}}, __sampler__::DynamicPPL.Sampler{NUTS{Turing.Core.ForwardDiffAD{40}, (), AdvancedHMC.DiagEuclideanMetric}}, __context__::DynamicPPL.DefaultContext, data::Matrix{Float64}, params::Vector{Any}, opt::options)
@ Main ./none:0
[21] macro expansion
@ ~/.julia/packages/DynamicPPL/SgzCy/src/model.jl:0 [inlined]
[22] _evaluate
@ ~/.julia/packages/DynamicPPL/SgzCy/src/model.jl:154 [inlined]
[23] evaluate_threadunsafe
@ ~/.julia/packages/DynamicPPL/SgzCy/src/model.jl:127 [inlined]
[24] Model
@ ~/.julia/packages/DynamicPPL/SgzCy/src/model.jl:92 [inlined]
[25] Model
@ ~/.julia/packages/DynamicPPL/SgzCy/src/model.jl:98 [inlined]
[26] f
@ ~/.julia/packages/Turing/uAz5c/src/core/ad.jl:111 [inlined]
[27] vector_mode_dual_eval!(f::Turing.Core.var"#f#7"{DynamicPPL.TypedVarInfo{NamedTuple{(:a11, :σ), Tuple{DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:a11, Tuple{}}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:a11, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, Tuple{}}, Int64}, Vector{LogNormal{Float64}}, Vector{AbstractPPL.VarName{:σ, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, DynamicPPL.Model{var"#12#13", (:data, :params, :opt), (), (), Tuple{Matrix{Float64}, Vector{Any}, options}, Tuple{}}, DynamicPPL.Sampler{NUTS{Turing.Core.ForwardDiffAD{40}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext}, cfg::ForwardDiff.GradientConfig{ForwardDiff.Tag{Turing.Core.var"#f#7"{DynamicPPL.TypedVarInfo{NamedTuple{(:a11, :σ), Tuple{DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:a11, Tuple{}}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:a11, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, Tuple{}}, Int64}, Vector{LogNormal{Float64}}, Vector{AbstractPPL.VarName{:σ, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, DynamicPPL.Model{var"#12#13", (:data, :params, :opt), (), (), Tuple{Matrix{Float64}, Vector{Any}, options}, Tuple{}}, DynamicPPL.Sampler{NUTS{Turing.Core.ForwardDiffAD{40}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext}, Float64}, Float64, 2, Vector{ForwardDiff.Dual{ForwardDiff.Tag{Turing.Core.var"#f#7"{DynamicPPL.TypedVarInfo{NamedTuple{(:a11, :σ), Tuple{DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:a11, Tuple{}}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:a11, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, Tuple{}}, Int64}, Vector{LogNormal{Float64}}, Vector{AbstractPPL.VarName{:σ, Tuple{}}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, DynamicPPL.Model{var"#12#13", (:data, :params, :opt), (), (), Tuple{Matrix{Float64}, Vector{Any}, options}, Tuple{}}, DynamicPPL.Sampler{NUTS{Turing.Core.ForwardDiffAD{40}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext}, Float64}, Float64, 2}}}, x::Vector{Float64})
@ ForwardDiff ~/.julia/packages/ForwardDiff/tZ5o1/src/apiutils.jl:37
[28] vector_mode_gradient!(result::Vector{Float64}, f::Turing.Core.var"#f#7"...
[had to shorten the message]
Now I suspect that there is an issue with this particular parameter estimate, because it comes through as a ForwardDiff.Dual (with a Nothing tag) instead of a Float64. I understand that I need to reject such an estimate, like in this example: Advanced Usage
But I can't figure out the right place to do this in my case, because the failure doesn't happen directly inside the Turing model; it happens in the ODE solve called from forward_model. Do you have any suggestions?
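For completeness, the rejection pattern I'm referring to (from the Advanced Usage page) is to add -Inf to the log probability and return early when a draw is unusable. A self-contained sketch of that pattern on a made-up model (not my actual code; in my case the condition would somehow have to react to what happens inside the ODE solve, which is exactly the part I can't place):

using Turing

@model function reject_demo(y)
    θ ~ Normal(0.0, 1.0)
    if θ < -3                        # stand-in for "this draw is unusable"
        Turing.@addlogprob! -Inf     # make the sampler reject this proposal
        return                       # exit the model evaluation early
    end
    for i in eachindex(y)
        y[i] ~ Normal(θ, 1.0)
    end
end

chain = sample(reject_demo(randn(10)), NUTS(0.65), 200)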