Hello,
I have a user-defined density that does not support automatic differentiation: the quantile step goes through gamma_inc_inv in SpecialFunctions.jl (called via StatsFuns.gammainvccdf), which has no method for ForwardDiff dual numbers.
Does Turing.jl support finite-difference gradients, e.g. via FiniteDiff.jl? If so, how do I select that backend for MAP estimation and for HMC/NUTS sampling?
Alternatively, can I supply an analytical gradient of the log-density myself? I sketch below what I have in mind for both routes.
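For the finite-difference route, what I imagine is selecting the backend through the sampler's adtype option, roughly as below. I do not know whether AutoFiniteDiff (the ADTypes.jl selector for FiniteDiff.jl) is actually accepted by my Turing version, nor whether optimize takes such a keyword at all, so please treat this as a sketch of the intent rather than working code:

using Turing
using ADTypes: AutoFiniteDiff   # selector for FiniteDiff.jl (assuming Turing accepts it)

# Hypothetical: finite-difference gradients for NUTS via the adtype option.
chn = sample(model_func, NUTS(; adtype = AutoFiniteDiff()), 10^4)

# Hypothetical: the analogous call for MAP (unsure whether optimize accepts adtype).
map_estimate = optimize(model_func, MAP(); adtype = AutoFiniteDiff())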
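For the analytic route, my current idea is to overload gammainvccdf for ForwardDiff duals via the implicit function theorem: with x = gammainvccdf(k, θ, p) one has gammaccdf(k, θ, x) = p, so dx/dp = -1 / gammapdf(k, θ, x), and in my model only p depends on ω. Something along these lines (untested, and it commits type piracy on StatsFuns, so I am not sure it is the recommended way):

using ForwardDiff: Dual, value, partials
using StatsFuns: gammainvccdf, gammapdf
import StatsFuns

# x = gammainvccdf(k, θ, p) solves gammaccdf(k, θ, x) = p,
# hence dx/dp = -1 / gammapdf(k, θ, x) by the implicit function theorem.
function StatsFuns.gammainvccdf(k::Real, θ::Real, p::Dual{T}) where {T}
    x  = gammainvccdf(k, θ, value(p))    # plain Float64 evaluation
    dx = -inv(gammapdf(k, θ, x))         # analytic dx/dp
    return Dual{T}(x, dx * partials(p))  # propagate the dual parts
end

Would defining such a method (or a proper ChainRules rule) be picked up by Turing's ForwardDiff backend, or is there a cleaner mechanism for this?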
MWE:
using Turing
using SpecialFunctions   # erfc
using StatsFuns          # gammainvccdf
using Optim              # needed for optimize(model, MAP())

# Toy model, reparameterised through a standard normal ω.
@model function model_toyProblem_tranformed(y)
    ω ~ Normal(0, 1)                                      # standard normal prior
    x = gammainvccdf(1.0, 1, 0.5 * erfc(ω / sqrt(2)))     # push ω through the Gamma inverse ccdf
    y ~ Normal(x, 1.0)                                    # pushforward likelihood
end

model_func = model_toyProblem_tranformed(0.0)
map_estimate = optimize(model_func, MAP())   # MAP estimate
Output:
MethodError: no method matching __gamma_inc_inv(::ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}, ::ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}, ::Bool)
Closest candidates are:
__gamma_inc_inv(::Float64, ::Float64, ::Bool)
@ SpecialFunctions ~/.julia/packages/SpecialFunctions/QH8rV/src/gamma_inc.jl:1012
__gamma_inc_inv(::T, ::T, ::Bool) where T<:Union{Float16, Float32}
@ SpecialFunctions ~/.julia/packages/SpecialFunctions/QH8rV/src/gamma_inc.jl:1089
Stacktrace:
[1] _gamma_inc_inv
@ ~/.julia/packages/SpecialFunctions/QH8rV/src/gamma_inc.jl:1009 [inlined]
[2] gamma_inc_inv
@ ~/.julia/packages/SpecialFunctions/QH8rV/src/gamma_inc.jl:991 [inlined]
[3] gammainvccdf
@ ~/.julia/packages/StatsFuns/mrf0e/src/distrs/gamma.jl:108 [inlined]
[4] model_toyProblem_tranformed(__model__::DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, __varinfo__::DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}}, Vector{Set{DynamicPPL.Selector}}}}, ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}}, __context__::Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}, y::Float64)
@ Main ./In[15]:4
[5] _evaluate!!
@ ~/.julia/packages/DynamicPPL/rXg4T/src/model.jl:963 [inlined]
[6] evaluate_threadunsafe!!
@ ~/.julia/packages/DynamicPPL/rXg4T/src/model.jl:936 [inlined]
[7] evaluate!!
@ ~/.julia/packages/DynamicPPL/rXg4T/src/model.jl:889 [inlined]
[8] (::LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}})(z::Vector{ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}})
@ Turing.Optimisation ~/.julia/packages/Turing/lkUBK/src/optimisation/Optimisation.jl:111
[9] logdensity
@ ~/.julia/packages/Turing/lkUBK/src/optimisation/Optimisation.jl:115 [inlined]
[10] Fix1
@ ./operators.jl:1118 [inlined]
[11] vector_mode_dual_eval!
@ ~/.julia/packages/ForwardDiff/PcZ48/src/apiutils.jl:24 [inlined]
[12] vector_mode_gradient!
@ ~/.julia/packages/ForwardDiff/PcZ48/src/gradient.jl:96 [inlined]
[13] gradient!(result::DiffResults.MutableDiffResult{1, Float64, Tuple{Vector{Float64}}}, f::Base.Fix1{typeof(LogDensityProblems.logdensity), LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}}}, x::Vector{Float64}, cfg::ForwardDiff.GradientConfig{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1, Vector{ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}}}, ::Val{true})
@ ForwardDiff ~/.julia/packages/ForwardDiff/PcZ48/src/gradient.jl:37
[14] gradient!
@ ForwardDiff ~/.julia/packages/ForwardDiff/PcZ48/src/gradient.jl:35 [inlined]
[15] logdensity_and_gradient(fℓ::LogDensityProblemsADForwardDiffExt.ForwardDiffLogDensity{LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}}, ForwardDiff.Chunk{1}, ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, ForwardDiff.GradientConfig{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1, Vector{ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}}}}, x::Vector{Float64})
@ LogDensityProblemsADForwardDiffExt ~/.julia/packages/LogDensityProblemsAD/rBlLq/ext/LogDensityProblemsADForwardDiffExt.jl:118
[16] (::LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}})(F::Float64, G::Vector{Float64}, z::Vector{Float64})
@ Turing.Optimisation ~/.julia/packages/Turing/lkUBK/src/optimisation/Optimisation.jl:122
[17] (::NLSolversBase.var"#69#70"{NLSolversBase.InplaceObjective{Nothing, LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}}, Nothing, Nothing, Nothing}, Float64})(G::Vector{Float64}, x::Vector{Float64})
@ NLSolversBase ~/.julia/packages/NLSolversBase/kavn7/src/objective_types/incomplete.jl:54
[18] value_gradient!!(obj::OnceDifferentiable{Float64, Vector{Float64}, Vector{Float64}}, x::Vector{Float64})
@ NLSolversBase ~/.julia/packages/NLSolversBase/kavn7/src/interface.jl:82
[19] initial_state(method::LBFGS{Nothing, LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Optim.var"#19#21"}, options::Optim.Options{Float64, Nothing}, d::OnceDifferentiable{Float64, Vector{Float64}, Vector{Float64}}, initial_x::Vector{Float64})
@ Optim ~/.julia/packages/Optim/DV9Sq/src/multivariate/solvers/first_order/l_bfgs.jl:164
[20] optimize(d::OnceDifferentiable{Float64, Vector{Float64}, Vector{Float64}}, initial_x::Vector{Float64}, method::LBFGS{Nothing, LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Optim.var"#19#21"}, options::Optim.Options{Float64, Nothing}, state::Optim.LBFGSState{Vector{Float64}, Vector{Vector{Float64}}, Vector{Vector{Float64}}, Float64, Vector{Float64}})
@ Optim ~/.julia/packages/Optim/DV9Sq/src/multivariate/optimize/optimize.jl:36 [inlined]
[21] optimize(f::NLSolversBase.InplaceObjective{Nothing, LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}}, Nothing, Nothing, Nothing}, initial_x::Vector{Float64}, method::LBFGS{Nothing, LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Optim.var"#19#21"}, options::Optim.Options{Float64, Nothing}; inplace::Bool, autodiff::Symbol)
@ Optim ~/.julia/packages/Optim/DV9Sq/src/multivariate/optimize/interface.jl:143
[22] optimize
@ ~/.julia/packages/Optim/DV9Sq/src/multivariate/optimize/interface.jl:139 [inlined]
[23] _optimize(::DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, ::LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}}, ::Vector{Float64}, ::LBFGS{Nothing, LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Optim.var"#19#21"}, ::Optim.Options{Float64, Nothing}; kwargs::@Kwargs{})
@ TuringOptimExt ~/.julia/packages/Turing/lkUBK/ext/TuringOptimExt.jl:235
[24] _optimize(::DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, ::LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}}, ::Vector{Float64}, ::LBFGS{Nothing, LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Optim.var"#19#21"}, ::Optim.Options{Float64, Nothing})
@ TuringOptimExt ~/.julia/packages/Turing/lkUBK/ext/TuringOptimExt.jl:219
[25] _map_optimize(::DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, ::Vector{Float64}, ::Vararg{Any}; kwargs::@Kwargs{})
@ TuringOptimExt ~/.julia/packages/Turing/lkUBK/ext/TuringOptimExt.jl:211
[26] _map_optimize(::DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, ::Vector{Float64}, ::Vararg{Any})
@ TuringOptimExt ~/.julia/packages/Turing/lkUBK/ext/TuringOptimExt.jl:209
[27] optimize(model::DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, ::MAP, options::Optim.Options{Float64, Nothing}; kwargs::@Kwargs{})
@ TuringOptimExt ~/.julia/packages/Turing/lkUBK/ext/TuringOptimExt.jl:186
[28] optimize
@ TuringOptimExt ~/.julia/packages/Turing/lkUBK/ext/TuringOptimExt.jl:181 [inlined]
[29] optimize(model::DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, ::MAP)
@ TuringOptimExt ~/.julia/packages/Turing/lkUBK/ext/TuringOptimExt.jl:181
[30] top-level scope
@ In[18]:1
For NUTS sampling I get the same MethodError:
# Prepare arguments.
nr_samples = 10^4   # number of posterior samples

# Generate a chain containing the samples of ω.
chn_NUTS = sample(model_func, NUTS(), nr_samples)
Output:
MethodError: no method matching __gamma_inc_inv(::ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}, ::ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}, ::Bool)
Closest candidates are:
__gamma_inc_inv(::Float64, ::Float64, ::Bool)
@ SpecialFunctions ~/.julia/packages/SpecialFunctions/QH8rV/src/gamma_inc.jl:1012
__gamma_inc_inv(::T, ::T, ::Bool) where T<:Union{Float16, Float32}
@ SpecialFunctions ~/.julia/packages/SpecialFunctions/QH8rV/src/gamma_inc.jl:1089
Stacktrace:
[1] _gamma_inc_inv
@ ~/.julia/packages/SpecialFunctions/QH8rV/src/gamma_inc.jl:1009 [inlined]
[2] gamma_inc_inv
@ ~/.julia/packages/SpecialFunctions/QH8rV/src/gamma_inc.jl:991 [inlined]
[3] gammainvccdf
@ ~/.julia/packages/StatsFuns/mrf0e/src/distrs/gamma.jl:108 [inlined]
[4] model_toyProblem_tranformed(__model__::DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, __varinfo__::DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}}, Vector{Set{DynamicPPL.Selector}}}}, ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}}, __context__::DynamicPPL.SamplingContext{DynamicPPL.Sampler{NUTS{AutoForwardDiff{0, Nothing}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext, Random.TaskLocalRNG}, y::Float64)
@ Main ./In[15]:4
[5] _evaluate!!
@ ~/.julia/packages/DynamicPPL/rXg4T/src/model.jl:963 [inlined]
[6] evaluate_threadunsafe!!
@ ~/.julia/packages/DynamicPPL/rXg4T/src/model.jl:936 [inlined]
[7] evaluate!!
@ ~/.julia/packages/DynamicPPL/rXg4T/src/model.jl:889 [inlined]
[8] logdensity(f::LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, DynamicPPL.SamplingContext{DynamicPPL.Sampler{NUTS{AutoForwardDiff{0, Nothing}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext, Random.TaskLocalRNG}}, θ::Vector{ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}})
@ DynamicPPL ~/.julia/packages/DynamicPPL/rXg4T/src/logdensityfunction.jl:94
[9] Fix1
@ ./operators.jl:1118 [inlined]
[10] vector_mode_dual_eval!
@ ~/.julia/packages/ForwardDiff/PcZ48/src/apiutils.jl:24 [inlined]
[11] vector_mode_gradient!
@ ~/.julia/packages/ForwardDiff/PcZ48/src/gradient.jl:96 [inlined]
[12] gradient!(result::DiffResults.MutableDiffResult{1, Float64, Tuple{Vector{Float64}}}, f::Base.Fix1{typeof(LogDensityProblems.logdensity), LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, DynamicPPL.SamplingContext{DynamicPPL.Sampler{NUTS{AutoForwardDiff{0, Nothing}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext, Random.TaskLocalRNG}}}, x::Vector{Float64}, cfg::ForwardDiff.GradientConfig{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1, Vector{ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}}}, ::Val{true})
@ ForwardDiff ~/.julia/packages/ForwardDiff/PcZ48/src/gradient.jl:37
[13] gradient!(result::DiffResults.MutableDiffResult{1, Float64, Tuple{Vector{Float64}}}, f::Base.Fix1{typeof(LogDensityProblems.logdensity), LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, DynamicPPL.SamplingContext{DynamicPPL.Sampler{NUTS{AutoForwardDiff{0, Nothing}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext, Random.TaskLocalRNG}}}, x::Vector{Float64}, cfg::ForwardDiff.GradientConfig{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1, Vector{ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}}}, ::Val{true})
@ ForwardDiff ~/.julia/packages/ForwardDiff/PcZ48/src/gradient.jl:35 [inlined]
[14] logdensity_and_gradient
@ ~/.julia/packages/LogDensityProblemsAD/rBlLq/ext/LogDensityProblemsADForwardDiffExt.jl:118 [inlined]
[15] ∂logπ∂θ
@ ~/.julia/packages/Turing/lkUBK/src/mcmc/hmc.jl:159 [inlined]
[16] ∂H∂θ
@ ~/.julia/packages/AdvancedHMC/AlvV4/src/hamiltonian.jl:38 [inlined]
[17] phasepoint(h::AdvancedHMC.Hamiltonian{AdvancedHMC.DiagEuclideanMetric{Float64, Vector{Float64}}, AdvancedHMC.GaussianKinetic, Base.Fix1{typeof(LogDensityProblems.logdensity), LogDensityProblemsADForwardDiffExt.ForwardDiffLogDensity{LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, DynamicPPL.SamplingContext{DynamicPPL.Sampler{NUTS{AutoForwardDiff{0, Nothing}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext, Random.TaskLocalRNG}}, ForwardDiff.Chunk{1}, ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, ForwardDiff.GradientConfig{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1, Vector{ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}}}}}, Turing.Inference.var"#∂logπ∂θ#31"{LogDensityProblemsADForwardDiffExt.ForwardDiffLogDensity{LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, DynamicPPL.SamplingContext{DynamicPPL.Sampler{NUTS{AutoForwardDiff{0, Nothing}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext, Random.TaskLocalRNG}}, ForwardDiff.Chunk{1}, ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, ForwardDiff.GradientConfig{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1, Vector{ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}}}}}}, θ::Vector{Float64}, r::Vector{Float64})
@ AdvancedHMC ~/.julia/packages/AdvancedHMC/AlvV4/src/hamiltonian.jl:74
[18] phasepoint(rng::Random.TaskLocalRNG, θ::Vector{Float64}, h::AdvancedHMC.Hamiltonian{AdvancedHMC.DiagEuclideanMetric{Float64, Vector{Float64}}, AdvancedHMC.GaussianKinetic, Base.Fix1{typeof(LogDensityProblems.logdensity), LogDensityProblemsADForwardDiffExt.ForwardDiffLogDensity{LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, DynamicPPL.SamplingContext{DynamicPPL.Sampler{NUTS{AutoForwardDiff{0, Nothing}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext, Random.TaskLocalRNG}}, ForwardDiff.Chunk{1}, ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, ForwardDiff.GradientConfig{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1, Vector{ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}}}}}, Turing.Inference.var"#∂logπ∂θ#31"{LogDensityProblemsADForwardDiffExt.ForwardDiffLogDensity{LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, DynamicPPL.SamplingContext{DynamicPPL.Sampler{NUTS{AutoForwardDiff{0, Nothing}, (), AdvancedHMC.DiagEuclideanMetric}}, DynamicPPL.DefaultContext, Random.TaskLocalRNG}}, ForwardDiff.Chunk{1}, ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, ForwardDiff.GradientConfig{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1, Vector{ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 1}}}}}})
@ AdvancedHMC ~/.julia/packages/AdvancedHMC/AlvV4/src/hamiltonian.jl:155
[19] initialstep(rng::Random.TaskLocalRNG, model::DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, spl::DynamicPPL.Sampler{NUTS{AutoForwardDiff{0, Nothing}, (), AdvancedHMC.DiagEuclideanMetric}}, vi_original::DynamicPPL.TypedVarInfo{@NamedTuple{ω::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:ω, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{AbstractPPL.VarName{:ω, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}; initial_params::Nothing, nadapts::Int64, kwargs::@Kwargs{})
@ Turing.Inference ~/.julia/packages/Turing/lkUBK/src/mcmc/hmc.jl:163
[20] step(rng::Random.TaskLocalRNG, model::DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, spl::DynamicPPL.Sampler{NUTS{AutoForwardDiff{0, Nothing}, (), AdvancedHMC.DiagEuclideanMetric}}; initial_params::Nothing, kwargs::@Kwargs{nadapts::Int64})
@ DynamicPPL ~/.julia/packages/DynamicPPL/rXg4T/src/sampler.jl:116
[21] step
@ ~/.julia/packages/DynamicPPL/rXg4T/src/sampler.jl:99 [inlined]
[22] macro expansion
@ ~/.julia/packages/AbstractMCMC/YrmkI/src/sample.jl:130 [inlined]
[23] macro expansion
@ ~/.julia/packages/ProgressLogging/6KXlp/src/ProgressLogging.jl:328 [inlined]
[24] (::AbstractMCMC.var"#22#23"{Bool, String, Nothing, Int64, Int64, Nothing, @Kwargs{nadapts::Int64}, Random.TaskLocalRNG, DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, DynamicPPL.Sampler{NUTS{AutoForwardDiff{0, Nothing}, (), AdvancedHMC.DiagEuclideanMetric}}, Int64, Int64})()
@ AbstractMCMC ~/.julia/packages/AbstractMCMC/YrmkI/src/logging.jl:12
[25] with_logstate(f::Function, logstate::Any)
@ Base.CoreLogging ./logging.jl:515
[26] with_logger
@ Base.CoreLogging ./logging.jl:627 [inlined]
[27] with_progresslogger(f::Function, _module::Module, logger::Logging.ConsoleLogger)
@ AbstractMCMC ~/.julia/packages/AbstractMCMC/YrmkI/src/logging.jl:36
[28] macro expansion
@ ~/.julia/packages/AbstractMCMC/YrmkI/src/logging.jl:11 [inlined]
[29] mcmcsample(rng::Random.TaskLocalRNG, model::DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, sampler::DynamicPPL.Sampler{NUTS{AutoForwardDiff{0, Nothing}, (), AdvancedHMC.DiagEuclideanMetric}}, N::Int64; progress::Bool, progressname::String, callback::Nothing, discard_initial::Int64, thinning::Int64, chain_type::Type, initial_state::Nothing, kwargs::@Kwargs{nadapts::Int64})
@ AbstractMCMC ~/.julia/packages/AbstractMCMC/YrmkI/src/sample.jl:120
[30] sample(rng::Random.TaskLocalRNG, model::DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, sampler::DynamicPPL.Sampler{NUTS{AutoForwardDiff{0, Nothing}, (), AdvancedHMC.DiagEuclideanMetric}}, N::Int64; chain_type::Type, resume_from::Nothing, initial_state::Nothing, progress::Bool, nadapts::Int64, discard_adapt::Bool, discard_initial::Int64, kwargs::@Kwargs{})
@ Turing.Inference ~/.julia/packages/Turing/lkUBK/src/mcmc/hmc.jl:117
[31] sample
@ Turing.Inference ~/.julia/packages/Turing/lkUBK/src/mcmc/hmc.jl:86 [inlined]
[32] #sample#3
@ Turing.Inference ~/.julia/packages/Turing/lkUBK/src/mcmc/Inference.jl:219 [inlined]
[33] sample
@ Turing.Inference ~/.julia/packages/Turing/lkUBK/src/mcmc/Inference.jl:212 [inlined]
[34] #sample#2
@ Turing.Inference ~/.julia/packages/Turing/lkUBK/src/mcmc/Inference.jl:209 [inlined]
[35] sample(model::DynamicPPL.Model{typeof(model_toyProblem_tranformed), (:y,), (), (), Tuple{Float64}, Tuple{}, DynamicPPL.DefaultContext}, alg::NUTS{AutoForwardDiff{0, Nothing}, (), AdvancedHMC.DiagEuclideanMetric}, N::Int64)
@ Turing.Inference ~/.julia/packages/Turing/lkUBK/src/mcmc/Inference.jl:203
[36] top-level scope
@ In[19]:7
Thank you for your help,