Turing.jl is not working

I have worked with Turing.jl before and it worked nicely, but it has now stopped working — even for a simple toy example from the Turing website, such as:

# Import packages.
using Turing
using StatsPlots

# Define a simple Normal model with unknown mean and variance.
@model function gdemo(x, y)
    s² ~ InverseGamma(2, 3)
    m ~ Normal(0, sqrt(s²))
    x ~ Normal(m, sqrt(s²))
    y ~ Normal(m, sqrt(s²))
end

c6 = sample(gdemo(1.5, 2), NUTS(0.65), 1000)

I received the following error:

MethodError: no method matching ADgradient(::Val{:ForwardDiff}, ::Turing.LogDensityFunction{TypedVarInfo{NamedTuple{(:s², :m), Tuple{DynamicPPL.Metadata{Dict{VarName{:s², Setfield.IdentityLens}, Int64}, Vector{InverseGamma{Float64}}, Vector{VarName{:s², Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{VarName{:m, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{VarName{:m, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, Model{typeof(gdemo), (:x, :y), (), (), Tuple{Float64, Int64}, Tuple{}, DefaultContext}, Sampler{NUTS{ForwardDiffAD{0}, (), DiagEuclideanMetric}}, DefaultContext}; gradientconfig::ForwardDiff.GradientConfig{ForwardDiff.Tag{Turing.TuringTag, Float64}, Float64, 2, Vector{ForwardDiff.Dual{ForwardDiff.Tag{Turing.TuringTag, Float64}, Float64, 2}}})

Closest candidates are:
  ADgradient(::Val{:ForwardDiff}, ::Any; chunk, tag, x) got unsupported keyword argument "gradientconfig"
   @ LogDensityProblemsADForwardDiffExt ~/.julia/packages/LogDensityProblemsAD/OQ0BL/ext/LogDensityProblemsADForwardDiffExt.jl:98
  ADgradient(::Val{kind}, ::Any; kwargs...) where kind
   @ LogDensityProblemsAD ~/.julia/packages/LogDensityProblemsAD/OQ0BL/src/LogDensityProblemsAD.jl:68
  ADgradient(::ADTypes.AutoForwardDiff{C}, ::Any) where C got unsupported keyword argument "gradientconfig"
   @ LogDensityProblemsADADTypesExt ~/.julia/packages/LogDensityProblemsAD/OQ0BL/ext/LogDensityProblemsADADTypesExt.jl:31
  ...


Stacktrace:
  [1] kwerr(::NamedTuple{(:gradientconfig,), Tuple{ForwardDiff.GradientConfig{ForwardDiff.Tag{Turing.TuringTag, Float64}, Float64, 2, Vector{ForwardDiff.Dual{ForwardDiff.Tag{Turing.TuringTag, Float64}, Float64, 2}}}}}, ::Function, ::Val{:ForwardDiff}, ::Turing.LogDensityFunction{TypedVarInfo{NamedTuple{(:s², :m), Tuple{DynamicPPL.Metadata{Dict{VarName{:s², Setfield.IdentityLens}, Int64}, Vector{InverseGamma{Float64}}, Vector{VarName{:s², Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{VarName{:m, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{VarName{:m, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, Model{typeof(gdemo), (:x, :y), (), (), Tuple{Float64, Int64}, Tuple{}, DefaultContext}, Sampler{NUTS{ForwardDiffAD{0}, (), DiagEuclideanMetric}}, DefaultContext})
    @ Base ./error.jl:165
  [2] ADgradient(ad::ForwardDiffAD{0, true}, ℓ::Turing.LogDensityFunction{TypedVarInfo{NamedTuple{(:s², :m), Tuple{DynamicPPL.Metadata{Dict{VarName{:s², Setfield.IdentityLens}, Int64}, Vector{InverseGamma{Float64}}, Vector{VarName{:s², Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{VarName{:m, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{VarName{:m, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, Model{typeof(gdemo), (:x, :y), (), (), Tuple{Float64, Int64}, Tuple{}, DefaultContext}, Sampler{NUTS{ForwardDiffAD{0}, (), DiagEuclideanMetric}}, DefaultContext})
    @ Turing.Essential ~/.julia/packages/Turing/UsWJl/src/essential/ad.jl:102
  [3] ADgradient(ℓ::Turing.LogDensityFunction{TypedVarInfo{NamedTuple{(:s², :m), Tuple{DynamicPPL.Metadata{Dict{VarName{:s², Setfield.IdentityLens}, Int64}, Vector{InverseGamma{Float64}}, Vector{VarName{:s², Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{VarName{:m, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{VarName{:m, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}, Model{typeof(gdemo), (:x, :y), (), (), Tuple{Float64, Int64}, Tuple{}, DefaultContext}, Sampler{NUTS{ForwardDiffAD{0}, (), DiagEuclideanMetric}}, DefaultContext})
    @ Turing.Essential ~/.julia/packages/Turing/UsWJl/src/essential/ad.jl:82
  [4] initialstep(rng::Random._GLOBAL_RNG, model::Model{typeof(gdemo), (:x, :y), (), (), Tuple{Float64, Int64}, Tuple{}, DefaultContext}, spl::Sampler{NUTS{ForwardDiffAD{0}, (), DiagEuclideanMetric}}, vi::TypedVarInfo{NamedTuple{(:s², :m), Tuple{DynamicPPL.Metadata{Dict{VarName{:s², Setfield.IdentityLens}, Int64}, Vector{InverseGamma{Float64}}, Vector{VarName{:s², Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, DynamicPPL.Metadata{Dict{VarName{:m, Setfield.IdentityLens}, Int64}, Vector{Normal{Float64}}, Vector{VarName{:m, Setfield.IdentityLens}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}}, Float64}; init_params::Nothing, nadapts::Int64, kwargs::Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
    @ Turing.Inference ~/.julia/packages/Turing/UsWJl/src/inference/hmc.jl:161
  [5] step(rng::Random._GLOBAL_RNG, model::Model{typeof(gdemo), (:x, :y), (), (), Tuple{Float64, Int64}, Tuple{}, DefaultContext}, spl::Sampler{NUTS{ForwardDiffAD{0}, (), DiagEuclideanMetric}}; resume_from::Nothing, init_params::Nothing, kwargs::Base.Pairs{Symbol, Int64, Tuple{Symbol}, NamedTuple{(:nadapts,), Tuple{Int64}}})
    @ DynamicPPL ~/.julia/packages/DynamicPPL/UFajj/src/sampler.jl:111
  [6] macro expansion
    @ ~/.julia/packages/AbstractMCMC/fWWW0/src/sample.jl:125 [inlined]
  [7] macro expansion
    @ ~/.julia/packages/ProgressLogging/6KXlp/src/ProgressLogging.jl:328 [inlined]
  [8] (::AbstractMCMC.var"#21#22"{Bool, String, Nothing, Int64, Int64, Base.Pairs{Symbol, Int64, Tuple{Symbol}, NamedTuple{(:nadapts,), Tuple{Int64}}}, Random._GLOBAL_RNG, Model{typeof(gdemo), (:x, :y), (), (), Tuple{Float64, Int64}, Tuple{}, DefaultContext}, Sampler{NUTS{ForwardDiffAD{0}, (), DiagEuclideanMetric}}, Int64, Int64})()
    @ AbstractMCMC ~/.julia/packages/AbstractMCMC/fWWW0/src/logging.jl:12
  [9] with_logstate(f::Function, logstate::Any)
    @ Base.CoreLogging ./logging.jl:514
 [10] with_logger
    @ ./logging.jl:626 [inlined]
 [11] with_progresslogger(f::Function, _module::Module, logger::Logging.ConsoleLogger)
    @ AbstractMCMC ~/.julia/packages/AbstractMCMC/fWWW0/src/logging.jl:36
 [12] macro expansion
    @ ~/.julia/packages/AbstractMCMC/fWWW0/src/logging.jl:11 [inlined]
 [13] mcmcsample(rng::Random._GLOBAL_RNG, model::Model{typeof(gdemo), (:x, :y), (), (), Tuple{Float64, Int64}, Tuple{}, DefaultContext}, sampler::Sampler{NUTS{ForwardDiffAD{0}, (), DiagEuclideanMetric}}, N::Int64; progress::Bool, progressname::String, callback::Nothing, discard_initial::Int64, thinning::Int64, chain_type::Type, kwargs::Base.Pairs{Symbol, Int64, Tuple{Symbol}, NamedTuple{(:nadapts,), Tuple{Int64}}})
    @ AbstractMCMC ~/.julia/packages/AbstractMCMC/fWWW0/src/sample.jl:116
 [14] sample(rng::Random._GLOBAL_RNG, model::Model{typeof(gdemo), (:x, :y), (), (), Tuple{Float64, Int64}, Tuple{}, DefaultContext}, sampler::Sampler{NUTS{ForwardDiffAD{0}, (), DiagEuclideanMetric}}, N::Int64; chain_type::Type, resume_from::Nothing, progress::Bool, nadapts::Int64, discard_adapt::Bool, discard_initial::Int64, kwargs::Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
    @ Turing.Inference ~/.julia/packages/Turing/UsWJl/src/inference/hmc.jl:133
 [15] sample
    @ ~/.julia/packages/Turing/UsWJl/src/inference/hmc.jl:103 [inlined]
 [16] #sample#2
    @ ~/.julia/packages/Turing/UsWJl/src/inference/Inference.jl:146 [inlined]
 [17] sample
    @ ~/.julia/packages/Turing/UsWJl/src/inference/Inference.jl:139 [inlined]
 [18] #sample#1
    @ ~/.julia/packages/Turing/UsWJl/src/inference/Inference.jl:136 [inlined]
 [19] sample(model::Model{typeof(gdemo), (:x, :y), (), (), Tuple{Float64, Int64}, Tuple{}, DefaultContext}, alg::NUTS{ForwardDiffAD{0}, (), DiagEuclideanMetric}, N::Int64)
    @ Turing.Inference ~/.julia/packages/Turing/UsWJl/src/inference/Inference.jl:130
 [20] top-level scope
    @ In[26]:1

This example works fine for me on Julia 1.10.2 and Turing 0.30.7 (output below).

Output
julia> @model function gdemo(x, y)
           s² ~ InverseGamma(2, 3)
               m ~ Normal(0, sqrt(s²))
                   x ~ Normal(m, sqrt(s²))
                       y ~ Normal(m, sqrt(s²))
                       end
gdemo (generic function with 2 methods)

julia>

julia> c6 = sample(gdemo(1.5, 2), NUTS(0.65), 1000)
┌ Info: Found initial step size
└   ϵ = 0.8
Sampling 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| Time: 0:00:03
Chains MCMC chain (1000×14×1 Array{Float64, 3}):

Iterations        = 501:1:1500
Number of chains  = 1
Samples per chain = 1000
Wall duration     = 3.47 seconds
Compute duration  = 3.47 seconds
parameters        = s², m
internals         = lp, n_steps, is_accept, acceptance_rate, log_density, hamiltonian_energy, hamiltonian_energy_error, max_hamiltonian_energy_error, tree_depth, numerical_error, step_size, nom_step_size

Summary Statistics
  parameters      mean       std      mcse   ess_bulk   ess_tail      rhat   ess_per_sec
      Symbol   Float64   Float64   Float64    Float64    Float64   Float64       Float64

          s²    2.0410    1.7763    0.0807   526.1108   574.8890    1.0012      151.4424
           m    1.2093    0.8292    0.0354   542.7964   642.2759    1.0054      156.2454

Quantiles
  parameters      2.5%     25.0%     50.0%     75.0%     97.5%
      Symbol   Float64   Float64   Float64   Float64   Float64

          s²    0.5958    1.0692    1.5760    2.4660    5.6401
           m   -0.3197    0.6868    1.1924    1.6953    2.8859

Thanks — I figured out the issue. From your reply, I realised my Turing installation was outdated. I updated my Turing version and now it is working. Thanks again.