I am able to minimize the loss with ADAM to some extent, but I get the error below when I try to reduce it further using BFGS(). Any ideas on how to resolve this?
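For context, the two training calls I'm making look roughly like this (a simplified sketch; `loss`, `p_init`, and `callback` stand in for my actual definitions, and the learning rate / iteration counts are illustrative):

```julia
using DiffEqFlux, Flux, Optim  # ADAM comes from Flux, BFGS from Optim

# Stage 1: ADAM to get into a reasonable region of parameter space
res_adam = DiffEqFlux.sciml_train(loss, p_init, ADAM(0.01);
                                  cb = callback, maxiters = 300)

# Stage 2: refine with BFGS, warm-started from the ADAM result
res_bfgs = DiffEqFlux.sciml_train(loss, res_adam.minimizer, BFGS();
                                  cb = callback, maxiters = 100, g_tol = 1f-6)
```

The ADAM stage runs, but the BFGS call produces the warning and MethodError below.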
Warning: AD methods failed, using numerical differentiation. To debug, try ForwardDiff.gradient(loss, θ) or Zygote.gradient(loss, θ)
└ @ DiffEqFlux ~/.julia/packages/DiffEqFlux/jpIWG/src/train.jl:71
ERROR: MethodError: objects of type Float32 are not callable
Stacktrace:
[1] (::DiffEqFlux.var"#82#87"{Float32})(x::Vector{Float32}, p::Nothing)
@ DiffEqFlux ~/.julia/packages/DiffEqFlux/jpIWG/src/train.jl:84
[2] (::GalacticOptim.var"#179#185"{OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing})(::Vector{Float32})
@ GalacticOptim ~/.julia/packages/GalacticOptim/DHxE0/src/function/finitediff.jl:11
[3] (::GalacticOptim.var"#181#187"{Tuple{}, GalacticOptim.var"#179#185"{OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}})(x::Vector{Float32})
@ GalacticOptim ~/.julia/packages/GalacticOptim/DHxE0/src/function/finitediff.jl:14
[4] #finite_difference_gradient!#16
@ ~/.julia/packages/FiniteDiff/msXcU/src/gradients.jl:240 [inlined]
[5] finite_difference_gradient!
@ ~/.julia/packages/FiniteDiff/msXcU/src/gradients.jl:224 [inlined]
[6] (::GalacticOptim.var"#180#186"{Vector{Float32}, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, GalacticOptim.var"#179#185"{OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}})(::Vector{Float32}, ::Vector{Float32})
@ GalacticOptim ~/.julia/packages/GalacticOptim/DHxE0/src/function/finitediff.jl:14
[7] (::GalacticOptim.var"#130#138"{OptimizationProblem{false, OptimizationFunction{false, GalacticOptim.AutoFiniteDiff, OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, GalacticOptim.var"#180#186"{Vector{Float32}, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, GalacticOptim.var"#179#185"{OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#182#188"{Vector{Float32}, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, GalacticOptim.var"#179#185"{OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#184#190", Nothing, Nothing, Nothing}, Vector{Float32}, SciMLBase.NullParameters, Nothing, Nothing, Nothing, Nothing, Base.Iterators.Pairs{Symbol, Any, Tuple{Symbol, Symbol}, NamedTuple{(:cb, :g_tol), Tuple{var"#76#80", Float32}}}}, GalacticOptim.var"#129#137"{OptimizationProblem{false, OptimizationFunction{false, GalacticOptim.AutoFiniteDiff, OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, GalacticOptim.var"#180#186"{Vector{Float32}, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, GalacticOptim.var"#179#185"{OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#182#188"{Vector{Float32}, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, GalacticOptim.var"#179#185"{OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#184#190", Nothing, Nothing, Nothing}, Vector{Float32}, SciMLBase.NullParameters, Nothing, Nothing, Nothing, Nothing, Base.Iterators.Pairs{Symbol, Any, Tuple{Symbol, Symbol}, NamedTuple{(:cb, :g_tol), Tuple{var"#76#80", Float32}}}}, OptimizationFunction{false, GalacticOptim.AutoFiniteDiff, OptimizationFunction{false, GalacticOptim.AutoFiniteDiff, OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, GalacticOptim.var"#180#186"{Vector{Float32}, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, GalacticOptim.var"#179#185"{OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#182#188"{Vector{Float32}, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, GalacticOptim.var"#179#185"{OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#184#190", Nothing, Nothing, Nothing}, GalacticOptim.var"#180#186"{Vector{Float32}, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, GalacticOptim.var"#179#185"{OptimizationFunction{true, 
GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#182#188"{Vector{Float32}, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, GalacticOptim.var"#179#185"{OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#184#190", Nothing, Nothing, Nothing}}, OptimizationFunction{false, GalacticOptim.AutoFiniteDiff, OptimizationFunction{false, GalacticOptim.AutoFiniteDiff, OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, GalacticOptim.var"#180#186"{Vector{Float32}, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, GalacticOptim.var"#179#185"{OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#182#188"{Vector{Float32}, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, GalacticOptim.var"#179#185"{OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#184#190", Nothing, Nothing, Nothing}, GalacticOptim.var"#180#186"{Vector{Float32}, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, GalacticOptim.var"#179#185"{OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#182#188"{Vector{Float32}, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, GalacticOptim.var"#179#185"{OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#184#190", Nothing, Nothing, Nothing}})(G::Vector{Float32}, θ::Vector{Float32})
@ GalacticOptim ~/.julia/packages/GalacticOptim/DHxE0/src/solve/optim.jl:93
[8] value_gradient!!(obj::TwiceDifferentiable{Float32, Vector{Float32}, Matrix{Float32}, Vector{Float32}}, x::Vector{Float32})
@ NLSolversBase ~/.julia/packages/NLSolversBase/cfJrN/src/interface.jl:82
[9] initial_state(method::BFGS{LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Nothing, Float32, Flat}, options::Optim.Options{Float64, GalacticOptim.var"#_cb#136"{var"#76#80", BFGS{LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Nothing, Float32, Flat}, Base.Iterators.Cycle{Tuple{GalacticOptim.NullData}}}}, d::TwiceDifferentiable{Float32, Vector{Float32}, Matrix{Float32}, Vector{Float32}}, initial_x::Vector{Float32})
@ Optim ~/.julia/packages/Optim/rES57/src/multivariate/solvers/first_order/bfgs.jl:94
[10] optimize(d::TwiceDifferentiable{Float32, Vector{Float32}, Matrix{Float32}, Vector{Float32}}, initial_x::Vector{Float32}, method::BFGS{LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Nothing, Float32, Flat}, options::Optim.Options{Float64, GalacticOptim.var"#_cb#136"{var"#76#80", BFGS{LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Nothing, Float32, Flat}, Base.Iterators.Cycle{Tuple{GalacticOptim.NullData}}}})
@ Optim ~/.julia/packages/Optim/rES57/src/multivariate/optimize/optimize.jl:35
[11] ___solve(prob::OptimizationProblem{false, OptimizationFunction{false, GalacticOptim.AutoFiniteDiff, OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, GalacticOptim.var"#180#186"{Vector{Float32}, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, GalacticOptim.var"#179#185"{OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#182#188"{Vector{Float32}, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, GalacticOptim.var"#179#185"{OptimizationFunction{true, GalacticOptim.AutoFiniteDiff{Val{:forward}, Val{:hcentral}}, DiffEqFlux.var"#82#87"{Float32}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#184#190", Nothing, Nothing, Nothing}, Vector{Float32}, SciMLBase.NullParameters, Nothing, Nothing, Nothing, Nothing, Base.Iterators.Pairs{Symbol, Any, Tuple{Symbol, Symbol}, NamedTuple{(:cb, :g_tol), Tuple{var"#76#80", Float32}}}}, opt::BFGS{LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Nothing, Float32, Flat}, data::Base.Iterators.Cycle{Tuple{GalacticOptim.NullData}}; cb::Function, maxiters::Int64, maxtime::Nothing, abstol::Nothing, reltol::Nothing, progress::Bool, kwargs::Base.Iterators.Pairs{Symbol, Float32, Tuple{Symbol}, NamedTuple{(:g_tol,), Tuple{Float32}}})
@ GalacticOptim ~/.julia/packages/GalacticOptim/DHxE0/src/solve/optim.jl:129
[12] #__solve#127
@ ~/.julia/packages/GalacticOptim/DHxE0/src/solve/optim.jl:49 [inlined]
[13] #solve#476
@ ~/.julia/packages/SciMLBase/x3z0g/src/solve.jl:3 [inlined]
[14] sciml_train(::Float32, ::Vector{Float32}, ::BFGS{LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Nothing, Float32, Flat}, ::Nothing; lower_bounds::Nothing, upper_bounds::Nothing, maxiters::Int64, kwargs::Base.Iterators.Pairs{Symbol, Any, Tuple{Symbol, Symbol}, NamedTuple{(:cb, :g_tol), Tuple{var"#76#80", Float32}}})
@ DiffEqFlux ~/.julia/packages/DiffEqFlux/jpIWG/src/train.jl:89
[15] optimise_p(p_init::Vector{Float32}, prob::ODEProblem{Vector{Float32}, Tuple{Float32, Float32}, true, Vector{Float32}, ODEFunction{true, ModelingToolkit.var"#f#253"{RuntimeGeneratedFunctions.RuntimeGeneratedFunction{(:ˍ₋arg1, :ˍ₋arg2, :t), ModelingToolkit.var"#_RGF_ModTag", ModelingToolkit.var"#_RGF_ModTag", (0x2f80bcbe, 0xac1fa29b, 0xb0dcfa15, 0x5640cd1e, 0x8eaf4a5d)}, RuntimeGeneratedFunctions.RuntimeGeneratedFunction{(:ˍ₋out, :ˍ₋arg1, :ˍ₋arg2, :t), ModelingToolkit.var"#_RGF_ModTag", ModelingToolkit.var"#_RGF_ModTag", (0x0ecdd92a, 0xc8ee1a18, 0xcba5708e, 0x17895940, 0xca0c871c)}}, UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Vector{Symbol}, Symbol, ModelingToolkit.var"#246#generated_observed#260"{Bool, ODESystem, Dict{Any, Any}}, Nothing}, Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, i::Int64, tend::Float32)
@ Main ~/nikhil/s rao reduction of chem reaction/model_red_julia/mainusing jacobian.jl:234
[16] top-level scope
@ none:1
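For reference, the debugging step that the warning suggests, spelled out (assuming `loss` and `θ` refer to my loss function and the current parameter vector coming out of the ADAM stage):

```julia
using ForwardDiff, Zygote

# Check whether AD can compute the gradient of the loss at the current parameters
g_forward = ForwardDiff.gradient(loss, θ)   # forward-mode AD
g_reverse = Zygote.gradient(loss, θ)[1]     # reverse-mode AD (returns a tuple)
```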