NeuralPDE tutorial not running

I am new to NeuralPDE.jl. I tried the second tutorial for PDE PINN (defining systems of PDEs for PINNs)

https://docs.sciml.ai/NeuralPDE/stable/tutorials/systems/#systems

But I got the error message below for the line:

res = Optimization.solve(prob, BFGS(); callback = callback, maxiters = 5000)

ERROR: type OptimizationState has no field depvar

I am using VSCode on Mac M2 and Julia 1.10.0. I created a new environment and added the latest of all the required packages.

Any help would be greatly appreciated!


Stacktrace:
[1] getproperty
@ Base ./Base.jl:37 [inlined]
[2] macro expansion
@ NeuralPDE ~/.julia/packages/NeuralPDE/5b5Gb/src/discretize.jl:140 [inlined]
[3] macro expansion
@ NeuralPDE ~/.julia/packages/RuntimeGeneratedFunctions/Yo8zx/src/RuntimeGeneratedFunctions.jl:163 [inlined]
[4] macro expansion
@ NeuralPDE ./none:0 [inlined]
[5] generated_callfunc(::RuntimeGeneratedFunctions.RuntimeGeneratedFunction{…}, ::Matrix{…}, ::Optimization.OptimizationState{…}, ::Vector{…}, ::typeof(NeuralPDE.numeric_derivative), ::NeuralPDE.var"#300#307"{…}, ::NeuralPDE.var"#32#33", ::Nothing)
@ NeuralPDE ./none:0
[6] (::RuntimeGeneratedFunctions.RuntimeGeneratedFunction{…})(::Matrix{…}, ::Optimization.OptimizationState{…}, ::Vector{…}, ::Function, ::Function, ::Function, ::Nothing)
@ RuntimeGeneratedFunctions ~/.julia/packages/RuntimeGeneratedFunctions/Yo8zx/src/RuntimeGeneratedFunctions.jl:150
[7] (::NeuralPDE.var"#240#241"{…})(cord::Matrix{…}, θ::Optimization.OptimizationState{…})
@ NeuralPDE ~/.julia/packages/NeuralPDE/5b5Gb/src/discretize.jl:161
[8] (::NeuralPDE.var"#integrand#138"{…})(x::Matrix{…}, θ::Optimization.OptimizationState{…})
@ NeuralPDE ~/.julia/packages/NeuralPDE/5b5Gb/src/training_strategies.jl:316
[9] (::BatchIntegralFunction{…})(::Matrix{…}, ::Vararg{…})
@ SciMLBase ~/.julia/packages/SciMLBase/QSc1r/src/scimlfunctions.jl:2200
[10] __solvebp_call(prob::IntegralProblem{…}, alg::Integrals.CubatureJLh, sensealg::Integrals.ReCallVJP{…}, domain::Tuple{…}, p::Optimization.OptimizationState{…}; reltol::Float64, abstol::Float64, maxiters::Int64)
@ IntegralsCubatureExt ~/.julia/packages/Integrals/CazE7/ext/IntegralsCubatureExt.jl:23
[11] __solvebp_call(::Integrals.IntegralCache{…}, ::Integrals.CubatureJLh, ::Vararg{…}; kwargs::@Kwargs{…})
@ Integrals ~/.julia/packages/Integrals/CazE7/src/common.jl:115
[12] solvebp(::Integrals.IntegralCache{…}, ::Vararg{…}; kwargs::@Kwargs{…})
@ Integrals ~/.julia/packages/Integrals/CazE7/src/Integrals.jl:65
[13] solve!(cache::Integrals.IntegralCache{…})
@ Integrals ~/.julia/packages/Integrals/CazE7/src/common.jl:105
[14] solve(prob::IntegralProblem{…}, alg::Integrals.CubatureJLh; kwargs::@Kwargs{…})
@ Integrals ~/.julia/packages/Integrals/CazE7/src/common.jl:101
[15] solve
@ ~/.julia/packages/Integrals/CazE7/src/common.jl:98 [inlined]
[16] (::NeuralPDE.var"#134#137"{…})(lb::Vector{…}, ub::Vector{…}, loss
::NeuralPDE.var"#240#241"{…}, θ::Optimization.OptimizationState{…})
@ NeuralPDE ~/.julia/packages/NeuralPDE/5b5Gb/src/training_strategies.jl:320
[17] (::NeuralPDE.var"#135#139"{…})(θ::Optimization.OptimizationState{…})
@ NeuralPDE ~/.julia/packages/NeuralPDE/5b5Gb/src/training_strategies.jl:326
[18] (::var"#8#11"{…})(l
::NeuralPDE.var"#135#139"{…})
@ Main ~/Library/CloudStorage/GoogleDrive-stju@g.ucla.edu/My Drive/STJulia/ST_PINN/NeuralPDE_tutorial_1.jl:45
[19] iterate(g::Base.Generator, s::Vararg{Any})
@ Base ./generator.jl:47 [inlined]
[20] _collect(c::Vector{…}, itr::Base.Generator{…}, ::Base.EltypeUnknown, isz::Base.HasShape{…})
@ Base ./array.jl:854
[21] collect_similar(cont::Vector{…}, itr::Base.Generator{…})
@ Base ./array.jl:763
[22] map(f::Function, A::Vector{NeuralPDE.var"#135#139"{…} where loss_function})
@ Base ./abstractarray.jl:3282
[23] (::var"#7#10")(p::Optimization.OptimizationState{…}, l::Float64)
@ Main ~/Library/CloudStorage/GoogleDrive-stju@g.ucla.edu/My Drive/STJulia/ST_PINN/NeuralPDE_tutorial_1.jl:45
[24] (::OptimizationOptimJL.var"#_cb#12"{OptimizationCache{…}})(trace::OptimizationState{Float64, BFGS{…}})
@ OptimizationOptimJL ~/.julia/packages/OptimizationOptimJL/yMF3E/src/OptimizationOptimJL.jl:142
[25] update!(tr::Vector{…}, iteration::Int64, f_x::Float64, grnorm::Float64, dt::Dict{…}, store_trace::Bool, show_trace::Bool, show_every::Int64, callback::OptimizationOptimJL.var"#_cb#12"{…}, trace_simplex::Bool)
@ Optim ~/.julia/packages/Optim/EJwLF/src/utilities/update.jl:25
[26] update!(tr::Vector{…}, iteration::Int64, f_x::Float64, grnorm::Float64, dt::Dict{…}, store_trace::Bool, show_trace::Bool, show_every::Int64, callback::OptimizationOptimJL.var"#_cb#12"{…}, trace_simplex::Bool)
@ Optim ~/.julia/packages/Optim/EJwLF/src/utilities/update.jl:11 [inlined]
[27] trace!(tr::Vector{…}, d::TwiceDifferentiable{…}, state::Optim.BFGSState{…}, iteration::Int64, method::BFGS{…}, options::Optim.Options{…}, curr_time::Float64)
@ Optim ~/.julia/packages/Optim/EJwLF/src/multivariate/solvers/first_order/bfgs.jl:191
[28] optimize(d::TwiceDifferentiable{…}, initial_x::ComponentArrays.ComponentVector{…}, method::BFGS{…}, options::Optim.Options{…}, state::Optim.BFGSState{…})
@ Optim ~/.julia/packages/Optim/EJwLF/src/multivariate/optimize/optimize.jl:50
[29] optimize(d::TwiceDifferentiable{…}, initial_x::ComponentArrays.ComponentVector{…}, method::BFGS{…}, options::Optim.Options{…})
@ Optim ~/.julia/packages/Optim/EJwLF/src/multivariate/optimize/optimize.jl:36
[30] __solve(cache::OptimizationCache{…})
@ OptimizationOptimJL ~/.julia/packages/OptimizationOptimJL/yMF3E/src/OptimizationOptimJL.jl:212
[31] solve!(cache::OptimizationCache{…})
@ SciMLBase ~/.julia/packages/SciMLBase/QSc1r/src/solve.jl:179
[32] solve(::OptimizationProblem{…}, ::BFGS{…}; kwargs::@Kwargs{…})
@ SciMLBase ~/.julia/packages/SciMLBase/QSc1r/src/solve.jl:96
[33] top-level scope
@ ~/Library/CloudStorage/GoogleDrive-stju@g.ucla.edu/My Drive/STJulia/ST_PINN/NeuralPDE_tutorial_1.jl:50
Some type information was truncated. Use show(err) to see complete types.

What package versions are you using? Could you paste the output of `]st`?

Oh, sorry for not including that info. Here it is.

[b2108857] Lux v0.5.17
[961ee093] ModelingToolkit v8.75.0
[315f7962] NeuralPDE v5.11.0
[7f7a1694] Optimization v3.22.0
[36348300] OptimizationOptimJL v0.2.2
[91a5bcdd] Plots v1.40.1

Was that in a fresh REPL?

Yes. I made a dedicated environment to run these tutorials for NeuralPDE.

Thanks for looking into this!

The tutorial is slightly outdated. The issue is in the callback functions.

callback = function (p, l)
           println("loss: ", l)
           println("pde_losses: ", map(l_ -> l_(p.u), pde_inner_loss_functions))
           println("bcs_losses: ", map(l_ -> l_(p.u), bcs_inner_loss_functions))
           return false
       end

should fix it. Notice that it uses p.u instead of p. This is because OptimizationOptimJL@0.2 introduced OptimizationState ([WIP]Stats and State by Vaibhavdixit02 · Pull Request #652 · SciML/Optimization.jl · GitHub), so the callback now receives an OptimizationState whose parameters live in the u field.

1 Like

Thanks!

thanks.