DiffEqFlux and Lux/Flux

I’m getting an error using SciML to train a neural ODE from a DiffEqFlux tutorial. I used the same code, with some modifications, last summer with no problems, but I can’t seem to find a solution now. I am using Julia v1.7.1.

using DiffEqFlux, OrdinaryDiffEq, Flux, Optim, Plots


u0 = Float32[2.0; 0.0]
datasize = 30
tspan = (0.0f0, 1.5f0)
tsteps = range(tspan[1], tspan[2], length = datasize)

function trueODEfunc(du, u, p, t)
    true_A = [-0.1 2.0; -2.0 -0.1]
    du .= ((u.^3)'true_A)'
end

prob_trueode = ODEProblem(trueODEfunc, u0, tspan)
ode_data = Array(solve(prob_trueode, Tsit5(), saveat = tsteps))

dudt2 = FastChain((x, p) -> x.^3,
                  FastDense(2, 50, tanh),
                  FastDense(50, 2))

prob_neuralode = NeuralODE(dudt2, tspan, Tsit5(), saveat = tsteps)

function predict_neuralode(p)
  Array(prob_neuralode(u0, p))
end

function loss_neuralode(p)
    pred = predict_neuralode(p)
    loss = sum(abs2, ode_data .- pred)
    return loss, pred
end

# Callback function to observe training
list_plots = []
iter = 0
callback = function (l, pred; doplot = false)
  global list_plots, iter

  if iter == 0
    list_plots = []
  end
  iter += 1

  display(l)

  # plot current prediction against data
  plt = scatter(tsteps, ode_data[1,:], label = "data")
  scatter!(plt, tsteps, pred[1,:], label = "prediction")
  push!(list_plots, plt)
  if doplot
    display(plot(plt))
  end

  return false
end

result_neuralode = DiffEqFlux.sciml_train(loss_neuralode, prob_neuralode.p,
                                          ADAM(0.05), cb = callback,
                                          maxiters = 300)

With the error being:

MethodError: no method matching (OptimizationFunction{false, GalacticOptim.AutoZygote, OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{typeof(loss_neuralode)}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, GalacticOptim.var"#268#278"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{typeof(loss_neuralode)}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#271#281"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{typeof(loss_neuralode)}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#276#286", Nothing, Nothing, Nothing})(::OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{typeof(loss_neuralode)}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, ::GalacticOptim.AutoZygote, ::GalacticOptim.var"#268#278"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{typeof(loss_neuralode)}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, ::GalacticOptim.var"#271#281"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{typeof(loss_neuralode)}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, ::GalacticOptim.var"#276#286", ::Nothing, ::Nothing, ::Nothing)

Stacktrace:
 [1] instantiate_function(f::Function, x::Vector{Float32}, ::GalacticOptim.AutoZygote, p::Nothing, num_cons::Int64)
   @ GalacticOptim C:\Users\User 1\.julia\packages\GalacticOptim\fow0r\src\function\zygote.jl:40
 [2] instantiate_function(f::Function, x::Vector{Float32}, ::GalacticOptim.AutoZygote, p::Nothing)
   @ GalacticOptim C:\Users\User 1\.julia\packages\GalacticOptim\fow0r\src\function\zygote.jl:4
 [3] sciml_train(::typeof(loss_neuralode), ::Vector{Float32}, ::ADAM, ::Nothing; lower_bounds::Nothing, upper_bounds::Nothing, maxiters::Int64, kwargs::Base.Pairs{Symbol, var"#43#45", Tuple{Symbol}, NamedTuple{(:cb,), Tuple{var"#43#45"}}})
   @ DiffEqFlux C:\Users\User 1\.julia\packages\DiffEqFlux\gH716\src\train.jl:87

I’m not sure if it could be the specific version of Julia that I’m using. This is taken directly from the tutorial “Optimization of Ordinary Differential Equations” in the DiffEqFlux.jl docs.

Any thoughts?

For some reason, you found the docs from 2 years ago. This has changed tremendously. Use the real docs:

https://diffeqflux.sciml.ai/dev/examples/neural_ode/
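
For anyone comparing the two: the main change is that FastChain/FastDense are gone in favor of Lux layers with explicit parameters and state. A minimal sketch of the new calling convention, assuming a DiffEqFlux version with the Lux-based API:

using Lux, DiffEqFlux, OrdinaryDiffEq, Random

# Lux.Chain/Lux.Dense replace FastChain/FastDense
dudt2 = Lux.Chain(x -> x.^3,
                  Lux.Dense(2, 50, tanh),
                  Lux.Dense(50, 2))

rng = Random.default_rng()
p, st = Lux.setup(rng, dudt2)   # parameters p and state st live outside the model

prob_neuralode = NeuralODE(dudt2, (0.0f0, 1.5f0), Tsit5(),
                           saveat = range(0.0f0, 1.5f0, length = 30))

# the model is now called with (u0, p, st) and returns (solution, state)
sol, _ = prob_neuralode(Float32[2.0; 0.0], p, st)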

I have loaded the specified libraries and run the code, which results in the error:
MethodError: no method matching (::NeuralODE{Lux.Chain{NamedTuple{(:layer_1, :layer_2, :layer_3), Tuple{ActivationFunction{var"#1#2"}, Lux.Dense{true, typeof(tanh_fast), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}, Lux.Dense{true, typeof(identity), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}}}}, Vector{Bool}, Optimisers.Restructure{Lux.Chain{NamedTuple{(:layer_1, :layer_2, :layer_3), Tuple{ActivationFunction{var"#1#2"}, Lux.Dense{true, typeof(tanh_fast), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}, Lux.Dense{true, typeof(identity), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}}}}, Tuple{}}, Tuple{Float32, Float32}, Tuple{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, Base.Pairs{Symbol, StepRangeLen{Float32, Float64, Float64, Int64}, Tuple{Symbol}, NamedTuple{(:saveat,), Tuple{StepRangeLen{Float32, Float64, Float64, Int64}}}}})(::Vector{Float32}, ::ComponentArrays.ComponentVector{Float32, Vector{Float32}, Tuple{ComponentArrays.Axis{(layer_1 = 1:0, layer_2 = ViewAxis(1:150, Axis(weight = ViewAxis(1:100, ShapedAxis((50, 2), NamedTuple())), bias = ViewAxis(101:150, ShapedAxis((50, 1), NamedTuple())))), layer_3 = ViewAxis(151:252, Axis(weight = ViewAxis(1:100, ShapedAxis((2, 50), NamedTuple())), bias = ViewAxis(101:102, ShapedAxis((2, 1), NamedTuple())))))}}}, ::NamedTuple{(:layer_1, :layer_2, :layer_3), Tuple{NamedTuple{(), Tuple{}}, NamedTuple{(), Tuple{}}, NamedTuple{(), Tuple{}}}})

Closest candidates are:
(::NeuralODE)(::Any, ::Any) at C:\Users\User 1\.julia\packages\DiffEqFlux\Te0lc\src\neural_de.jl:65
(::NeuralODE)(::Any) at C:\Users\User 1\.julia\packages\DiffEqFlux\Te0lc\src\neural_de.jl:65

For some reason it works for me inside Atom but not in a Jupyter notebook, both with the same version of Julia. Thank you for your help, @ChrisRackauckas.

It’s not you, it’s me. I realized I had posted a link to the brand new docs but hadn’t done that package release, so I just went and did it :sweat_smile:. Sorry for the hiccup.
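
For anyone following along: after a release like that, updating the package and checking the installed version should pick it up. A quick sketch using only standard Pkg commands:

using Pkg
Pkg.update("DiffEqFlux")   # pull the freshly tagged release
Pkg.status("DiffEqFlux")   # confirm the installed version matches the docs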

Dear all,
First of all, thank you very much for this supportive environment here.

Unfortunately, I am not very familiar with neural nets or with the details of Julia, so I run into problems when trying to program a forecasting solution that incorporates a neural ODE.
I managed to modify the example here
https://docs.sciml.ai/DiffEqFlux/dev/examples/neural_ode/
so that it computes a logistic differential equation, du1 = a1*u1 + a2*u1^2, a first-order homogeneous differential equation. Optimization works well, and the comparison with the exact solution and its direction field is good, so from the ODE-solving point of view it seems to be correct. I also added the possibility that Lux.Chain takes the mathematical law from a separate function, a possibility I found in another example code:

math_law(x) = 2.4f0.*x.-1.1f0.*x.^2
dudt2 = Lux.Chain(x -> math_law(x), …

But now I cannot find a way to proceed with a perhaps trivial problem: I want to make a simple forecast starting from the end of the interval tspan. The documentation of Lux, one of the packages used in the code, did not help me further, and everything I tried led to various error messages.
Simply put, the difficulty for me is to evaluate the NN (which, if I understood correctly, represents the right-hand side of the ODE) to obtain a value back, and to solve (integrate) the neural ODE using DiffEqFlux and Lux. Here I kindly ask for your support.
Many thanks in advance.

using Lux, DiffEqFlux, DifferentialEquations, Optimization, OptimizationOptimJL, Random, Plots
gr()

u0       = Float32[0.5] # initial condition u(t=0)
datasize = 30
tspan    = (0.0f0, 2.0f0)
tsteps   = range(tspan[1], tspan[2], length = datasize)
#______________________________________________________________________________
# logistic differential equation LogDiffEqu
function LogDiffEqu(du, u, p, t)
    A     = [2.4 -1.1] 
    du[1] = A[1]*u[1]+A[2]*u[1]^2
end
prob_LogDiffEqu = ODEProblem(LogDiffEqu, u0, tspan)
sol_LogDiffEqu = Array(solve(prob_LogDiffEqu, Tsit5(), saveat = tsteps))
#______________________________________________________________________________
# shaping information for the NN
math_law(x) = 2.0f0.*x.-1.0f0.*x.^2  # note the Float32 literals ("f0"); coefficients are a preliminary guess
# Construct the layer
dudt2 = Lux.Chain(x -> math_law(x),Lux.Dense(1, 50, tanh),Lux.Dense(50, 1))
rng = Random.default_rng()
# Initialization
p, st = Lux.setup(rng, dudt2)
#_______________________________________________________________________________
prob_neuralode = NeuralODE(dudt2, tspan, Tsit5(), saveat = tsteps)
#______________________________________________________________________________
function predict_neuralode(p)
  Array(prob_neuralode(u0, p, st)[1])
end
#______________________________________________________________________________
function loss_neuralode(p)
    pred = predict_neuralode(p)
    loss = sum(abs2.(sol_LogDiffEqu .- pred))
    return loss, pred
end
#______________________________________________________________________________
callback = function (p, l, pred; doplot = false)
    # println(l) # print the achieved accuracy (loss)
    # plot current prediction against data
    if doplot
      plt = Plots.scatter(tsteps, sol_LogDiffEqu[1,:], label = "data")  
      Plots.scatter!(plt, tsteps, pred[1,:], label = "prediction")
      display(plot(plt))
    end
    return false
end
#______________________________________________________________________________
pinit = Lux.ComponentArray(p)
callback(pinit, loss_neuralode(pinit)...; doplot=true)
#______________________________________________________________________________
# use Optimization.jl to solve the problem
adtype  = Optimization.AutoZygote()
optf    = Optimization.OptimizationFunction((x, p) -> loss_neuralode(x), adtype)
optprob = Optimization.OptimizationProblem(optf, pinit)
result_neuralode = Optimization.solve(optprob, ADAM(0.05), callback = callback, maxiters = 100)
# Retrain using the BFGS optimizer
optprob2 = remake(optprob, u0 = result_neuralode.u)
result_neuralode2 = Optimization.solve(optprob2, Optim.BFGS(initial_stepnorm = 0.01), callback = callback, allow_f_increases = false)
callback(result_neuralode2.u, loss_neuralode(result_neuralode2.u)...; doplot=true)
#______________________________________________________________________________
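
Regarding the two pieces asked about above (evaluating the trained network, and integrating the neural ODE past tspan), here is a minimal sketch under the assumption that the training above has finished and result_neuralode2.u holds the trained parameters; the forecast horizon (2.0 to 4.0) is a hypothetical choice:

p_trained = result_neuralode2.u

# (a) Evaluate the network itself: it approximates du/dt at a given state u.
#     Lux models are called as model(input, parameters, state) -> (output, state).
du_est, _ = dudt2([0.5f0], p_trained, st)

# (b) Forecast beyond the training interval: build a NeuralODE over an extended
#     tspan and integrate from the last predicted state with the trained parameters.
tspan_fc  = (2.0f0, 4.0f0)                      # hypothetical forecast horizon
tsteps_fc = range(tspan_fc[1], tspan_fc[2], length = datasize)
node_fc   = NeuralODE(dudt2, tspan_fc, Tsit5(), saveat = tsteps_fc)

u_end    = predict_neuralode(p_trained)[:, end]  # state at t = 2.0
forecast = Array(node_fc(u_end, p_trained, st)[1])

Whether this is the right recipe is discussed in the follow-up thread linked below.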

Make a new thread with an appropriate title so people can find it and answer it.

OK, thank you. I will do so soon.

Moved to:
Forecasting time series with NeuralODE, Lux, DiffEqFlux