DiffEqFlux and Lux/Flux

I’m getting an error using SciML to train a model from a DiffEqFlux tutorial. I used the exact same code, with some modifications, last summer with no problems, but I can’t seem to find a solution. I am using Julia v1.7.1.

using DiffEqFlux, OrdinaryDiffEq, Flux, Optim, Plots


u0 = Float32[2.0; 0.0]
datasize = 30
tspan = (0.0f0, 1.5f0)
tsteps = range(tspan[1], tspan[2], length = datasize)

function trueODEfunc(du, u, p, t)
    true_A = [-0.1 2.0; -2.0 -0.1]
    du .= ((u.^3)'true_A)'  # equivalent to du = true_A' * u.^3
end

prob_trueode = ODEProblem(trueODEfunc, u0, tspan)
ode_data = Array(solve(prob_trueode, Tsit5(), saveat = tsteps))

dudt2 = FastChain((x, p) -> x.^3,
                  FastDense(2, 50, tanh),
                  FastDense(50, 2))

prob_neuralode = NeuralODE(dudt2, tspan, Tsit5(), saveat = tsteps)

function predict_neuralode(p)
  Array(prob_neuralode(u0, p))
end

function loss_neuralode(p)
    pred = predict_neuralode(p)
    loss = sum(abs2, ode_data .- pred)
    return loss, pred
end

# Callback function to observe training
list_plots = []
iter = 0
callback = function (p, l, pred; doplot = false)
  global list_plots, iter

  if iter == 0
    list_plots = []
  end
  iter += 1

  display(l)

  # plot current prediction against data
  plt = scatter(tsteps, ode_data[1,:], label = "data")
  scatter!(plt, tsteps, pred[1,:], label = "prediction")
  push!(list_plots, plt)
  if doplot
    display(plot(plt))
  end

  return false
end

result_neuralode = DiffEqFlux.sciml_train(loss_neuralode, prob_neuralode.p,
                                          ADAM(0.05), cb = callback,
                                          maxiters = 300)

With the error being:

MethodError: no method matching (OptimizationFunction{false, GalacticOptim.AutoZygote, OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{typeof(loss_neuralode)}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, GalacticOptim.var"#268#278"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{typeof(loss_neuralode)}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#271#281"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{typeof(loss_neuralode)}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#276#286", Nothing, Nothing, Nothing})(::OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{typeof(loss_neuralode)}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, ::GalacticOptim.AutoZygote, ::GalacticOptim.var"#268#278"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{typeof(loss_neuralode)}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, ::GalacticOptim.var"#271#281"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{typeof(loss_neuralode)}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, ::GalacticOptim.var"#276#286", ::Nothing, ::Nothing, ::Nothing)

Stacktrace:
 [1] instantiate_function(f::Function, x::Vector{Float32}, ::GalacticOptim.AutoZygote, p::Nothing, num_cons::Int64)
   @ GalacticOptim C:\Users\User 1\.julia\packages\GalacticOptim\fow0r\src\function\zygote.jl:40
 [2] instantiate_function(f::Function, x::Vector{Float32}, ::GalacticOptim.AutoZygote, p::Nothing)
   @ GalacticOptim C:\Users\User 1\.julia\packages\GalacticOptim\fow0r\src\function\zygote.jl:4
 [3] sciml_train(::typeof(loss_neuralode), ::Vector{Float32}, ::ADAM, ::Nothing; lower_bounds::Nothing, upper_bounds::Nothing, maxiters::Int64, kwargs::Base.Pairs{Symbol, var"#43#45", Tuple{Symbol}, NamedTuple{(:cb,), Tuple{var"#43#45"}}})
   @ DiffEqFlux C:\Users\User 1\.julia\packages\DiffEqFlux\gH716\src\train.jl:87

I’m not sure if it could be the specific version of Julia that I’m using. This is directly from the tutorial “Optimization of Ordinary Differential Equations” in the DiffEqFlux.jl docs.

Any thoughts?

For some reason, you found the docs from 2 years ago. This has changed tremendously. Use the real docs:

https://diffeqflux.sciml.ai/dev/examples/neural_ode/
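
For reference, the Lux-based version on that page looks roughly like this (a minimal sketch only; exact names can differ between releases, so defer to the linked tutorial):

using Lux, DiffEqFlux, OrdinaryDiffEq, Random

rng = Random.default_rng()

# Lux models are stateless: parameters (p) and state (st) live outside the layers
dudt2 = Lux.Chain(x -> x.^3,
                  Lux.Dense(2, 50, tanh),
                  Lux.Dense(50, 2))
p, st = Lux.setup(rng, dudt2)

prob_neuralode = NeuralODE(dudt2, tspan, Tsit5(), saveat = tsteps)

# the Lux-style NeuralODE is called with (u0, p, st) and returns (solution, state)
predict_neuralode(p) = Array(prob_neuralode(u0, p, st)[1])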


I have loaded the specified libraries and run the code, which results in the error:
MethodError: no method matching (::NeuralODE{Lux.Chain{NamedTuple{(:layer_1, :layer_2, :layer_3), Tuple{ActivationFunction{var"#1#2"}, Lux.Dense{true, typeof(tanh_fast), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}, Lux.Dense{true, typeof(identity), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}}}}, Vector{Bool}, Optimisers.Restructure{Lux.Chain{NamedTuple{(:layer_1, :layer_2, :layer_3), Tuple{ActivationFunction{var"#1#2"}, Lux.Dense{true, typeof(tanh_fast), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}, Lux.Dense{true, typeof(identity), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}}}}, Tuple{}}, Tuple{Float32, Float32}, Tuple{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, Base.Pairs{Symbol, StepRangeLen{Float32, Float64, Float64, Int64}, Tuple{Symbol}, NamedTuple{(:saveat,), Tuple{StepRangeLen{Float32, Float64, Float64, Int64}}}}})(::Vector{Float32}, ::ComponentArrays.ComponentVector{Float32, Vector{Float32}, Tuple{ComponentArrays.Axis{(layer_1 = 1:0, layer_2 = ViewAxis(1:150, Axis(weight = ViewAxis(1:100, ShapedAxis((50, 2), NamedTuple())), bias = ViewAxis(101:150, ShapedAxis((50, 1), NamedTuple())))), layer_3 = ViewAxis(151:252, Axis(weight = ViewAxis(1:100, ShapedAxis((2, 50), NamedTuple())), bias = ViewAxis(101:102, ShapedAxis((2, 1), NamedTuple())))))}}}, ::NamedTuple{(:layer_1, :layer_2, :layer_3), Tuple{NamedTuple{(), Tuple{}}, NamedTuple{(), Tuple{}}, NamedTuple{(), Tuple{}}}})

Closest candidates are:
(::NeuralODE)(::Any, ::Any) at C:\Users\User 1\.julia\packages\DiffEqFlux\Te0lc\src\neural_de.jl:65
(::NeuralODE)(::Any) at C:\Users\User 1\.julia\packages\DiffEqFlux\Te0lc\src\neural_de.jl:65
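
If I read the “Closest candidates” right, the installed DiffEqFlux only defines the old two-argument call m(u0, p), while the new tutorial calls it with three arguments (u0, p, st), which suggests an older DiffEqFlux is still being loaded. Checking the installed version:

using Pkg
Pkg.status("DiffEqFlux")   # prints the installed version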

For some reason it works for me inside Atom but not in a Jupyter notebook, both with the same version of Julia. Thank you for your help, @ChrisRackauckas.

It’s not you, it’s me. I realized I had posted a link to the brand-new docs but hadn’t done the matching package release, so I just went and did it :sweat_smile:. Sorry for the hiccup.
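
Once the new release has made it through the registry, updating should pull it in (assuming nothing in your environment pins an older version):

using Pkg
Pkg.update("DiffEqFlux")

In Jupyter, restart the kernel afterwards so the new version is actually loaded.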
