I am trying to replicate the example in the README of DiffEqFlux https://github.com/JuliaDiffEq/DiffEqFlux.jl. Calling the neural_ode generated function makes Julia exit before training could begin.

The code is

using DifferentialEquations
using Flux, DiffEqFlux

# Lotka-Volterra predator-prey model, written in the in-place form
# expected by ODEProblem: mutates du given state u, parameters p, time t.
#
#   dx/dt = α*x - β*x*y   (prey)
#   dy/dt = -δ*y + γ*x*y  (predator)
#
# NOTE(review): the pasted issue text had the `*` operators stripped
# (`αx`, `βxyy`, `δxy`), which is not valid Julia — variable–variable
# juxtaposition is not multiplication. Restored from the DiffEqFlux README.
function lotka_volterra(du, u, p, t)
    x, y = u
    α, β, δ, γ = p
    du[1] = dx = α*x - β*x*y
    du[2] = dy = -δ*y + γ*x*y
end

# Initial condition, time span, and true parameters for the data-generating run.
u0 = [1.0,1.0]
tspan = (0.0,10.0)
p = [1.5,1.0,3.0,1.0]

# Solve the true ODE and save at Δt = 0.1 to produce the training data.
prob = ODEProblem(lotka_volterra,u0,tspan,p)
ode_data = Array(solve(prob,Tsit5(),saveat=0.1))

# Neural network that will replace the ODE right-hand side: R² → R².
# (The pasted issue text had this fused onto the `ode_data` line.)
dudt = Chain(Dense(2,50,tanh),Dense(50,2))

# Time span for the neural ODE (Float32, matching Flux's default weights).
tspan = (0.0f0,10.0f0)

# Wrap the network in a neural ODE solve over `tspan`, sampled every 0.1.
n_ode = x->neural_ode(x,dudt,tspan,Tsit5(),saveat=0.1)

function predict_n_ode()
    n_ode(u0)
end

# L2 loss between the true trajectory and the neural ODE prediction.
loss_n_ode() = sum(abs2,ode_data .- predict_n_ode())

# 100 dummy "batches": the loss closes over the data, so each datum is ().
data = Iterators.repeated((), 100)
opt = ADAM(0.1)
cb = function () #callback function to observe training
    display(loss_n_ode())
end

# The issue's crash reproducer. NOTE(review): the pasted text used smart
# quotes (“…”), which do not parse in Julia — replaced with ASCII quotes.
# NOTE(review): u0 is Float64 while tspan is Float32; a type mismatch here
# may be the actual cause of the reported crash — worth confirming.
println("Before crashing")
n_ode(u0)
println("After crashing")