Hello, i am trying to fit parameters of an ODE system to data. In this case, not all of the componenets have the same time points where measurements are taken. If i set up the loss function and plug it into sciml_train, i get an error where i just don’t seem to find a solution for. Can anybody help me out?
using Plots
using DifferentialEquations
using DiffEqFlux
using HDF5
pyplot()
using DiffEqParamEstim
num_eq = 49
function Test_ODE(du,u,p,t)
du[1] = p[1]
du[2] = p[2]
du[3] = p[3]
du[4] = p[4]
du[5] = p[5]
du[6] = p[6]
du[7] = p[7]
du[8] = p[8]
du[9] = p[9]
du[10] = p[10]
du[11] = p[11]
du[12] = p[12]
du[13] = p[13]
du[14] = p[14]
du[15] = p[15]
du[16] = p[16]
du[17] = p[17]
du[18] = p[18]
du[19] = p[19]
du[20] = p[20]
du[21] = p[21]
du[22] = p[22]
du[23] = p[23]
du[24] = p[24]
du[25] = p[25]
du[26] = p[26]
du[27] = p[27]
du[28] = p[28]
du[29] = p[29]
du[30] = p[30]
du[31] = p[31]
du[32] = p[32]
du[33] = p[33]
du[34] = p[34]
du[35] = p[35]
du[36] = p[36]
du[37] = p[37]
du[38] = p[38]
du[39] = p[39]
du[40] = p[40]
du[41] = p[41]
du[42] = p[42]
du[43] = p[43]
du[44] = p[44]
du[45] = p[45]
du[46] = p[46]
du[47] = p[47]
du[48] = p[48]
du[49] = p[49]
end
time_points = [[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[60, 84, 108],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[0, 24, 48, 84, 108, 132, 156, 180],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[84, 132],
[84, 132]]
function t_points_creator(time_points)
no_of_measurements = length(time_points)#length(string_measurements)
union_dataset_t = []
dataset_t_indices = []
total_no_measurements = 0
indices_in_stacked_form = []
for i in 1:no_of_measurements
tmp = [total_no_measurements+1]
total_no_measurements += length(time_points[i])
tmp = push!(tmp, total_no_measurements)
indices_in_stacked_form = push!(indices_in_stacked_form, tmp)
union_dataset_t = union(union_dataset_t, time_points[i])
end
union_dataset_t = sort!(union_dataset_t)
for i in 1:no_of_measurements
tmp = []
for j in 1:length(time_points[i])
push!(tmp, findall(x->x==time_points[i][j], union_dataset_t)[1])
end
push!(dataset_t_indices, tmp)
end
return union_dataset_t, dataset_t_indices, total_no_measurements, indices_in_stacked_form
end
saves_t, time_indices, total_no_measurements, indices_in_stacked_form = t_points_creator(time_points)
tstart = 0
tend = 200
real_param = ones(num_eq).*0.1
u0 = ones(num_eq).*2
data = Array(solve(ODEProblem(Test_ODE,u0,(tstart,tend),real_param), Tsit5(), saveat=saves_t))
callback = function (p, l)
println("Parameters: $p")
println("Loss: $l")
return false
end
function loss_ODE(model, u0, tstart, tend, model_params, indices_components, saves_t, data)
pred = solve(ODEProblem(model,u0,(tstart,tend),model_params), Tsit5(), saveat=saves_t)
prediction = Array(pred)
prediction_components = [prediction[1,:], prediction[2,:], prediction[3,:], prediction[4,:], prediction[5,:], prediction[6,:], prediction[7,:],
prediction[8,:], prediction[9,:], prediction[10,:], prediction[11,:], prediction[12,:], prediction[13,:], prediction[14,:],
prediction[15,:], prediction[16,:], prediction[17,:], prediction[18,:], prediction[19,:], prediction[20,:], prediction[21,:],
prediction[22,:], prediction[23,:], prediction[24,:], prediction[25,:], prediction[26,:], prediction[27,:], prediction[28,:],
prediction[29,:], prediction[30,:], prediction[32,:], prediction[33,:], prediction[34,:], prediction[35,:], prediction[36,:],
prediction[37,:], prediction[38,:], prediction[39,:], prediction[40,:], prediction[41,:], prediction[42,:], prediction[43,:],
prediction[44,:], prediction[45,:], prediction[46,:], prediction[47,:], prediction[48,:], prediction[49,:]]
loss = 0.0
for i in 1:length(prediction_components)
if length(indices_components[i]) < length(prediction_components[i]) && pred.retcode == "Success"
prediction_component=prediction_components[i][indices_components[i]]
data_component=data[i,:][indices_components[i]]
loss += sum(abs2,(prediction_component.+1e-8) .- (data_component.+1e-8))
else
loss = 1e10
break
end
end
loss
end
function train_model(model, u0, tstart, tend, p_guess, indices_components, saves_t, data, parameters_real)
println("The loss value for the current starting point is: $(loss_ODE(model, u0, tstart, tend, p_guess, indices_components, saves_t, data))")
# Train the ODE
# https://m3g.github.io/JuliaNotes.jl/v0.3/closures/
# explains why tmp_func is needed
tmp_func = x->loss_ODE(model, u0, tstart, tend, x, indices_components, saves_t, data)
min, loss =(ones(length(p_guess))*NaN, Inf)
lower_bounds, upper_bounds = (-10 .*ones(length(p_guess)), ones(length(p_guess)).*Inf)
res=DiffEqFlux.sciml_train(tmp_func, p_guess, Fminbox(LBFGS()), cb = callback; lower_bounds, upper_bounds, maxiters = 5000)
min, loss =(res.minimizer, res.minimum)
println("The parameters are $(min) with loss value $(loss)")
return(min, loss)
end
train_model(Test_ODE, u0, tstart, tend, ones(num_eq).*0.02, time_indices, saves_t, data, real_param)
This creates the following error:
ERROR: LoadError: MethodError: Cannot convert
an object of type Nothing to an object of type Float64
Closest candidates are:
convert(::Type{T}, ::Static.StaticFloat64{N}) where {N, T<:AbstractFloat} at ~/.julia/packages/Static/pkxBE/src/float.jl:26
convert(::Type{T}, ::LLVM.GenericValue, ::LLVM.LLVMType) where T<:AbstractFloat at ~/.julia/packages/LLVM/vQ98J/src/execution.jl:39
convert(::Type{T}, ::LLVM.ConstantFP) where T<:AbstractFloat at ~/.julia/packages/LLVM/vQ98J/src/core/value/constant.jl:103
…
Stacktrace:
[1] fill!(dest::Vector{Float64}, x::Nothing)
@ Base ./array.jl:351
[2] copyto!
@ ./broadcast.jl:921 [inlined]
[3] materialize!
@ ./broadcast.jl:871 [inlined]
[4] materialize!(dest::Vector{Float64}, bc::Base.Broadcast.Broadcasted{Base.Broadcast.DefaultArrayStyle{0}, Nothing, typeof(identity), Tuple{Base.RefValue{Nothing}}})
@ Base.Broadcast ./broadcast.jl:868
[5] WARNING: both Flux and Iterators export “flatten”; uses of it in module DiffEqFlux must be qualified
WARNING: both Flux and Distributions export “params”; uses of it in module DiffEqFlux must be qualified
(::GalacticOptim.var"#268#278"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}})(::Vector{Float64}, ::Vector{Float64})
@ GalacticOptim ~/.julia/packages/GalacticOptim/1ht5p/src/function/zygote.jl:8
[6] (::GalacticOptim.var"#160#166"{OptimizationProblem{false, OptimizationFunction{false, GalacticOptim.AutoZygote, OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, GalacticOptim.var"#268#278"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#271#281"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#276#286", Nothing, Nothing, Nothing}, Vector{Float64}, SciMLBase.NullParameters, Vector{Float64}, Nothing, Nothing, Nothing, Base.Pairs{Symbol, var"#3#4", Tuple{Symbol}, NamedTuple{(:cb,), Tuple{var"#3#4"}}}}, GalacticOptim.var"#159#165"{OptimizationProblem{false, OptimizationFunction{false, GalacticOptim.AutoZygote, OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, GalacticOptim.var"#268#278"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#271#281"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#276#286", Nothing, Nothing, Nothing}, Vector{Float64}, SciMLBase.NullParameters, Vector{Float64}, Nothing, Nothing, Nothing, Base.Pairs{Symbol, var"#3#4", Tuple{Symbol}, NamedTuple{(:cb,), Tuple{var"#3#4"}}}}, OptimizationFunction{false, GalacticOptim.AutoZygote, OptimizationFunction{false, GalacticOptim.AutoZygote, OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, GalacticOptim.var"#268#278"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#271#281"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#276#286", Nothing, Nothing, Nothing}, GalacticOptim.var"#268#278"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#271#281"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#276#286", Nothing, Nothing, Nothing}}, OptimizationFunction{false, GalacticOptim.AutoZygote, OptimizationFunction{false, GalacticOptim.AutoZygote, OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, GalacticOptim.var"#268#278"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#271#281"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#276#286", Nothing, Nothing, Nothing}, GalacticOptim.var"#268#278"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#271#281"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#276#286", Nothing, Nothing, Nothing}})(G::Vector{Float64}, θ::Vector{Float64})
@ GalacticOptim ~/.julia/packages/GalacticOptim/1ht5p/src/solve/optim.jl:177
[7] value_gradient!!(obj::OnceDifferentiable{Float64, Vector{Float64}, Vector{Float64}}, x::Vector{Float64})
@ NLSolversBase ~/.julia/packages/NLSolversBase/cfJrN/src/interface.jl:82
[8] value_gradient!!(bw::Optim.BarrierWrapper{OnceDifferentiable{Float64, Vector{Float64}, Vector{Float64}}, Optim.BoxBarrier{Vector{Float64}, Vector{Float64}}, Float64, Float64, Vector{Float64}}, x::Vector{Float64})
@ Optim ~/.julia/packages/Optim/6Lpjy/src/multivariate/solvers/constrained/fminbox.jl:81
[9] initial_state(method::LBFGS{Optim.InverseDiagonal, LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Optim.var"#66#67"{Vector{Float64}, Vector{Float64}, Fminbox{LBFGS{Nothing, LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Optim.var"#19#21"}, Float64, Optim.var"#49#51"}, Optim.BarrierWrapper{OnceDifferentiable{Float64, Vector{Float64}, Vector{Float64}}, Optim.BoxBarrier{Vector{Float64}, Vector{Float64}}, Float64, Float64, Vector{Float64}}}}, options::Optim.Options{Float64, GalacticOptim.var"#_cb#164"{var"#3#4", Fminbox{LBFGS{Nothing, LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Optim.var"#19#21"}, Float64, Optim.var"#49#51"}, Base.Iterators.Cycle{Tuple{GalacticOptim.NullData}}}}, d::Optim.BarrierWrapper{OnceDifferentiable{Float64, Vector{Float64}, Vector{Float64}}, Optim.BoxBarrier{Vector{Float64}, Vector{Float64}}, Float64, Float64, Vector{Float64}}, initial_x::Vector{Float64})
@ Optim ~/.julia/packages/Optim/6Lpjy/src/multivariate/solvers/first_order/l_bfgs.jl:164
[10] optimize(df::OnceDifferentiable{Float64, Vector{Float64}, Vector{Float64}}, l::Vector{Float64}, u::Vector{Float64}, initial_x::Vector{Float64}, F::Fminbox{LBFGS{Nothing, LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Optim.var"#19#21"}, Float64, Optim.var"#49#51"}, options::Optim.Options{Float64, GalacticOptim.var"#_cb#164"{var"#3#4", Fminbox{LBFGS{Nothing, LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Optim.var"#19#21"}, Float64, Optim.var"#49#51"}, Base.Iterators.Cycle{Tuple{GalacticOptim.NullData}}}})
@ Optim ~/.julia/packages/Optim/6Lpjy/src/multivariate/solvers/constrained/fminbox.jl:322
[11] ___solve(prob::OptimizationProblem{false, OptimizationFunction{false, GalacticOptim.AutoZygote, OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, GalacticOptim.var"#268#278"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#271#281"{GalacticOptim.var"#267#277"{OptimizationFunction{true, GalacticOptim.AutoZygote, DiffEqFlux.var"#84#89"{var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Nothing}}, GalacticOptim.var"#276#286", Nothing, Nothing, Nothing}, Vector{Float64}, SciMLBase.NullParameters, Vector{Float64}, Nothing, Nothing, Nothing, Base.Pairs{Symbol, var"#3#4", Tuple{Symbol}, NamedTuple{(:cb,), Tuple{var"#3#4"}}}}, opt::Fminbox{LBFGS{Nothing, LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Optim.var"#19#21"}, Float64, Optim.var"#49#51"}, data::Base.Iterators.Cycle{Tuple{GalacticOptim.NullData}}; cb::Function, maxiters::Int64, maxtime::Nothing, abstol::Nothing, reltol::Nothing, progress::Bool, kwargs::Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
@ GalacticOptim ~/.julia/packages/GalacticOptim/1ht5p/src/solve/optim.jl:196
[12] #__solve#141
@ ~/.julia/packages/GalacticOptim/1ht5p/src/solve/optim.jl:49 [inlined]
[13] #solve#480
@ ~/.julia/packages/SciMLBase/L7Nun/src/solve.jl:3 [inlined]
[14] sciml_train(::var"#5#6"{typeof(Test_ODE), Vector{Float64}, Int64, Int64, Vector{Any}, Vector{Any}, Matrix{Float64}}, ::Vector{Float64}, ::Fminbox{LBFGS{Nothing, LineSearches.InitialStatic{Float64}, LineSearches.HagerZhang{Float64, Base.RefValue{Bool}}, Optim.var"#19#21"}, Float64, Optim.var"#49#51"}, ::Nothing; lower_bounds::Vector{Float64}, upper_bounds::Vector{Float64}, maxiters::Int64, kwargs::Base.Pairs{Symbol, var"#3#4", Tuple{Symbol}, NamedTuple{(:cb,), Tuple{var"#3#4"}}})
@ DiffEqFlux ~/.julia/packages/DiffEqFlux/UPHk9/src/train.jl:91
[15] train_model(model::Function, u0::Vector{Float64}, tstart::Int64, tend::Int64, p_guess::Vector{Float64}, indices_components::Vector{Any}, saves_t::Vector{Any}, data::Matrix{Float64}, parameters_real::Vector{Float64})
@ Main ~/Job_Array_Timer_Minimizer/Job_Array_Euler_TFN_Test_2_tmp.jl:200
[16] top-level scope
@ ~/Job_Array_Timer_Minimizer/Job_Array_Euler_TFN_Test_2_tmp.jl:209
in expression starting at /cluster/home/malmansto/Job_Array_Timer_Minimizer/Job_Array_Euler_TFN_Test_2_tmp.jl:209