Thank you for your advice
i changed it as you stated
function mle_Loglikelihood(p::Array, data::F_t_Data, M::Model)
L = []::Vector{<:Real}
for (F_i, t_i) in zip(data.F, data.t)
M.F = F_i
....
logL = Loglikelihood(L::AbstractVector{<:Real})
#@show logL
return logL
end
function Loglikelihood(L::AbstractVector{<:Real})
logL = -sum(log.(L))
return logL
end
but this error happened
ERROR: LoadError: TypeError: in typeassert, expected Vector{<:Real}, got a value of type Vector{Any}
Stacktrace:
[1] mle_Loglikelihood(p::Vector{Float64}, data::F_t_Data, M::Model)
@ Main C:\Users\Administrator\OneDrive\Desktop\CD40code\code\src\mle_fitting.jl:36
[2] (::var"#15#20"{Model, F_t_Data, var"#14#19"})(p::Vector{Float64})
@ Main C:\Users\Administrator\OneDrive\Desktop\CD40code\code\src\mle_fitting.jl:64
[3] mle_fit_log(p::Vector{Float64}, M::Model, data::F_t_Data, bounds::Vector{Tuple{Float64, Float64}})
@ Main C:\Users\Administrator\OneDrive\Desktop\CD40code\code\src\mle_fitting.jl:68
[4] top-level scope
@ C:\Users\Administrator\OneDrive\Desktop\CD40code\code\excution_MLE.jl:50
in expression starting at C:\Users\Administrator\OneDrive\Desktop\CD40code\code\excution_MLE.jl:50
code for line 36 is
L = []::Vector{<:Real}
The full code are down here
using DifferentialEquations
using Optim
using BlackBoxOptim
using Zygote
using ForwardDiff
using LinearAlgebra
#______________________________________________________________
mutable struct F_t_Data
F
t
end
mutable struct Model
model
u_0::Vector
u_0_type::String
tspan::Tuple
cons::Vector #using when MTP
F :: Float64
solver ::Type #using when MTP
end
mutable struct Optpar
rates
Loglikelihood
hes
end
#_____________________________________________________________________
function mle_Loglikelihood(p::Array, data::F_t_Data, M::Model)
L = []::Vector{<:Real}
for (F_i, t_i) in zip(data.F, data.t)
M.F = F_i
L_i = M.model(p, M, t_i ; F=F_i)
if L_i > 0
push!(L, L_i)
else
end
end
logL = Loglikelihood(L::AbstractVector{<:Real})
#@show logL
return logL
end
function Loglikelihood(L::AbstractVector{<:Real})
logL = -sum(log.(L))
return logL
end
function mle_fit_log(p::Vector{Float64}, M::Model, data::F_t_Data, bounds::Vector{Tuple{Float64,Float64}}) #log parameter estimating
p2lnp = (p) -> log10.(p)
lnp2p = (lnp::Vector{Float64}) -> 10 .^ lnp
loss_in_est = (p) -> mle_Loglikelihood(lnp2p(p)::Vector{Float64}, data, M)
lnp = p2lnp(p)
loss_in_est(lnp)
log_bounds = map(x -> (log10(x[1] + 1e-7), log(x[2])), bounds)
res_bbo = bboptimize(loss_in_est; SearchRange=log_bounds,
NumDimensions=length(lnp),
NThreads=Threads.nthreads() - 1,
MaxSteps = 5000)
log_p_0_bbo = best_candidate(res_bbo)
p_0_bbo = 10 .^ log_p_0_bbo
@show p_0_bbo
#function loss_in_opt(p::Union{Vector{Float64}, Vector{ForwardDiff.Tag{…}, Float64, 8}})
# if typeof(p) == Vector{Float64}
# loss_in = mle_Loglikelihood(p, data::F_t_Data, M::Model)
# elseif typeof(p) == Vector{ForwardDiff.Dual}
# loss_in = ForwardDiff.value.(p)[1]
# end
# return loss_in
#end
loss_in_opt = (p) -> mle_Loglikelihood(p, data, M)
lb, ub = boundaries(bounds)
od = OnceDifferentiable(loss_in_opt, p_0_bbo; autodiff = :forward)
res_opt = optimize(od, lb, ub, p_0_bbo, Fminbox(BFGS()),
Optim.Options( show_trace = false,
iterations = 100,
outer_iterations =2))
rates = res_opt.minimizer
l = res_opt.minimum
hes = ForwardDiff.hessian(loss_in_opt, rates)
println("Model used == ", M.model)
println("Loss (MLE) == ", l)
println("Rates == ", rates)
res = OptRes(rates, l, hes)
return res
end
function boundaries(bounds)
lb = [b[1] for b in bounds]
ub = [b[2] for b in bounds]
return lb,ub
end
By the way, thank you for your advice on question post
I`ll keep that in mind