Hello,
I am using Ipopt (via JuMP) to solve an optimization problem with a user-defined objective function that has five scalar inputs and one vector input. I call the objective as likelihood(ρ, α, fB, fS, fO, δ...), where δ is the vector input passed with splat syntax. Julia does not seem to accept this format, and I am not sure how to restructure the call, because three of the scalar variables have different upper bounds, so I cannot simply stack all of the variables into a single vector with a common bound. Please see below for details:
My variables, constraint, and objective are defined as follows:
```julia
@variables(model, begin
    0 <= ρ <= 1, (start = 0.5, base_name = "rho")
    0 <= α <= 1, (start = 0.6, base_name = "alpha")
    0 <= fB <= 10, (start = 0, base_name = "fB")
    0 <= fS <= 10, (start = 0, base_name = "fS")
    0 <= fO <= 10, (start = 0, base_name = "fO")
    0 <= δ[i = 1:n_delta] <= 1, (start = 0.5, base_name = "delta$i")
end)
@constraint(model, con, α - ρ >= 0.001)
register(model, :likelihood, n_parameter, likelihood; autodiff = true)
@NLobjective(model, Min, likelihood(ρ, α, fB, fS, fO, δ...))
```
The user-defined objective function is:
```julia
function likelihood(ρ, α, fB, fS, fO, δ...)
    df = CSV.read("data_estimation.csv", DataFrame; header = true)
    n = nrow(df)
    contributions = zeros(n, 1)
    for i = 1:n
        k = df.owner[i]        # observed ownership regime: "b", "s", or "n"
        θb = df.b_ln_prod[i]   # buyer log productivity
        θs = df.s_ln_prod[i]   # seller log productivity
        b_idx = df.b_delta[i]  # index of the buyer's δ
        s_idx = df.s_delta[i]  # index of the seller's δ
        δb = δ[b_idx]
        δs = δ[s_idx]
        βB = 0.5 + 0.5 * δb^α
        βS = 0.5 - 0.5 * δs^α
        βO = 0.5
        ψB = α^(α/(1-α))*((1-α*βB)*(βB*θb)^(ρ/(1-ρ))+(1-α*(1-βB))*((1-βB)*θs)^(ρ/(1-ρ)))/((βB*θb)^(ρ/(1-ρ))+((1-βB)*θs)^(ρ/(1-ρ)))^((ρ-α)/(ρ*(1-α)))
        ψS = α^(α/(1-α))*((1-α*βS)*(βS*θb)^(ρ/(1-ρ))+(1-α*(1-βS))*((1-βS)*θs)^(ρ/(1-ρ)))/((βS*θb)^(ρ/(1-ρ))+((1-βS)*θs)^(ρ/(1-ρ)))^((ρ-α)/(ρ*(1-α)))
        ψO = α^(α/(1-α))*((1-α*βO)*(βO*θb)^(ρ/(1-ρ))+(1-α*(1-βO))*((1-βO)*θs)^(ρ/(1-ρ)))/((βO*θb)^(ρ/(1-ρ))+((1-βO)*θs)^(ρ/(1-ρ)))^((ρ-α)/(ρ*(1-α)))
        # probability of the observed regime (softmax over the three options)
        contributions[i] = ((k == "b" ? 1 : 0) * exp(ψB - fB) / (exp(ψB - fB) + exp(ψS - fS) + exp(ψO - fO))
                          + (k == "s" ? 1 : 0) * exp(ψS - fS) / (exp(ψB - fB) + exp(ψS - fS) + exp(ψO - fO))
                          + (k == "n" ? 1 : 0) * exp(ψO - fO) / (exp(ψB - fB) + exp(ψS - fS) + exp(ψO - fO)))
    end
    return -sum(log.(contributions))
end
```
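To make the structure of those three long lines clearer: ψB, ψS, and ψO are the same expression evaluated at β = βB, βS, and βO. A small helper that restates it (just for readability here, not the code I am actually running) would be:

```julia
# Readability sketch only: the three ψ lines above are ψ(β) at β = βB, βS, βO.
function ψ(β, θb, θs, ρ, α)
    num = (1 - α*β) * (β*θb)^(ρ/(1 - ρ)) + (1 - α*(1 - β)) * ((1 - β)*θs)^(ρ/(1 - ρ))
    den = ((β*θb)^(ρ/(1 - ρ)) + ((1 - β)*θs)^(ρ/(1 - ρ)))^((ρ - α)/(ρ*(1 - α)))
    return α^(α/(1 - α)) * num / den
end
```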
When I try to optimize the model, I receive the following error message:

```
MethodError: no method matching Float64(::ForwardDiff.Dual{ForwardDiff.Tag{JuMP.var"#136#138"{typeof(likelihood)}, Float64}, Float64, 12})
```
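In case it helps, here is a stripped-down sketch, separate from my actual model and data, that produces the same kind of MethodError: ForwardDiff differentiating through a function that stores its (splatted) inputs in a Float64 array created with zeros, which is also how contributions is allocated above.

```julia
using ForwardDiff

# Toy example (not my real code): Dual numbers written into Float64 storage.
function toy(x...)
    buf = zeros(length(x))   # Float64 buffer, like `contributions` above
    for i in 1:length(x)
        buf[i] = x[i]^2      # fails when x[i] is a ForwardDiff.Dual
    end
    return sum(buf)
end

ForwardDiff.gradient(z -> toy(z...), [0.5, 0.6])
# MethodError: no method matching Float64(::ForwardDiff.Dual{...})
```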