Your code looks okay.
You may get better performance by extracting common subexpressions into @NLexpression. It looks like there are a lot of similar terms there!
One potential issue with this is that we disable second-order derivative information for user-defined functions. Instead of writing out the summation in a function, you could build a nonlinear expression for each i, something like this (untested, probably has typos, but it should point you in the right direction):
expr = Any[]
for i in 1:n
    # Data for observation i
    k = df.owner[i]
    θb = df.b_ln_prod[i]
    θs = df.s_ln_prod[i]
    b_idx = df.b_delta[i]
    s_idx = df.s_delta[i]
    δb = δ[b_idx]
    δs = δ[s_idx]
    # Bargaining weights as nonlinear expressions
    βB = @NLexpression(model, 0.5 + 0.5*δb^α)
    βS = @NLexpression(model, 0.5 - 0.5*δs^α)
    βO = 0.5
    ψB = @NLexpression(model, α^(α/(1-α))*((1-α*βB)*(βB*θb)^(ρ/(1-ρ))+(1-α*(1-βB))*((1-βB)*θs)^(ρ/(1-ρ)))/((βB*θb)^(ρ/(1-ρ))+((1-βB)*θs)^(ρ/(1-ρ)))^((ρ-α)/(ρ*(1-α))))
    ψS = @NLexpression(model, α^(α/(1-α))*((1-α*βS)*(βS*θb)^(ρ/(1-ρ))+(1-α*(1-βS))*((1-βS)*θs)^(ρ/(1-ρ)))/((βS*θb)^(ρ/(1-ρ))+((1-βS)*θs)^(ρ/(1-ρ)))^((ρ-α)/(ρ*(1-α))))
    ψO = @NLexpression(model, α^(α/(1-α))*((1-α*βO)*(βO*θb)^(ρ/(1-ρ))+(1-α*(1-βO))*((1-βO)*θs)^(ρ/(1-ρ)))/((βO*θb)^(ρ/(1-ρ))+((1-βO)*θs)^(ρ/(1-ρ)))^((ρ-α)/(ρ*(1-α))))
    # Store this observation's choice probability in `expr`
    if k == "b"
        push!(expr, @NLexpression(model, exp(ψB-fB)/(exp(ψB-fB)+exp(ψS-fS)+exp(ψO-fO))))
    elseif k == "s"
        # ... and similarly for the other cases
    end
end
@NLobjective(model, Min, -sum(log(expr[i]) for i in 1:n))
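On the common-subexpression point above: the CES power terms like (βB*θb)^(ρ/(1-ρ)) each appear twice in ψB, so you could hoist them into their own @NLexpressions so they are evaluated once per observation. A minimal sketch for the ψB case (num_b and num_s are names I made up for illustration):

num_b = @NLexpression(model, (βB*θb)^(ρ/(1-ρ)))        # hypothetical name; shared power term
num_s = @NLexpression(model, ((1-βB)*θs)^(ρ/(1-ρ)))    # hypothetical name; shared power term
ψB = @NLexpression(model, α^(α/(1-α))*((1-α*βB)*num_b + (1-α*(1-βB))*num_s)/(num_b + num_s)^((ρ-α)/(ρ*(1-α))))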
To get better performance from Ipopt, use a different linear solver: https://github.com/jump-dev/Ipopt.jl#linear-solvers
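For example (a sketch; "ma57" is one of the HSL solvers, which need a separate install as described in that README):

using JuMP, Ipopt
model = Model(Ipopt.Optimizer)
# Tell Ipopt to use HSL MA57 instead of the default MUMPS
set_optimizer_attribute(model, "linear_solver", "ma57")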
Multiple solutions are not possible: Ipopt is a local solver, so it returns a single point.
Use value(ρ) to obtain the solution. After that it’s just a normal Julia variable, so you can do anything you like with it. See, e.g., “Compact way to save JuMP optimization results in DataFrames” on Stack Overflow.
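For example, something along these lines (untested; assumes δ is a vector of decision variables in your model):

using DataFrames
optimize!(model)
ρ_hat = value(ρ)       # scalar solution value
δ_hat = value.(δ)      # broadcast value over the vector of variables
results = DataFrame(delta = δ_hat)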