I am trying to provide derivatives for a function to see if it can speed up the optimization, but I am running in to errors. Unexpected array VariableRef[x[1], x[2], x[3], x[4]] in nonlinear expression. Nonlinear expressions may contain only scalar expressions.
I have tried to follow the example given in the documentation here Nonlinear Modeling · JuMP. The code for this is shown in the function nlp_analytic(p, c1, c2,b, k, n)
. Any ideas?
Can we use @NLparameter
and NLexpression
in user defined functions?
# Without user defined function
using JuMP, Ipopt
function nlp(p, c1, c2,b, k, n)
model = Model(Ipopt.Optimizer)
@variable(model, 0 <= x[i = 1:n] <= 1)
@variable(model, 0 <= var1 <=1)
@variable(model, 0 <= var2 <=1)
@variable(model, 0 <= var3 <=1)
@objective(model, Max, var1 - var2 + var3)
@NLparameter(model, p1[j=1:k, i=1:n] == c1[j, i])
@NLparameter(model, p2[j=1:k, i=1:n] == c1[j, i])
@NLexpression(model, xp, sum(x[i] * p[i] for i in 1:n))
@NLexpression(model, c1_expr[j=1:k], sum(p1[j, i] * x[i] for i in 1:n))
@NLexpression(model, c2_expr[j=1:k], sum(p2[j, i] * x[i] for i in 1:n))
@NLconstraint(model, con3, xp - sum(b[j] / (1+var1)^j for j in 1:k) == 0)
@NLconstraint(model, con5, xp - sum(c1_expr[j] / (1+var2)^j for j in 1:k) == 0)
@NLconstraint(model, con6, xp - sum(c2_expr[j] / (1+var3)^j for j in 1:k) == 0)
@constraint(model, con1, c1*x .>= b)
JuMP.optimize!(model)
end
function data()
p = rand(400:700,5,1); c1= (rand(100:200,4,5))'; c2 = 0.9 .* c1
b =rand(150:250,5,1); n= size(c1,2); k = size(c1,1)
return p, c1, c2, b, n, k
end
p, c1, c2, b, n, k = data()
nlp_model(p, c1, c2,b, k, n)
Attempt with analytic derivatives
function nlp_analytic(p, c1, c2,b, k, n)
model = Model(Ipopt.Optimizer)
@variable(model, 0 <= x[i = 1:n] <= 1)
@variable(model, 0 <= var1 <=1)
@variable(model, 0 <= var2 <=1)
@variable(model, 0 <= var3 <=1)
@objective(model, Max, var1 - var2 + var3)
@NLparameter(model, p1[j=1:k, i=1:n] == c1[j, i])
@NLparameter(model, p2[j=1:k, i=1:n] == c1[j, i])
@NLexpression(model, xp, sum(x[i] * p[i] for i in 1:n))
# @NLexpression(model, c1_expr[j=1:k], sum(p1[j, i] * x[i] for i in 1:n))
# @NLexpression(model, c2_expr[j=1:k], sum(p2[j, i] * x[i] for i in 1:n))
@NLconstraint(model, con3, xp - sum(b[j] / (1+var1)^j for j in 1:k) == 0)
f(x, var,para) = sum(x[i] * p[i] for i in 1:n) - sum(b[j] / (1 + var)^j for j in 1:k) + sum(para(j, i) * x[i] / (1 + var)^j for i in 1:n, j in 1:k)
function ∇f(g::AbstractVector{T}, x::T, var::T, para) where {T}
g[1] = sum(p[i] for i in 1:n) + sum(para(j, i) / (1 + var)^t[j] for i in 1:n, j in 1:k)
g[2] = sum(-j * para(j, i) * x[i] / (1 + var)^(j + 1) for i in 1:n, j in 1:k)
return
end
register(model, :analytic, 2, f, ∇f)
@NLconstraint(model,Con5, f(x,var2,p1) == 0.0)
@NLconstraint(model,con6, f(x,var3,p2) == 0.0)
# @NLconstraint(model, con5, xp - sum(c1_expr[j] / (1+var2)^j for j in 1:k) == 0)
# @NLconstraint(model, con6, xp - sum(c2_expr[j] / (1+var3)^j for j in 1:k) == 0)
@constraint(model, con1, c1*x .>= b)
JuMP.optimize!(model)
end