JuMP beginners help

Hi, I have the following problem:

# --- First attempt: vectorized model (fails with "Unexpected vector") ---
N=10
r=rand(N)*2
a=rand(N,N)*0.5-0.25
# NOTE(review): a comprehension used purely for its side effect — sets the
# diagonal of `a` to 1.0; a plain `for` loop would be clearer.
[a[i,i]=1.0 for i=1:N]

m = Model(solver = NLoptSolver(algorithm=:LD_MMA))
@variable(m, x[1:N] >= 0 )
@variable(m, 1.0 >= h[1:N] >= 0 )
# NOTE(review): @NLconstraint/@NLobjective accept only scalar expressions —
# dotted (broadcast) operations and matrix-vector products like `a*x` are
# rejected; this is what produces the "Unexpected vector" error below.
@NLconstraint(m, x.*(r.*(1-a*x)-h)== 0)
@NLobjective(m, Max, sum(x.*h))

status = solve(m)

but I get the error “Unexpected vector” (fair enough — x, r and h are vectors of length N and a is an N × N matrix). I assume NLopt doesn’t accept vectorized expressions.

I then tried to make it without vectors as:

# --- Second attempt: scalarized by hand (fails with "invalid NLopt arguments") ---
N=10
r=rand(N)*2
a=rand(N,N)*0.5-0.25
# Sets the diagonal of `a` to 1.0 (comprehension used for its side effect).
[a[i,i]=1.0 for i=1:N]

# NOTE(review): LD_MMA supports only inequality constraints; the equality
# constraints below are the actual cause of "invalid NLopt arguments".
m = Model(solver = NLoptSolver(algorithm=:LD_MMA))
@variable(m, x1 >= 0)
@variable(m, x2 >= 0)
@variable(m, x3 >= 0)
@variable(m, x4 >= 0)
@variable(m, x5 >= 0)
@variable(m, x6 >= 0)
@variable(m, x7 >= 0)
@variable(m, x8 >= 0)
@variable(m, x9 >= 0)
@variable(m, x10 >= 0)
# NOTE(review): unlike the vectorized version, the h variables here lack the
# upper bound h <= 1.0.
@variable(m, h1 >= 0)
@variable(m, h2 >= 0)
@variable(m, h3 >= 0)
@variable(m, h4 >= 0)
@variable(m, h5 >= 0)
@variable(m, h6 >= 0)
@variable(m, h7 >= 0)
@variable(m, h8 >= 0)
@variable(m, h9 >= 0)
@variable(m, h10 >= 0)

# NOTE(review): three copy-paste bugs in the constraints below —
#   1. every constraint uses r[1]; constraint i should use r[i];
#   2. every constraint's leading factor is x1; constraint i should use xi;
#   3. constraint i sums x_j*a[j,i] (column i of `a`, i.e. a'x), whereas the
#      vectorized original used a*x (row i: sum of a[i,j]*x_j) — the indices
#      are transposed.
@NLconstraint(m, x1*(r[1]*(1-(x1*a[1,1]+x2*a[2,1]+x3*a[3,1]+x4*a[4,1]+x5*a[5,1]+x6*a[6,1]+x7*a[7,1]+x8*a[8,1]+x9*a[9,1]+x10*a[10,1]))-h1) == 0.0)
@NLconstraint(m, x1*(r[1]*(1-(x1*a[1,2]+x2*a[2,2]+x3*a[3,2]+x4*a[4,2]+x5*a[5,2]+x6*a[6,2]+x7*a[7,2]+x8*a[8,2]+x9*a[9,2]+x10*a[10,2]))-h2) == 0.0)
@NLconstraint(m, x1*(r[1]*(1-(x1*a[1,3]+x2*a[2,3]+x3*a[3,3]+x4*a[4,3]+x5*a[5,3]+x6*a[6,3]+x7*a[7,3]+x8*a[8,3]+x9*a[9,3]+x10*a[10,3]))-h3) == 0.0)
@NLconstraint(m, x1*(r[1]*(1-(x1*a[1,4]+x2*a[2,4]+x3*a[3,4]+x4*a[4,4]+x5*a[5,4]+x6*a[6,4]+x7*a[7,4]+x8*a[8,4]+x9*a[9,4]+x10*a[10,4]))-h4) == 0.0)
@NLconstraint(m, x1*(r[1]*(1-(x1*a[1,5]+x2*a[2,5]+x3*a[3,5]+x4*a[4,5]+x5*a[5,5]+x6*a[6,5]+x7*a[7,5]+x8*a[8,5]+x9*a[9,5]+x10*a[10,5]))-h5) == 0.0)
@NLconstraint(m, x1*(r[1]*(1-(x1*a[1,6]+x2*a[2,6]+x3*a[3,6]+x4*a[4,6]+x5*a[5,6]+x6*a[6,6]+x7*a[7,6]+x8*a[8,6]+x9*a[9,6]+x10*a[10,6]))-h6) == 0.0)
@NLconstraint(m, x1*(r[1]*(1-(x1*a[1,7]+x2*a[2,7]+x3*a[3,7]+x4*a[4,7]+x5*a[5,7]+x6*a[6,7]+x7*a[7,7]+x8*a[8,7]+x9*a[9,7]+x10*a[10,7]))-h7) == 0.0)
@NLconstraint(m, x1*(r[1]*(1-(x1*a[1,8]+x2*a[2,8]+x3*a[3,8]+x4*a[4,8]+x5*a[5,8]+x6*a[6,8]+x7*a[7,8]+x8*a[8,8]+x9*a[9,8]+x10*a[10,8]))-h8) == 0.0)
@NLconstraint(m, x1*(r[1]*(1-(x1*a[1,9]+x2*a[2,9]+x3*a[3,9]+x4*a[4,9]+x5*a[5,9]+x6*a[6,9]+x7*a[7,9]+x8*a[8,9]+x9*a[9,9]+x10*a[10,9]))-h9) == 0.0)
@NLconstraint(m, x1*(r[1]*(1-(x1*a[1,10]+x2*a[2,10]+x3*a[3,10]+x4*a[4,10]+x5*a[5,10]+x6*a[6,10]+x7*a[7,10]+x8*a[8,10]+x9*a[9,10]+x10*a[10,10]))-h10) == 0.0)

@NLobjective(m, Max, x1*h1+x2*h2+x3*h3+x4*h4+x5*h5+x6*h6+x7*h7+x8*h8+x9*h9+x10*h10)

status = solve(m)

(note: above window may need scrolling)

but now I get: “ArgumentError: invalid NLopt arguments”

Any hints appreciated.

From the JuMP docs (http://www.juliaopt.org/JuMP.jl/0.18/nlp.html#syntax-notes)

All expressions must be simple scalar operations. You cannot use dot syntax, matrix-vector products, vector slices, etc. Translate vector operations into explicit sum() operations or use the AffExpr plus auxiliary variable trick described below.

Thus, you can do something like:

# --- Corrected model: scalar NL expressions + a solver that accepts equality
# constraints. Maximizes sum_i x_i*h_i subject to
#   x_i * ( r_i * (1 - (a*x)_i) - h_i ) == 0   for each i.
N = 10
r = rand(N) * 2
# Broadcast the scalar subtraction (`.-`): plain `-` between an array and a
# scalar is an error on Julia >= 1.0.
a = rand(N, N) * 0.5 .- 0.25
for i in 1:N
    a[i, i] = 1.0   # unit diagonal
end

# LD_MMA supports only inequality constraints; SLSQP handles the equality
# constraints below.
m = Model(solver = NLoptSolver(algorithm = :LD_SLSQP))
@variable(m, x[1:N] >= 0)
@variable(m, 1.0 >= h[1:N] >= 0)

# @NLobjective/@NLconstraint require scalar expressions, so vectorized forms
# are translated into explicit sum(... for ...) generators.
@NLobjective(m, Max, sum(x[i] * h[i] for i in 1:N))
# Vectorized original (rejected by @NLconstraint): x.*(r.*(1-a*x)-h) == 0
for i in 1:N
    @NLconstraint(m, x[i] * (r[i] * (1 - sum(a[i, j] * x[j] for j in 1:N)) - h[i]) == 0)
end

status = solve(m)


As for the second error, the LD_MMA method doesn’t support equality constraints. From the NLopt documentation (https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/#mma-method-of-moving-asymptotes-and-ccsa):

implementation of the globally-convergent method-of-moving-asymptotes (MMA) algorithm for gradient-based local optimization, including nonlinear inequality constraints (but not equality constraints), specified in NLopt as NLOPT_LD_MMA

You may wish to try a different method such as :LD_SLSQP (https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/#slsqp)

1 Like

Many thanks indeed!!