Minimisation with equality constraints

I am trying to solve a constrained minimisation problem with equality constraints.

using LinearAlgebra
using Optimization, OptimizationMOI, AmplNLWriter, Ipopt_jll

function objectivehelp(N)
    car = rand(N) .+ 1; sar = rand(N);

    return car, sar
end

function objective(x, N)
    car, sar = objectivehelp(N);
    res = 0;
    for ij = 1:N
        res = res + car[ij] * (x[ij]-sar[ij])^2;
    end
    res = abs(res);

    return res
end

function constraint(x,N)
    # res = 0;
    # for ij = 1:N
    #     res = res + x[ij]^2;
    # end
    res = zeros(Float64, 1);
    for ij = 1:N
        res[1] = res[1] + x[ij]^2;
    end
    
    return res
end

N = 3;
car = rand(N) .+ 1; sar = rand(N);

obj(x, p) = objective(x,N)
cons(res, x, p) = (res .= constraint(x,N))

x0 = ones(Float64, N);

optprob = OptimizationFunction(obj, Optimization.AutoModelingToolkit(), cons = cons)
prob = OptimizationProblem(optprob, x0, lcons = ones(Float64,1), ucons = ones(Float64,1))
sol = solve(prob, AmplNLWriter.Optimizer(Ipopt_jll.amplexe), maxiters = 1000, abstol = 1e-6, reltol = 1e-6)

The code works fine when I use the commented-out (scalar) part of the constraint function, but with the uncommented (vector) part it throws the error

ERROR: LoadError: MethodError: no method matching Float64(::Symbolics.Num)

Closest candidates are:
  (::Type{T})(::Real, ::RoundingMode) where T<:AbstractFloat
   @ Base rounding.jl:207
  (::Type{T})(::T) where T<:Number
   @ Core boot.jl:792
  (::Type{T})(::SymbolicUtils.Symbolic) where T<:Union{AbstractFloat, Integer, Complex{<:AbstractFloat}, Complex{<:Integer}}
   @ Symbolics ~/.julia/packages/Symbolics/5BibL/src/Symbolics.jl:149
  ...

Stacktrace:
  [1] convert(#unused#::Type{Float64}, x::Symbolics.Num)
    @ Base ./number.jl:7
  [2] setindex!(A::Vector{Float64}, x::Symbolics.Num, i1::Int64)
    @ Base ./array.jl:969
  [3] constraint(x::Vector{Symbolics.Num}, N::Int64)
    @ Main ~/Work/Higgs work and codes/Codes/testoptim.jl:28
  [4] cons(res::Vector{Symbolics.Num}, x::Vector{Symbolics.Num}, p::Vector{Any})
    @ Main ~/Work/Higgs work and codes/Codes/testoptim.jl:38
  [5] modelingtoolkitize(prob::OptimizationProblem{true, OptimizationFunction{true, AutoModelingToolkit, typeof(obj), Nothing, Nothing, Nothing, typeof(cons), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED_NO_TIME), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Vector{Float64}, SciMLBase.NullParameters, Nothing, Nothing, Nothing, Vector{Float64}, Vector{Float64}, Nothing, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}}; kwargs::Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
    @ ModelingToolkit ~/.julia/packages/ModelingToolkit/dCa81/src/systems/optimization/modelingtoolkitize.jl:23
  [6] modelingtoolkitize(prob::OptimizationProblem{true, OptimizationFunction{true, AutoModelingToolkit, typeof(obj), Nothing, Nothing, Nothing, typeof(cons), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED_NO_TIME), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Vector{Float64}, SciMLBase.NullParameters, Nothing, Nothing, Nothing, Vector{Float64}, Vector{Float64}, Nothing, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}})
    @ ModelingToolkit ~/.julia/packages/ModelingToolkit/dCa81/src/systems/optimization/modelingtoolkitize.jl:6
  [7] instantiate_function(f::Function, cache::Optimization.ReInitCache{Vector{Float64}, SciMLBase.NullParameters}, adtype::AutoModelingToolkit, num_cons::Int64)
    @ OptimizationMTKExt ~/.julia/packages/Optimization/fPVIW/ext/OptimizationMTKExt.jl:58
  [8] OptimizationMOI.MOIOptimizationNLPCache(prob::OptimizationProblem{true, OptimizationFunction{true, AutoModelingToolkit, typeof(obj), Nothing, Nothing, Nothing, typeof(cons), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED_NO_TIME), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Vector{Float64}, SciMLBase.NullParameters, Nothing, Nothing, Nothing, Vector{Float64}, Vector{Float64}, Nothing, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}}, opt::AmplNLWriter.Optimizer; callback::Nothing, kwargs::Base.Pairs{Symbol, Union{Nothing, Real}, NTuple{4, Symbol}, NamedTuple{(:maxiters, :maxtime, :abstol, :reltol), Tuple{Int64, Nothing, Float64, Float64}}})
    @ OptimizationMOI ~/.julia/packages/OptimizationMOI/Ip1EA/src/nlp.jl:109
  [9] MOIOptimizationNLPCache
    @ ~/.julia/packages/OptimizationMOI/Ip1EA/src/nlp.jl:105 [inlined]
 [10] __init(prob::OptimizationProblem{true, OptimizationFunction{true, AutoModelingToolkit, typeof(obj), Nothing, Nothing, Nothing, typeof(cons), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED_NO_TIME), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Vector{Float64}, SciMLBase.NullParameters, Nothing, Nothing, Nothing, Vector{Float64}, Vector{Float64}, Nothing, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}}, opt::AmplNLWriter.Optimizer; maxiters::Int64, maxtime::Nothing, abstol::Float64, reltol::Float64, kwargs::Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
    @ OptimizationMOI ~/.julia/packages/OptimizationMOI/Ip1EA/src/OptimizationMOI.jl:180
 [11] __init
    @ ~/.julia/packages/OptimizationMOI/Ip1EA/src/OptimizationMOI.jl:172 [inlined]
 [12] #init#617
    @ ~/.julia/packages/SciMLBase/ynHlA/src/solve.jl:163 [inlined]
 [13] init
    @ ~/.julia/packages/SciMLBase/ynHlA/src/solve.jl:161 [inlined]
 [14] solve(::OptimizationProblem{true, OptimizationFunction{true, AutoModelingToolkit, typeof(obj), Nothing, Nothing, Nothing, typeof(cons), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED_NO_TIME), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}, Vector{Float64}, SciMLBase.NullParameters, Nothing, Nothing, Nothing, Vector{Float64}, Vector{Float64}, Nothing, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}}, ::AmplNLWriter.Optimizer; kwargs::Base.Pairs{Symbol, Real, Tuple{Symbol, Symbol, Symbol}, NamedTuple{(:maxiters, :abstol, :reltol), Tuple{Int64, Float64, Float64}}})
    @ SciMLBase ~/.julia/packages/SciMLBase/ynHlA/src/solve.jl:94
 [15] top-level scope
    @ ~/Work/Higgs work and codes/Codes/testoptim.jl:44
 [16] include(fname::String)
    @ Base.MainInclude ./client.jl:478
 [17] top-level scope
    @ REPL[1]:1
in expression starting at /home/aritra/Work/Higgs work and codes/Codes/testoptim.jl:44

I was looking for a way to supply multiple equality constraints, which is why I was trying the uncommented, vector-valued version.
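
Concretely, what I am after is something like this hypothetical two-constraint version (the in-place cons interface fills one residual entry per constraint, and setting lcons == ucons makes them equalities):

cons2(res, x, p) = (res[1] = sum(x .^ 2); res[2] = sum(x))   # x'x = 1 and sum(x) = 0
optprob2 = OptimizationFunction(obj, Optimization.AutoModelingToolkit(), cons = cons2)
prob2 = OptimizationProblem(optprob2, x0, lcons = [1.0, 0.0], ucons = [1.0, 0.0])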

Any ideas what the error is?


Have you considered using JuMP for this?

using JuMP, Ipopt
N = 3
car = rand(N) .+ 1
sar = rand(N)
model = Model(Ipopt.Optimizer)
@variable(model, x[1:N])
@objective(model, Min, sum(car[i] * (x[i] - sar[i])^2 for i in 1:N))
@constraint(model, sum(x[i]^2 for i in 1:N) == 1)
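
To complete the example, the model is then solved and queried with the standard JuMP calls:

optimize!(model)
objective_value(model)
value.(x)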

Thanks. Is it possible to minimise user-defined functions like the ones shown below? The example below doesn’t work. I have to minimise explicitly programmed functions like these, which cannot be written as a single expression.

using LinearAlgebra
using JuMP, Ipopt

function objectivehelp(x, N)
    xp = zeros(N);
    for ij = 1:N
        xp[ij] = 2*x[ij];
    end

    return xp
end

function objective(x, N)
    car = rand(N) .+ 1; sar = rand(N);
    xp = objectivehelp(x, N);
    
    res = 0;
    for ij = 1:N
        res = res + car[ij] * ( xp[ij]^0.5-sar[ij])^2;
    end
    res = abs(res);

    return res
end

function constrainthelp(x)
    res = 2*x;

    return res
end

function constraint(x,N)
    y = constrainthelp(x);
    cmat = zeros(Float64, 2,N);
    for ij = 1:2
        for jk = 1:N
            cmat[ij,jk] = rand()-0.5;
        end
    end
    res = cmat * y;
    
    return res
end

obj(x) = objective(x,N)
cons(x) = constraint(x,N)

N = 3
car = rand(N) .+ 1
sar = rand(N)
model = Model(Ipopt.Optimizer)
@variable(model, x[1:N])
@objective(model, Min, obj(x))
@constraint(model, cons(x))
optimize!(model)
objective_value(model)
value(x)

A few comments:

  • Your functions need to be deterministic. Currently, each time you call objective and constraint you will get a different answer.
  • zeros(N) creates a Vector{Float64}. You can use zeros(T, N) instead, which creates an array of the right element type.
  • Your @constraint line is missing a comparison operator like == 1.
  • Your functions include x^0.5, whose gradient isn’t defined at x = 0. You probably need to set a non-zero lower bound on x.

Making all those changes, I get something like this:

julia> using JuMP, Ipopt

julia> function objective_help(x::Vector{VariableRef})
           N = length(x)
           xp = zeros(AffExpr, N)
           for ij in 1:N
               xp[ij] = 2 * x[ij]
           end
           return xp
       end
objective_help (generic function with 2 methods)

julia> function objective(x, car, sar)
           N = length(x)
           xp = objective_help(x)
           res = sum(car[ij] * (xp[ij]^0.5 - sar[ij])^2 for ij in 1:N)
           return abs(res)
       end
objective (generic function with 1 method)

julia> constraint_help(x) = 2 .* x
constraint_help (generic function with 1 method)

julia> function constraint(x, cmat)
           y = constraint_help(x);
           return cmat * y
       end
constraint (generic function with 1 method)

julia> N = 3
3

julia> car = rand(N) .+ 1
3-element Vector{Float64}:
 1.3146682360894872
 1.0688322590056367
 1.213419895106776

julia> sar = rand(N)
3-element Vector{Float64}:
 0.22311572604394314
 0.525736516469558
 0.0945780732842676

julia> cmat = zeros(Float64, 2, N)
2×3 Matrix{Float64}:
 0.0  0.0  0.0
 0.0  0.0  0.0

julia> for ij in 1:2, jk in 1:N
           cmat[ij, jk] = rand() - 0.5
       end

julia> model = Model(Ipopt.Optimizer)
A JuMP Model
Feasibility problem with:
Variables: 0
Model mode: AUTOMATIC
CachingOptimizer state: EMPTY_OPTIMIZER
Solver name: Ipopt

julia> @variable(model, x[1:N] >= 0.00001)
3-element Vector{VariableRef}:
 x[1]
 x[2]
 x[3]

julia> @objective(model, Min, objective(x, car, sar))
abs((1.3146682360894872 * ((((2 x[1]) ^ 0.5) - 0.22311572604394314) ^ 2.0)) + (1.213419895106776 * ((((2 x[3]) ^ 0.5) - 0.0945780732842676) ^ 2.0)) + (1.0688322590056367 * ((((2 x[2]) ^ 0.5) - 0.525736516469558) ^ 2.0)))

julia> @constraint(model, constraint(x, cmat) .== 1)
2-element Vector{ConstraintRef{Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.EqualTo{Float64}}, ScalarShape}}:
 -0.018778023797837218 x[1] + 0.7589910154986776 x[2] - 0.8148125363688683 x[3] = 1
 -0.6298413441703727 x[1] - 0.17177726380301261 x[2] + 0.9023777745930739 x[3] = 1

julia> optimize!(model)
This is Ipopt version 3.14.13, running with linear solver MUMPS 5.6.1.

Number of nonzeros in equality constraint Jacobian...:        6
Number of nonzeros in inequality constraint Jacobian.:        0
Number of nonzeros in Lagrangian Hessian.............:        6

Total number of variables............................:        3
                     variables with only lower bounds:        3
                variables with lower and upper bounds:        0
                     variables with only upper bounds:        0
Total number of equality constraints.................:        2
Total number of inequality constraints...............:        0
        inequality constraints with only lower bounds:        0
   inequality constraints with lower and upper bounds:        0
        inequality constraints with only upper bounds:        0

iter    objective    inf_pr   inf_du lg(mu)  ||d||  lg(rg) alpha_du alpha_pr  ls
   0  1.6923605e-01 1.00e+00 2.00e+00  -1.0 0.00e+00    -  0.00e+00 0.00e+00   0
   1  1.8237829e-01 9.95e-01 1.58e+01  -1.0 1.81e+00    -  7.62e-03 5.47e-03h  1
   2  7.9818312e+00 2.22e-16 3.72e+02  -1.0 3.14e+00    -  6.50e-03 1.00e+00h  1
   3  8.0084526e+00 4.44e-16 8.85e-01  -1.0 1.09e-02    -  1.00e+00 1.00e+00f  1
   4  8.0059375e+00 4.44e-16 1.01e-03  -1.7 8.00e-04    -  1.00e+00 1.00e+00f  1
   5  7.9808356e+00 2.22e-16 4.95e-01  -3.8 1.11e-02    -  1.00e+00 1.00e+00f  1
   6  7.9803142e+00 0.00e+00 2.33e-02  -3.8 8.93e-04    -  1.00e+00 1.00e+00f  1
   7  7.9802377e+00 2.22e-16 5.30e-03  -3.8 3.58e-04    -  1.00e+00 1.00e+00f  1
   8  7.9802365e+00 1.11e-16 7.93e-06  -3.8 1.30e-05    -  1.00e+00 1.00e+00f  1
   9  7.9802314e+00 2.22e-16 6.59e-04  -5.7 1.17e-04    -  1.00e+00 1.00e+00f  1
iter    objective    inf_pr   inf_du lg(mu)  ||d||  lg(rg) alpha_du alpha_pr  ls
  10  7.9802314e+00 2.22e-16 2.93e-08  -5.7 7.60e-07    -  1.00e+00 1.00e+00h  1
  11  7.9802314e+00 2.22e-16 1.09e-07  -8.6 1.46e-06    -  1.00e+00 1.00e+00h  1
  12  7.9802314e+00 1.11e-16 2.60e-14  -8.6 1.23e-10    -  1.00e+00 1.00e+00h  1

Number of Iterations....: 12

                                   (scaled)                 (unscaled)
Objective...............:   3.2028226115109715e+00    7.9802314272782713e+00
Dual infeasibility......:   2.6014114945468764e-14    6.4817407275241498e-14
Constraint violation....:   1.1102230246251565e-16    1.1102230246251565e-16
Variable bound violation:   0.0000000000000000e+00    0.0000000000000000e+00
Complementarity.........:   2.5059035775423343e-09    6.2437708574182410e-09
Overall NLP error.......:   2.5059035775423343e-09    6.2437708574182410e-09


Number of objective function evaluations             = 13
Number of objective gradient evaluations             = 13
Number of equality constraint evaluations            = 13
Number of inequality constraint evaluations          = 0
Number of equality constraint Jacobian evaluations   = 1
Number of inequality constraint Jacobian evaluations = 0
Number of Lagrangian Hessian evaluations             = 12
Total seconds in IPOPT                               = 0.006

EXIT: Optimal Solution Found.

julia> objective_value(model)
7.980231427278271

julia> value.(x)
3-element Vector{Float64}:
 0.004325149667465344
 3.1554184142833086
 1.7118698431331894

In many cases you really should consider writing out the functions algebraically though, if only to think carefully about what you are actually modelling.


Thanks.
My actual functions are deterministic, the random coefficients car and sar were created just for easy instantiation.

So the data types in the “intermediate” functions have to be AffExpr.
Is there a way of getting around this? At the moment I have a very complicated function (defined in terms of other functions), and it seems that to make it work with JuMP I have to restructure the entire code from scratch.
From your answer, it seems that I can use the data type T instead, right?

So the data types in the “intermediate” functions have to be AffExpr.

No. This is just an example. For zeros(T, N), you should use the appropriate T, whatever that may be.

You could also do:

xp = [2 * x[ij] for ij in 1:N]

and let Julia figure out the correct type. You’re only having a problem because you are trying to pre-allocate the buffer xp.

The pre-allocation is also the cause of your original issue. You used res = zeros(Float64, 1), but x[ij]^2 is not a Float64 there; it is a Symbolics.Num, which cannot be stored in a Vector{Float64}.
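
A minimal sketch of that fix, applied to the constraint function from your first post: allocate the buffer with the element type of x, so it holds Symbolics.Num while AutoModelingToolkit traces the function symbolically and Float64 during an ordinary evaluation.

function constraint(x, N)
    # eltype(x) is Symbolics.Num during tracing and Float64 at solve
    # time, so the buffer always matches what gets stored into it.
    res = zeros(eltype(x), 1)
    for ij in 1:N
        res[1] += x[ij]^2
    end
    return res
end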

At the moment I have a very complicated function (defined in terms of other functions), and it seems that to make it work with JuMP I have to restructure the entire code from scratch.

You could also register a user-defined operator, so that JuMP treats your function as a black box and differentiates it with automatic differentiation instead of building an algebraic expression. Here is a minimal sketch, assuming a recent JuMP version with the @operator macro (older versions use register); my_f and op_f are placeholder names:
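
using JuMP, Ipopt

my_f(x...) = sum((xi - 1)^2 for xi in x)  # any smooth Julia function of scalars

model = Model(Ipopt.Optimizer)
@variable(model, x[1:3])
# Register my_f as an operator taking 3 scalar arguments; JuMP
# differentiates it automatically rather than tracing through it.
@operator(model, op_f, 3, my_f)
@objective(model, Min, op_f(x...))
optimize!(model)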

But if your function is very complicated, what you should use depends on whether you can automatically differentiate it, so if you want advice, perhaps you could post a more realistic example? It’s hard to offer general advice based on the simple example above.