(JuMP + NLopt) Only the initial value is returned, with no error or warning

For a nonlinear optimization problem with 9 parameters, Ipopt works well, but NLopt only returns the initial value, without any error or warning. Could someone tell me how to fix this?
Using NLopt:

using JuMP
using NLopt
using LinearAlgebra
function testit()
    lb = [-1, -1, -1, -1, -1, -1, -1, -1, -1]
    ub = [ 1,  1,  1,  1,  1,  1,  1,  1,  1]
    x0 = [0.7, 0.7, 0.07, -0.4, 0.5, -0.7, -0.5, 0.5, 0.7]

    model = Model(NLopt.Optimizer)
    set_optimizer_attribute(model, "algorithm", :LD_SLSQP)
    @variable(model, lb[1] ≤ x1 ≤ ub[1], start = x0[1])
    @variable(model, lb[2] ≤ x2 ≤ ub[2], start = x0[2])
    @variable(model, lb[3] ≤ x3 ≤ ub[3], start = x0[3])
    @variable(model, lb[4] ≤ x4 ≤ ub[4], start = x0[4])
    @variable(model, lb[5] ≤ x5 ≤ ub[5], start = x0[5])
    @variable(model, lb[6] ≤ x6 ≤ ub[6], start = x0[6])
    @variable(model, lb[7] ≤ x7 ≤ ub[7], start = x0[7])
    @variable(model, lb[8] ≤ x8 ≤ ub[8], start = x0[8])
    @variable(model, lb[9] ≤ x9 ≤ ub[9], start = x0[9])

    @NLobjective(model, Min, x1^2+x2+x3^2+x4+x5^2+x6+x7^2+x8+x9^2)
    @constraint(model, [x1 x2 x3; x4 x5 x6; x7 x8 x9]*[x1 x2 x3; x4 x5 x6; x7 x8 x9]' .== Matrix{Float64}(I, 3, 3))

    JuMP.optimize!(model)
    println("got ", objective_value(model), " at ")
    println(" x1:  ", value(x1))
    println(" x2:  ", value(x2))
    println(" x3:  ", value(x3))
    println(" x4:  ", value(x4))
    println(" x5:  ", value(x5))
    println(" x6:  ", value(x6))
    println(" x7:  ", value(x7))
    println(" x8:  ", value(x8))
    println(" x9:  ", value(x9))
end

testit()

got 1.5848999999999998 at
 x1:  0.7
 x2:  0.7
 x3:  0.07
 x4:  -0.4
 x5:  0.5
 x6:  -0.7
 x7:  -0.5
 x8:  0.5
 x9:  0.7

Using Ipopt:

using JuMP
using Ipopt
using LinearAlgebra
function testit()
    lb = [-1, -1, -1, -1, -1, -1, -1, -1, -1]
    ub = [ 1,  1,  1,  1,  1,  1,  1,  1,  1]
    x0 = [0.7, 0.7, 0.07, -0.4, 0.5, -0.7, -0.5, 0.5, 0.7]

    model = Model(Ipopt.Optimizer)
    set_optimizer_attribute(model, "max_cpu_time", 60.0)
    set_optimizer_attribute(model, "print_level", 0)
    set_silent(model)
    
    @variable(model, lb[1] ≤ x1 ≤ ub[1], start = x0[1])
    @variable(model, lb[2] ≤ x2 ≤ ub[2], start = x0[2])
    @variable(model, lb[3] ≤ x3 ≤ ub[3], start = x0[3])
    @variable(model, lb[4] ≤ x4 ≤ ub[4], start = x0[4])
    @variable(model, lb[5] ≤ x5 ≤ ub[5], start = x0[5])
    @variable(model, lb[6] ≤ x6 ≤ ub[6], start = x0[6])
    @variable(model, lb[7] ≤ x7 ≤ ub[7], start = x0[7])
    @variable(model, lb[8] ≤ x8 ≤ ub[8], start = x0[8])
    @variable(model, lb[9] ≤ x9 ≤ ub[9], start = x0[9])

    @NLobjective(model, Min, x1^2+x2+x3^2+x4+x5^2+x6+x7^2+x8+x9^2)
    @constraint(model, [x1 x2 x3; x4 x5 x6; x7 x8 x9]*[x1 x2 x3; x4 x5 x6; x7 x8 x9]' .== Matrix{Float64}(I, 3, 3))

    JuMP.optimize!(model)
    println("got ", objective_value(model), " at ")
    println(" x1:  ", value(x1))
    println(" x2:  ", value(x2))
    println(" x3:  ", value(x3))
    println(" x4:  ", value(x4))
    println(" x5:  ", value(x5))
    println(" x6:  ", value(x6))
    println(" x7:  ", value(x7))
    println(" x8:  ", value(x8))
    println(" x9:  ", value(x9))
end

testit()

got -1.8284271244830348 at
 x1:  -0.500000014712519
 x2:  -0.7071067831367339
 x3:  0.49999998301859216
 x4:  -0.7071067591675974
 x5:  5.788661909852154e-13
 x6:  -0.707106803904326
 x7:  0.5000000173810248
 x8:  -0.7071067792414213
 x9:  -0.49999998585490696

Your problem is non-convex, so the solver will only find a local minimum. Try passing different starting points.
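
It is also worth checking how NLopt stopped: JuMP does not raise an error when a solver fails, so a run that just hands back the starting point can look deceptively normal. A minimal check, right after JuMP.optimize!(model) (these are standard JuMP status queries):

JuMP.optimize!(model)
# NLopt failures do not throw; inspect the statuses before trusting the values.
@show termination_status(model)  # e.g. LOCALLY_SOLVED vs. OTHER_ERROR
@show primal_status(model)       # FEASIBLE_POINT only if a solution was found
@show raw_status(model)          # the solver's own return code, e.g. FORCED_STOP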

You can also simplify some of your code:

function testit()
    lb = [-1, -1, -1, -1, -1, -1, -1, -1, -1]
    ub = [ 1,  1,  1,  1,  1,  1,  1,  1,  1]
    x0 = [0.7, 0.7, 0.07, -0.4, 0.5, -0.7, -0.5, 0.5, 0.7]
    model = Model(NLopt.Optimizer)
    set_optimizer_attribute(model, "algorithm", :LD_SLSQP)
    @variable(model, lb[i] <= x[i=1:9] <= ub[i], start = x0[i])
    @NLobjective(model, Min, sum(x[i]^2 for i in 1:2:9) + sum(x[i] for i in 2:2:8))
    X = reshape(x, 3, 3)
    @constraint(model, X * X' .== Matrix{Float64}(I, 3, 3))
    optimize!(model)
    println("got ", objective_value(model), " at ")
    for i in 1:9
        println(" x$i:  ", value(x[i]))
    end
end

Thanks for the help. I tried different starting points, but it still returned the same parameters.
Running your code gives the same output:

got 1.5848999999999998 at
 x1:  0.7
 x2:  0.7
 x3:  0.07
 x4:  -0.4
 x5:  0.5
 x6:  -0.7
 x7:  -0.5
 x8:  0.5
 x9:  0.7

What if you use the solution from Ipopt as the new starting point?
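
Something like this sketch, which reuses the simplified model from above and chains the two solvers (optimizer_with_attributes is regular JuMP API; the restart point is passed through the start keyword):

using JuMP, Ipopt, NLopt, LinearAlgebra

# Sketch: solve once with Ipopt, then restart NLopt from Ipopt's solution.
function solve_with(optimizer, x0)
    model = Model(optimizer)
    @variable(model, -1 <= x[i=1:9] <= 1, start = x0[i])
    @NLobjective(model, Min, sum(x[i]^2 for i in 1:2:9) + sum(x[i] for i in 2:2:8))
    X = reshape(x, 3, 3)
    @constraint(model, X * X' .== Matrix{Float64}(I, 3, 3))
    optimize!(model)
    return objective_value(model), value.(x)
end

x0 = [0.7, 0.7, 0.07, -0.4, 0.5, -0.7, -0.5, 0.5, 0.7]
_, x_ipopt = solve_with(Ipopt.Optimizer, x0)
nlopt = optimizer_with_attributes(NLopt.Optimizer, "algorithm" => :LD_SLSQP)
obj, x_nlopt = solve_with(nlopt, x_ipopt)
println("NLopt restarted from Ipopt's solution: ", obj)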

You could also try a non-convex solver like Gurobi.jl, which can find the globally optimal solution to this problem.

julia> using JuMP, Gurobi

julia> function testit()
           lb = [-1, -1, -1, -1, -1, -1, -1, -1, -1]
           ub = [ 1,  1,  1,  1,  1,  1,  1,  1,  1]
           model = Model(Gurobi.Optimizer)
           set_optimizer_attribute(model, "NonConvex", 2)
           @variable(model, -1 <= x[i=1:9] <= 1)
           @objective(model, Min, sum(x[i]^2 for i in 1:2:9) + sum(x[i] for i in 2:2:8))
           X = reshape(x, 3, 3)
           @constraint(model, X * X' .== [1 0 0; 0 1 0; 0 0 1])
           optimize!(model)
           println("got ", objective_value(model), " at ")
           for i in 1:9
               println(" x$i:  ", value(x[i]))
           end
           return
       end
testit (generic function with 1 method)

julia> testit()
Academic license - for non-commercial use only - expires 2022-01-06
Gurobi Optimizer version 9.1.0 build v9.1.0rc0 (mac64)
Thread count: 4 physical cores, 8 logical processors, using up to 8 threads
Optimize a model with 0 rows, 9 columns and 0 nonzeros
Model fingerprint: 0x4d5febfb
Model has 5 quadratic objective terms
Model has 9 quadratic constraints
Coefficient statistics:
  Matrix range     [0e+00, 0e+00]
  QMatrix range    [1e+00, 1e+00]
  Objective range  [1e+00, 1e+00]
  QObjective range [2e+00, 2e+00]
  Bounds range     [1e+00, 1e+00]
  RHS range        [0e+00, 0e+00]
  QRHS range       [1e+00, 1e+00]

Continuous model is non-convex -- solving as a MIP.

Presolve time: 0.00s
Presolved: 72 rows, 27 columns, 180 nonzeros
Presolved model has 5 quadratic objective terms
Presolved model has 18 bilinear constraint(s)
Variable types: 27 continuous, 0 integer (0 binary)

Root relaxation: objective -3.500000e+00, 47 iterations, 0.00 seconds

    Nodes    |    Current Node    |     Objective Bounds      |     Work
 Expl Unexpl |  Obj  Depth IntInf | Incumbent    BestBd   Gap | It/Node Time

     0     0   -3.50000    0    7          -   -3.50000      -     -    0s
     0     0   -3.50000    0    7          -   -3.50000      -     -    0s
     0     0   -3.00002    0   10          -   -3.00002      -     -    0s
     0     0   -3.00002    0   15          -   -3.00002      -     -    0s
     0     0   -3.00001    0   17          -   -3.00001      -     -    0s
     0     0   -3.00001    0   17          -   -3.00001      -     -    0s
     0     0   -2.94433    0   17          -   -2.94433      -     -    0s
     0     0   -2.90557    0   16          -   -2.90557      -     -    0s
     0     0   -2.88113    0   18          -   -2.88113      -     -    0s
     0     0   -2.84620    0   13          -   -2.84620      -     -    0s
     0     0   -2.84620    0   13          -   -2.84620      -     -    0s
     0     0   -2.83531    0   14          -   -2.83531      -     -    0s
     0     0   -2.83480    0   16          -   -2.83480      -     -    0s
     0     0   -2.83403    0   14          -   -2.83403      -     -    0s
     0     0   -2.83337    0   17          -   -2.83337      -     -    0s
     0     0   -2.83325    0   14          -   -2.83325      -     -    0s
     0     0   -2.83308    0   17          -   -2.83308      -     -    0s
     0     0   -2.83101    0   14          -   -2.83101      -     -    0s
     0     0   -2.83074    0   17          -   -2.83074      -     -    0s
     0     0   -2.83071    0   14          -   -2.83071      -     -    0s
     0     0   -2.83071    0   17          -   -2.83071      -     -    0s
     0     0   -2.83064    0   18          -   -2.83064      -     -    0s
H    0     0                      -1.3660254   -2.83064   107%     -    0s
     0     2   -2.83064    0   18   -1.36603   -2.83064   107%     -    0s
*  205   196              28      -1.4097835   -2.63375  86.8%  16.8    0s
*  213   196              27      -1.4122227   -2.63375  86.5%  16.8    0s
*  394   287              33      -1.8210366   -2.63375  44.6%  17.1    0s
*  932   197              30      -1.8280528   -2.05354  12.3%  19.6    0s
*  933   197              30      -1.8280649   -2.05354  12.3%  19.6    0s
*  934   197              28      -1.8280742   -2.05354  12.3%  19.6    0s
* 1393   195              30      -1.8281543   -1.86833  2.20%  17.7    0s
* 1394   195              30      -1.8281591   -1.86833  2.20%  17.7    0s
* 1821   306              37      -1.8283993   -1.83812  0.53%  15.9    0s
* 1825   306              37      -1.8284012   -1.83812  0.53%  15.8    0s
* 2600   212              32      -1.8284121   -1.82993  0.08%  14.2    0s
* 2602   212              32      -1.8284122   -1.82993  0.08%  14.2    0s
* 2761   212              33      -1.8284174   -1.82956  0.06%  14.0    0s
* 2765   212              34      -1.8284178   -1.82956  0.06%  14.0    0s
* 2847   222              37      -1.8284194   -1.82946  0.06%  13.9    0s
* 2850   222              37      -1.8284194   -1.82946  0.06%  13.8    0s
* 2868   222              33      -1.8284209   -1.82946  0.06%  13.8    0s
* 2928   222              34      -1.8284233   -1.82940  0.05%  13.8    0s
* 2931   222              34      -1.8284236   -1.82940  0.05%  13.8    0s
* 2955   222              34      -1.8284263   -1.82940  0.05%  13.7    0s
* 3045   222              34      -1.8284266   -1.82933  0.05%  13.6    0s
* 3047   222              35      -1.8284269   -1.82933  0.05%  13.6    0s
* 3048   222              35      -1.8284277   -1.82933  0.05%  13.6    0s
* 3286   207              33      -1.8284288   -1.82893  0.03%  13.3    0s
* 3697   156              38      -1.8284294   -1.82870  0.01%  12.9    0s

Cutting planes:
  PSD: 59

Explored 3945 nodes (50162 simplex iterations) in 0.35 seconds
Thread count was 8 (of 8 available processors)

Solution count 10: -1.82843 -1.82843 -1.82843 ... -1.82842

Optimal solution found (tolerance 1.00e-04)
Warning: max constraint violation (1.9429e-06) exceeds tolerance
Best objective -1.828429357145e+00, best bound -1.828529615365e+00, gap 0.0055%

User-callback calls 8114, time in user-callback 0.00 sec
got -1.8284293571453163 at 
 x1:  0.4999205854780597
 x2:  -0.7069905555962632
 x3:  -0.500242454538445
 x4:  -0.707565881421891
 x5:  -0.0003278618041877305
 x6:  -0.7066470811910285
 x7:  -0.49942824157137605
 x8:  -0.7072242852874656
 x9:  0.5004065000292449

Thank you. Gurobi works, but using the Ipopt solution as the new starting point in NLopt still fails. Since a similar operation needs to be run thousands of times, I want the function to be efficient and thread-safe (Ipopt is not); that's why NLopt is required. Testing these codes, Gurobi seems to be the most time-consuming (shouldn't it be the fastest? :joy:). Could you give me some suggestions?

Solving nonconvex quadratic programs is difficult. Gurobi takes longer because it finds, and proves, the global optimum.

So you can choose between a fast local optimum or a slower global optimum.

There might be other algorithms in NLopt that find better solutions; you should try a few out.
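
For example, SLSQP, COBYLA, and ISRES all accept nonlinear equality constraints, while most other NLopt algorithms will reject this model. A sketch comparing them on the simplified model above (the 10-second cap is arbitrary; ISRES is a stochastic global method that runs until stopped):

using JuMP, NLopt, LinearAlgebra

x0 = [0.7, 0.7, 0.07, -0.4, 0.5, -0.7, -0.5, 0.5, 0.7]
# Compare the NLopt algorithms that support nonlinear equality constraints.
for alg in (:LD_SLSQP, :LN_COBYLA, :GN_ISRES)
    model = Model(optimizer_with_attributes(
        NLopt.Optimizer, "algorithm" => alg, "maxtime" => 10.0))
    @variable(model, -1 <= x[i=1:9] <= 1, start = x0[i])
    @NLobjective(model, Min, sum(x[i]^2 for i in 1:2:9) + sum(x[i] for i in 2:2:8))
    X = reshape(x, 3, 3)
    @constraint(model, X * X' .== Matrix{Float64}(I, 3, 3))
    optimize!(model)
    println(alg, ": ", termination_status(model), ", obj = ", objective_value(model))
end

Since each local solve is cheap, you could also wrap the best of these in a multistart loop: run it from many random points in the box and keep the best feasible result.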

Many thanks for your suggestion.
I'm now merging this part into my simulation code but have run into some trouble: a ForwardDiff issue that shows up as ERROR: InexactError: Float64(NaN + NaN*im). I searched a lot but cannot fix it. It seems I missed some rules for AD when writing the fitgrains function. Could you have a look and give me some tips?
Thanks in advance :pray:

function refinegrain(
    grain_spot_list::Array{Float64,2}, 
    parameters::para, 
    U::Array{Float64,2}, 
    B::Array{Float64,2}, 
    pos::Array{Float64,2})

    x0 = [U[:]; pos[:]]
    lb = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -0.5, -0.5, -0.5]
    ub = [ 1,  1,  1,  1,  1,  1,  1,  1,  1,  0.5,  0.5,  0.5]
    
    model = Model(NLopt.Optimizer)
    set_optimizer_attribute(model, "algorithm", :LN_COBYLA)
  
    @variable(model, lb[1] ≤ x1 ≤ ub[1], start = x0[1])
    @variable(model, lb[2] ≤ x2 ≤ ub[2], start = x0[2])
    @variable(model, lb[3] ≤ x3 ≤ ub[3], start = x0[3])
    @variable(model, lb[4] ≤ x4 ≤ ub[4], start = x0[4])
    @variable(model, lb[5] ≤ x5 ≤ ub[5], start = x0[5])
    @variable(model, lb[6] ≤ x6 ≤ ub[6], start = x0[6])
    @variable(model, lb[7] ≤ x7 ≤ ub[7], start = x0[7])
    @variable(model, lb[8] ≤ x8 ≤ ub[8], start = x0[8])
    @variable(model, lb[9] ≤ x9 ≤ ub[9], start = x0[9])
    @variable(model, lb[10] ≤ x10 ≤ ub[10], start = x0[10])
    @variable(model, lb[11] ≤ x11 ≤ ub[11], start = x0[11])
    @variable(model, lb[12] ≤ x12 ≤ ub[12], start = x0[12])

    function fitgrains(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12)
        U = [x1 x4 x7; x2 x5 x8; x3 x6 x9]
        pos = [x10 x11 x12]
        refined_Gvs = zeros(size(grain_spot_list,1),3)
        Gv = zeros(size(grain_spot_list,1),3)
        Gv_angle = zeros(size(grain_spot_list,1))
        for i in 1:size(grain_spot_list,1)
            rot = grain_spot_list[i,2]
            Omega = euler2u(rot, 0., 0.)
            spots_WeightedCentroid = grain_spot_list[[i],6:7]  # experimental spot
            pos_rot = Omega*pos'
            refined_Gvs[i,:] = spotpos2gvector(spots_WeightedCentroid, parameters, pos_rot)
            Gv[i,:] = Omega*U*B*grain_spot_list[i,8:10]
            Gv_angle[i] = acos(sum(normr(Gv[[i],:]).*normr(refined_Gvs[[i],:]), dims=2))[1]
        end
        return sum(abs.(Gv_angle))
    end

    register(model, :fitgrains, 12, fitgrains; autodiff = true)
    @NLobjective(model, Min, fitgrains(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12))
    @constraint(model, [x1 x4 x7; x2 x5 x8; x3 x6 x9]*[x1 x4 x7; x2 x5 x8; x3 x6 x9]' .== Matrix{Float64}(I, 3, 3))
    JuMP.optimize!(model)

    refined_ori = [
        value(x1) value(x4) value(x7)
        value(x2) value(x5) value(x8)
        value(x3) value(x6) value(x9)]
    refined_pos = [value(x10) value(x11) value(x12)]
    refined_error = objective_value(model)
    return refined_ori, refined_pos, refined_error
end
function normr(a::Array{Float64,2})
    a./sqrt.(sum(a.^2, dims=2))
end

Output information:

julia> refinegrain(
           grain_spot_list::Array{Float64,2},
           parameters::para,
           U::Array{Float64,2},
           B::Array{Float64,2},
           pos::Array{Float64,2})
ERROR: InexactError: Float64(NaN + NaN*im)
Stacktrace:
  [1] Real
    @ .\complex.jl:44 [inlined]
  [2] convert
    @ .\number.jl:7 [inlined]
  [3] setindex!
    @ .\array.jl:903 [inlined]
  [4] (::var"#fitgrains#128"{Matrix{Float64}, para, Matrix{Float64}})(x1::Float64, x2::Float64, x3::Float64, x4::Float64, x5::Float64, x6::Float64, x7::Float64, x8::Float64, x9::Float64, x10::Float64, x11::Float64, x12::Float64)
    @ Main C:\Users\rulia\OneDrive\My_code\CodeOther\ZhangYubin\julia\function_indexing_ff_LabDCT.jl:693
  [5] (::JuMP.var"#137#139"{var"#fitgrains#128"{Matrix{Float64}, para, Matrix{Float64}}})(x::SubArray{Float64, 1, Vector{Float64}, Tuple{UnitRange{Int64}}, true})
    @ JuMP C:\Users\rulia\.julia\packages\JuMP\2IF9U\src\nlp.jl:1774
  [6] eval_objective(d::JuMP._UserFunctionEvaluator, x::SubArray{Float64, 1, Vector{Float64}, Tuple{UnitRange{Int64}}, true})
    @ JuMP C:\Users\rulia\.julia\packages\JuMP\2IF9U\src\nlp.jl:1762
  [7] eval_and_check_return_type(::Function, ::Type, ::JuMP._UserFunctionEvaluator, ::Vararg{Any})
    @ JuMP._Derivatives C:\Users\rulia\.julia\packages\JuMP\2IF9U\src\_Derivatives\forward.jl:5
  [8] forward_eval(storage::Vector{Float64}, partials_storage::Vector{Float64}, nd::Vector{JuMP._Derivatives.NodeData}, adj::SparseArrays.SparseMatrixCSC{Bool, Int64}, const_values::Vector{Float64}, parameter_values::Vector{Float64}, x_values::Vector{Float64}, subexpression_values::Vector{Float64}, user_input_buffer::Vector{Float64}, user_output_buffer::Vector{Float64}, user_operators::JuMP._Derivatives.UserOperatorRegistry)
    @ JuMP._Derivatives C:\Users\rulia\.julia\packages\JuMP\2IF9U\src\_Derivatives\forward.jl:175
  [9] _forward_eval_all(d::NLPEvaluator, x::Vector{Float64})
    @ JuMP C:\Users\rulia\.julia\packages\JuMP\2IF9U\src\nlp.jl:683
 [10] macro expansion
    @ C:\Users\rulia\.julia\packages\JuMP\2IF9U\src\nlp.jl:804 [inlined]
 [11] macro expansion
    @ .\timing.jl:299 [inlined]
 [12] eval_constraint_jacobian(d::NLPEvaluator, J::Vector{Float64}, x::Vector{Float64})
    @ JuMP C:\Users\rulia\.julia\packages\JuMP\2IF9U\src\nlp.jl:802
 [13] (::NLopt.var"#g_eq#45"{NLopt.Optimizer, Vector{Float64}, Vector{Float64}, Vector{Tuple{Int64, Int64}}, Int64, Vector{Int64}, Vector{Int64}})(result::Vector{Float64}, x::Vector{Float64}, jac::Matrix{Float64})
    @ NLopt C:\Users\rulia\.julia\packages\NLopt\zt9hU\src\MOI_wrapper.jl:880
 [14] optimize!(model::NLopt.Optimizer)
    @ NLopt C:\Users\rulia\.julia\packages\NLopt\zt9hU\src\MOI_wrapper.jl:900
 [15] optimize!
    @ C:\Users\rulia\.julia\packages\MathOptInterface\eoIu0\src\Bridges\bridge_optimizer.jl:348 [inlined]
 [16] optimize!
    @ C:\Users\rulia\.julia\packages\MathOptInterface\eoIu0\src\MathOptInterface.jl:81 [inlined]
 [17] optimize!(m::MathOptInterface.Utilities.CachingOptimizer{MathOptInterface.Bridges.LazyBridgeOptimizer{NLopt.Optimizer}, MathOptInterface.Utilities.UniversalFallback{MathOptInterface.Utilities.Model{Float64}}})
    @ MathOptInterface.Utilities C:\Users\rulia\.julia\packages\MathOptInterface\eoIu0\src\Utilities\cachingoptimizer.jl:285
 [18] optimize!(model::Model, optimizer_factory::Nothing; bridge_constraints::Bool, ignore_optimize_hook::Bool, kwargs::Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
    @ JuMP C:\Users\rulia\.julia\packages\JuMP\2IF9U\src\optimizer_interface.jl:195
 [19] optimize! (repeats 2 times)
    @ C:\Users\rulia\.julia\packages\JuMP\2IF9U\src\optimizer_interface.jl:167 [inlined]
 [20] refinegrain(grain_spot_list::Matrix{Float64}, parameters::para, U::Matrix{Float64}, B::Matrix{Float64}, pos::Matrix{Float64})
    @ Main C:\Users\rulia\OneDrive\My_code\CodeOther\ZhangYubin\julia\function_indexing_ff_LabDCT.jl:701
 [21] top-level scope
    @ REPL[611]:1

It looks like fitgrains ends up generating a complex number somewhere. Make sure it is defined for all possible input values.

I can’t reproduce your code because I don’t know what some of the functions are.
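
That said, one plausible culprit from the stack trace: sum(...; dims=2) returns a 1×1 matrix, not a scalar, and acos applied to a matrix is the matrix arccosine from LinearAlgebra, which goes complex as soon as the entry falls outside [-1, 1] or is NaN (e.g. if normr divides a zero row by a zero norm). A sketch of a scalar, clamped replacement (the clamp is my addition, to guard against rounding drift):

using LinearAlgebra

# Sketch: angle between two vectors as a plain scalar, so acos stays real.
function angle_between(a::AbstractVector, b::AbstractVector)
    c = dot(a, b) / (norm(a) * norm(b))
    return acos(clamp(c, -1.0, 1.0))  # clamp: |c| can drift to 1 + eps
end

Inside fitgrains, Gv_angle[i] = angle_between(Gv[i, :], refined_Gvs[i, :]) would then replace the acos(sum(...))[1] line. Separately, the zeros(...) buffers in fitgrains are hard-coded Float64, which will also break autodiff = true once ForwardDiff calls the function with dual numbers; allocating them with zeros(typeof(x1), ...) instead avoids that.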