Hi everyone,

I am trying to reproduce this optimization below using JuMP.

I’ve written a function below to solve this optimization.

```
function LEDOA(V::Function, V_dot::Function, N::Int)
model = Model(Ipopt.Optimizer)
register(model, :V, 1, V; autodiff=true)
register(model, :V_dot, 1, V_dot; autodiff=true)
@variable(model, c >= eps())
@variable(model, x[1:N])
@NLobjective(model, Min, c)
@NLexpression(model, vx, V(x))
@NLexpression(model, vxdot, V_dot(x))
@constraint(model, vx - c == 0)
@constraint(model, vxdot == 0)
optimize!(model)
status = termination_status(model)
if status == MOI.OPTIMAL
return value(c), value.(x)
else
@info "Optimization failed: $status"
return nothing, nothing
end
end
```

I have a self-defined scalar function V(x) that is required for the constraints. An example of this might be

```
V = (x) -> (x' * P * x)[1]
Vdot = (x) -> x' * P * A * x + x' * P * H * kron(x, x)
```

But with this, I am receiving the following error:

```
ERROR: Unexpected array JuMP.VariableRef[_[2], _[3]] in nonlinear expression. Nonlinear expressions may contain only scalar expressions.
Stacktrace:
[1] error(s::String)
@ Base .\error.jl:35
[2] parse_expression(::MathOptInterface.Nonlinear.Model, expr::MathOptInterface.Nonlinear.Expression, x::Vector{…}, parent_index::Int64)
```

Since V(x) is a scalar function, I expect this to work for `@NLexpression`

. I would like to know if my approach is wrong, and if it is, please let me know how to resolve this error. Thanks!

To reproduce this, please use the following example:

```
A = [-2.0 0.0; 0.0 -1.0]
H = [0.0 0.5 0.5 0.0; 0.0 0.5 0.5 0.0]
P = [0.02078 0; 0 0.01292]
V = (x) -> (x' * P * x)[1]
Vdot = (x) -> x' * P * A * x + x' * P * H * kron(x, x)
c, x = LEDOA(V, Vdot, 2)
```

I’m using Julia 1.10.0.

**EDIT 1**:

I tried the following code based on the new nonlinear modeling features I was unaware of until today.

```
function LEDOA(V::Function, V_dot::Function, N::Int)
@variable(model, c >= eps())
@variable(model, x[1:N])
@objective(model, Min, c)
@operator(model, vx, N, (x...) -> V(collect(x)))
@operator(model, vxdot, N, (x...) -> V_dot(collect(x)))
@constraint(model, vx(x...) .== c)
@constraint(model, vxdot(x...) .== 0)
optimize!(model)
status = termination_status(model)
return value(c), value.(x)
end
```

This did run without any errors but since this is a local solver the answer always ended up being 0, which is not what I want.

**EDIT 2**:

I am trying out Alpine.jl to do the global optimization. But the following code

```
function LEDOA(V::Function, V_dot::Function, N::Int)
ipopt = optimizer_with_attributes(Ipopt.Optimizer)
model = Model(
optimizer_with_attributes(
Alpine.Optimizer,
"nlp_solver" => ipopt,
),
)
set_string_names_on_creation(model, false)
register(model, :V, 1, V; autodiff=true)
register(model, :V_dot, 1, V_dot; autodiff=true)
@variable(model, c >= eps())
@variable(model, x[1:N])
@objective(model, Min, c)
@NLconstraint(model, V(x...) == c)
@NLconstraint(model, V_dot(x...) == 0)
@constraint(model, [i = 1:N], xtol <= x[i])
optimize!(model)
return value(c), value.(x)
end
```

produces the error

```
ERROR: Alpine does not currently support `V` operator acting on a variable
Stacktrace:
[1] error(s::String)
@ Base .\error.jl:35
[2] expr_arrangeargs(args::Vector{Any}; kwargs::@Kwargs{})
```

The problem I was solving was apparently a global optimization problem, and Alpine.jl seemed like my best choice (I did see that EAGO.jl was also an option). But I’m struggling to allow the constraints using the scalar functions. Does anyone know how to accomplish this?