Hi everyone.
I’m trying to solve a constrained optimization problem of the form:
\min_{x} f(x)
\text{s.t.}\quad c(x) \le u_c
I would like to solve it using only the objective function and the constraint function, without supplying derivatives. I tried Optim and NLopt, but the examples I found always require the gradient and the constraint Jacobian. For example:
Optim
using Optim

# Rosenbrock objective
fun(x) = (1.0 - x[1])^2 + 100.0 * (x[2] - x[1]^2)^2

# Nonlinear constraint c(x) = x[1]^2 + x[2]^2, filled in place
con_c!(c, x) = (c[1] = x[1]^2 + x[2]^2; c)

# Constraint Jacobian
function con_jacobian!(J, x)
    J[1, 1] = 2 * x[1]
    J[1, 2] = 2 * x[2]
    J
end

# Constraint Hessian contribution, weighted by the multiplier λ
function con_h!(h, x, λ)
    h[1, 1] += λ[1] * 2
    h[2, 2] += λ[1] * 2
end

lx = Float64[]; ux = Float64[]   # no box constraints on x
lc = [-Inf];    uc = [0.5^2]     # constraint bounds: x[1]^2 + x[2]^2 <= 0.5^2

dfc = TwiceDifferentiableConstraints(con_c!, con_jacobian!, con_h!,
                                     lx, ux, lc, uc)

x0 = [0.25, 0.25]                      # starting point
df = TwiceDifferentiable(fun, x0)      # objective derivatives via finite differences (default)
res = optimize(df, dfc, x0, IPNewton())
The TwiceDifferentiableConstraints() constructor needs the constraint Jacobian and Hessian.
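If Optim could build those constraint derivatives itself, that would already solve half of my problem. I think I have seen an autodiff-based constructor for the constraints in NLSolversBase, but I haven't been able to verify it, so this is only a sketch of what I mean (the :forward argument to TwiceDifferentiableConstraints is the part I'm unsure about; fun, con_c!, and the bounds are reused from above):

# Sketch only: assumes TwiceDifferentiableConstraints accepts an autodiff symbol
# (:forward), so the constraint Jacobian/Hessian come from ForwardDiff (unverified).
x0 = [0.25, 0.25]
df = TwiceDifferentiable(fun, x0; autodiff = :forward)   # objective derivatives via ForwardDiff
dfc_ad = TwiceDifferentiableConstraints(con_c!, lx, ux, lc, uc, :forward)   # assumed constructor
res = optimize(df, dfc_ad, x0, IPNewton())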
NLopt
using NLopt

# Rosenbrock objective; fills grad in place when the algorithm requests it
function rosenbrockf(x::Vector, grad::Vector)
    if length(grad) > 0
        grad[1] = -2.0 * (1.0 - x[1]) - 400.0 * (x[2] - x[1]^2) * x[1]
        grad[2] = 200.0 * (x[2] - x[1]^2)
    end
    return (1.0 - x[1])^2 + 100.0 * (x[2] - x[1]^2)^2
end

# Inequality constraint x[1]^2 + x[2]^2 - radius <= 0 (NLopt expects g(x) <= 0)
function r_constraint(x::Vector, grad::Vector; radius = 0.8)
    if length(grad) > 0
        grad[1] = 2 * x[1]
        grad[2] = 2 * x[2]
    end
    return x[1]^2 + x[2]^2 - radius
end

rad = 1.0
opt = Opt(:LD_MMA, 2)                 # gradient-based algorithm
lower_bounds!(opt, [-5.0, -5.0])
min_objective!(opt, (x, g) -> rosenbrockf(x, g))
inequality_constraint!(opt, (x, g) -> r_constraint(x, g, radius = rad))
ftol_rel!(opt, 1e-9)
(minfunc, minx, ret) = optimize(opt, [-1.0, 0.0])
The LD_MMA algorithm needs the gradient. I tried other, derivative-free algorithms instead, but I just get (0.0, [-1.0, 0.0], :FORCED_STOP); an example of such an attempt is below.
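This is roughly what one of those attempts looks like (a sketch using :LN_COBYLA, which as far as I understand handles nonlinear inequality constraints without gradients; the grad arguments are simply never filled):

# Derivative-free attempt (sketch): COBYLA never asks for gradients, so the
# length(grad) > 0 branches in the callbacks above are never entered.
opt = Opt(:LN_COBYLA, 2)
lower_bounds!(opt, [-5.0, -5.0])
min_objective!(opt, (x, g) -> rosenbrockf(x, g))
inequality_constraint!(opt, (x, g) -> r_constraint(x, g, radius = rad))
ftol_rel!(opt, 1e-9)
(minfunc, minx, ret) = optimize(opt, [-1.0, 0.0])   # this is where I see :FORCED_STOP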
Do you know a way to solve this problem without providing the gradient?