I am not an expert, but I think the way to go is to cache the result of the last calculation in two variables, `x_cached` and `y_cached`. Then, before performing the expensive computation, you check whether the vector `x` is the same as `x_cached`, for which you have already computed `y` and stored it in `y_cached`.
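The example below is adapted from the NLopt.jl tutorial problem (minimizing `sqrt(x[2])` subject to two cubic constraints with `:LD_MMA`), with the caching check added to both the objective and the constraint callbacks: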
using NLopt

# Stand-in for the expensive computation shared by the objective
# and the constraints.
function SomeComplexCalculations(x)
    println("complex calculation with $x")
    return x .^ 2
end

function myoptim()
    # Cache of the last evaluation point and its expensive result.
    # Filling with NaN guarantees that the first comparison
    # `x_cached != x` is true, so the computation runs on the first call.
    x_cached = fill(NaN, 2)
    y_cached = fill(NaN, 2)

    function myfunc(x::Vector, grad::Vector)
        if length(grad) > 0
            grad[1] = 0
            grad[2] = 0.5 / sqrt(x[2])
        end
        if x_cached != x
            println("objfunc: not cached $x")
            x_cached[:] = x
            y_cached[:] = SomeComplexCalculations(x)
        else
            println("objfunc: using cached $x")
        end
        return sqrt(x[2])
    end

    function myconstraint(x::Vector, grad::Vector, a, b)
        if length(grad) > 0
            grad[1] = 3a * (a * x[1] + b)^2
            grad[2] = -1
        end
        if x_cached != x
            println("constraint: not cached $x")
            x_cached[:] = x
            y_cached[:] = SomeComplexCalculations(x)
        else
            println("constraint: using cached $x")
        end
        return (a * x[1] + b)^3 - x[2]
    end

    opt = Opt(:LD_MMA, 2)
    opt.lower_bounds = [-Inf, 0.0]
    opt.xtol_rel = 1e-4
    opt.min_objective = myfunc
    inequality_constraint!(opt, (x, g) -> myconstraint(x, g, 2, 0), 1e-8)
    inequality_constraint!(opt, (x, g) -> myconstraint(x, g, -1, 1), 1e-8)

    (minf, minx, ret) = optimize(opt, [1.234, 5.678])
    numevals = opt.numevals  # the number of function evaluations
    println("got $minf at $minx after $numevals iterations (returned $ret)")
end
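Since the cache check is duplicated in both callbacks, you may prefer to factor it into a small helper defined inside `myoptim`, next to the cache variables, so that it closes over them. This is only a sketch; `get_cached` is a name I made up, not anything from NLopt.jl:

# Hypothetical helper, defined inside myoptim so that it closes
# over x_cached and y_cached.
function get_cached(x, who)
    if x_cached != x
        println("$who: not cached $x")
        x_cached[:] = x
        y_cached[:] = SomeComplexCalculations(x)
    else
        println("$who: using cached $x")
    end
    return y_cached
end

Inside `myfunc` you would then write `y = get_cached(x, "objfunc")` and use `y` wherever the expensive result is needed; `myconstraint` would do the same with `"constraint"`.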
I was surprised to see that the objective function is evaluated first and then the constraints, but this may have to do with the choice of algorithm.
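If you want to see the call order explicitly, one option is to wrap each callback with a shared counter before registering it. Again just a sketch; `traced` and `eval_counter` are names I made up:

# Shared call counter, defined inside myoptim.
eval_counter = Ref(0)

# Wrap a callback so it logs its call number and name before running.
function traced(f, name)
    return (x, g) -> begin
        eval_counter[] += 1
        println("call #$(eval_counter[]): $name at $x")
        return f(x, g)
    end
end

opt.min_objective = traced(myfunc, "objective")
inequality_constraint!(opt, traced((x, g) -> myconstraint(x, g, 2, 0), "constraint a=2,b=0"), 1e-8)
inequality_constraint!(opt, traced((x, g) -> myconstraint(x, g, -1, 1), "constraint a=-1,b=1"), 1e-8)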