As the title suggests, I created a structure (the begin-end block) and wrote a procedure (the while-end loop).
import JuMP, Gurobi; GRB_ENV = Gurobi.Env();
COT = 1e-7 # cut-off tolerance
model = JuMP.direct_model(Gurobi.Optimizer(GRB_ENV))
JuMP.set_silent(model)
JuMP.@variable(model, x[i = 1:4])
JuMP.@objective(model, Min, sum(x))
JuMP.set_lower_bound.(x[1:3], 0)
JuMP.@constraint(model, c_inherent, x[4] >= 0)
begin # 🟤 create the lazy cut structure
    c_lazy_vec = [JuMP.@build_constraint(x[i] >= i) for i in 1:3]
    c_lazy_ref = Vector{JuMP.ConstraintRef}(undef, 3)
    is_idle_vec = trues(3) # all lazy cuts are idle initially
end
JuMP.optimize!(model); JuMP.assert_is_solved_and_feasible(model)
xv = JuMP.value.(x) # the initial trial point (which might be infeasible)
# 🟣 the procedure of adding indispensable lazy cuts
cnt = 0
while true
    exists_violating = false
    for i in findall(is_idle_vec)
        if xv[i] < i - COT # 🔵 interplay of the current trial `xv` and the i-th lazy cut
            exists_violating = true
            @info "the $i-th lazy cut is an idle and (strongly) violating cut, thus we are adding it"
            c_lazy_ref[i], is_idle_vec[i] = JuMP.add_constraint(model, c_lazy_vec[i]), false # 🟢
            JuMP.optimize!(model); JuMP.assert_is_solved_and_feasible(model)
            xv = JuMP.value.(x)
            @info "the trial point is updated to $xv"
            @info "the idle vector is updated to $is_idle_vec"
            cnt += 1; break
        end
    end
    if !exists_violating
        @info "after adding $cnt lazy cuts, the current trial `xv` is proved to be feasible"
        break
    end
end
# 🟠 if we want to strip off the 2nd lazy cut
JuMP.delete(model, c_lazy_ref[2])
is_idle_vec[2] = true
JuMP.optimize!(model); JuMP.assert_is_solved_and_feasible(model)
xv = JuMP.value.(x)
@info xv
You’re welcome to use this approach, but it is not our recommended way to write an iterative cutting plane algorithm. I would write your code as:
using JuMP
import Gurobi
GRB_ENV = Gurobi.Env()
atol = 1e-7 # cut-off tolerance
model = Model(() -> Gurobi.Optimizer(GRB_ENV))
set_silent(model)
@variable(model, x[i in 1:4] >= 0)
@objective(model, Min, sum(x))
constraints_added = Dict{Int,ConstraintRef}()
is_idle_vec = trues(3)
is_infeasible = true
while is_infeasible
    is_infeasible = false
    optimize!(model)
    assert_is_solved_and_feasible(model)
    xv = value.(x)
    for i in findall(is_idle_vec)
        if xv[i] < i - atol
            is_infeasible = true
            constraints_added[i] = @constraint(model, x[i] >= i)
            is_idle_vec[i] = false
        end
    end
end
delete(model, constraints_added[2])
optimize!(model)
assert_is_solved_and_feasible(model)
value.(x)
A key benefit of a lazy constraint/cutting plane approach is that creating the data for the constraints can be delayed until they are proved to be needed. If you can enumerate all of the constraints and have the memory to create them via @build_constraint, you probably don't need the lazy constraint approach.
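As a small hedged sketch of that point (continuing from the rewrite above; the separate function below is a hypothetical stand-in, not something from this thread), the data of a cut is only computed once a separation routine reports a violation, so no constraint objects need to be pre-built:
# `separate(xv)` is a hypothetical oracle: it returns `nothing` if `xv` violates no cut,
# or the data `(a, b)` of one violated cut `a' * x >= b`.
while true
    optimize!(model)
    assert_is_solved_and_feasible(model)
    xv = value.(x)
    cut = separate(xv)              # the cut's data is created only now, if at all
    cut === nothing && break        # no violated cut: xv is feasible
    a, b = cut
    @constraint(model, a' * x >= b) # the only moment a constraint is materialized
end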
You omitted my innermost break? I don't quite understand why.
I take your point on not using @build_constraint.
It is folklore that a Dict can be slow (here), so I thought using an Array could be faster. (An #undef slot will not occupy memory, right?)
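To make that concrete, here is a small sketch (illustrative names, not from the thread) of the two bookkeeping styles side by side:
import JuMP
n = 3
# Array style (what I used): a Bool mask plus a preallocated slot vector;
# the slots stay #undef until the corresponding cut is added, and each check is a Bool read.
is_idle = trues(n)
c_ref = Vector{JuMP.ConstraintRef}(undef, n)
# Dict style (the suggested rewrite): an entry exists only for an added cut,
# so each check is a hash lookup (haskey) rather than a Bool read.
c_dict = Dict{Int,JuMP.ConstraintRef}()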
And I use the name exists_violation because I may run this lazy-cut block many times (so the trial point is not necessarily feasible after only one batch).
Finally, I suspect that findall might be a bit costly (it allocates an index vector), so I think it might be better to loop directly.
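For the 1-D code above, that just means replacing `for i in findall(is_idle_vec)` with a plain loop and early `continue`s; a sketch of the inner loop, reusing the names from my first code block:
for i in eachindex(is_idle_vec)
    is_idle_vec[i] || continue  # this cut is already in the model
    xv[i] < i - COT || continue # this cut is not (strongly) violated
    exists_violating = true
    c_lazy_ref[i], is_idle_vec[i] = JuMP.add_constraint(model, c_lazy_vec[i]), false
    JuMP.optimize!(model); JuMP.assert_is_solved_and_feasible(model)
    xv = JuMP.value.(x)
    cnt += 1; break
end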
In a 2-dimensional case, the revised code might look like this
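# (M, N, lpr, R_x, lpr_DI, lpr_DJ, DI, DJ, and solve_to_normality come from my larger model and are not shown here)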
is_cd_idle, cd_ref = trues(M, N), Matrix{JuMP.ConstraintRef}(undef, M, N)
while true
    exists_violation = false
    for i in axes(is_cd_idle, 1), j in axes(is_cd_idle, 2)
        is_cd_idle[i, j] || continue
        R_x[i, j] + DI[i] - DJ[j] < -COT || continue
        exists_violation = true
        is_cd_idle[i, j], cd_ref[i, j] = false, JuMP.@constraint(lpr, R_x[i, j] + lpr_DI[i] - lpr_DJ[j] >= 0)
        solve_to_normality(lpr)
        DI, DJ = JuMP.value.(lpr_DI), JuMP.value.(lpr_DJ)
        break
    end
    exists_violation || break # number of cuts added so far = count(.!is_cd_idle)
end
@info "after adding $(count((.!is_cd_idle))) cuts, there is no more violation"