Here I furnish a large-scale case. I hope that, given this new case, the user cuts will prove advantageous.
import JuMP, Gurobi; import JuMP.value as 𝚟; GRB_ENV = Gurobi.Env()
using Logging; global_logger(ConsoleLogger(Logging.Info))
import Random
Random.seed!(1);
COT = 1e-4 # cut tolerance: a new cut is added only if it is violated by more than this
# Test case 1️⃣: modest scale
# M, N = 9, 10; ⭐ = N/2
# Z_cap, C_y, C_z, R_x, D_nom, D_dev = let
# R_x = [2.75 12.33 8.58 0.67 13.58 7.33 9.42 5.67 9.83 4.0; 1.08 5.67 6.5 14.42 0.67 4.0 12.75 6.08 4.0 9.42; 4.42 6.92 12.75 4.42 11.5 7.75 6.92 7.33 13.17 11.08; 4.0 11.08 4.0 13.58 12.33 5.25 4.83 4.0 11.5 14.0; 3.17 1.08 11.92 8.58 11.5 1.08 7.75 1.5 5.67 3.17; 3.58 2.75 11.5 2.75 2.33 14.42 4.83 5.67 3.17 10.25; 11.92 9.83 11.92 2.33 2.75 13.17 1.92 2.33 12.75 8.17; 4.0 14.0 4.0 12.33 1.08 9.83 1.92 10.67 5.67 3.58; 14.0 9.83 12.33 14.0 7.33 11.92 8.58 2.75 12.33 10.67]
# C_z = 2 * [4.0, 2.5, 4.0, 4.0, 2.5, 2.5, 3.0, 3.5, 3.0]
# C_y = [10, 12, 10, 9, 12, 9, 12, 9, 11] .* C_z
# Z_cap = [224, 224, 221, 223, 221, 226, 224, 219, 218]
# D_nom = [394, 70, 238, 6, 225, 383, 65, 136, 13, 83]
# D_dev = [40, 40, 40, 30, 10, 10, 40, 10, 30, 30]
# Z_cap, C_y, C_z, R_x, D_nom, D_dev
# end; C_ζ = 20 * C_z;
# Test case 2️⃣: large scale
M, N = 69, 100; ⭐ = N/2 # ⭐ is the budget of uncertainty: it upper-bounds sum(g) in the models below
Z_cap, C_y, C_z, R_x, D_nom, D_dev = let
    R_x = rand(0.3:0.00117:8.7, M, N)
    C_z = rand(2:0.00117:5, M)
    C_y = rand(9:0.017:15, M) .* C_z
    Z_cap = [2.42, 2.42, 1.78, 2.7, 2.06, 1.78, 1.14, 3.5, 3.1, 2.02, 1.82, 3.42, 2.42, 0.9, 3.06, 1.18, 1.7, 3.46, 1.46, 1.62, 1.26, 1.3, 3.46, 2.66, 1.82, 2.34, 1.54, 1.82, 2.58, 2.9, 2.66, 3.14, 2.5, 1.62, 2.78, 3.3, 1.38, 1.26, 2.74, 1.82, 3.14, 1.38, 2.74, 0.94, 3.06, 3.22, 2.62, 2.42, 3.02, 3.18, 1.58, 2.7, 3.06, 1.7, 3.22, 2.74, 2.54, 3.26, 1.3, 2.38, 3.3, 1.02, 2.42, 2.38, 3.1, 1.3, 2.02, 0.94, 3.46];
    D_nom = [0.11, 0.57, 0.77, 0.35, 0.49, 0.25, 0.25, 0.45, 0.05, 0.77, 0.39, 0.83, 0.47, 0.55, 0.43, 0.47, 0.29, 0.49, 0.47, 0.75, 0.09, 0.59, 0.11, 0.09, 0.11, 0.49, 0.29, 0.61, 0.23, 0.53, 0.15, 0.85, 0.17, 0.23, 0.25, 0.57, 0.65, 0.47, 0.65, 0.11, 0.09, 0.13, 0.57, 0.29, 0.19, 0.39, 0.31, 0.63, 0.61, 0.51, 0.75, 0.41, 0.57, 0.59, 0.47, 0.05, 0.29, 0.49, 0.53, 0.79, 0.21, 0.83, 0.13, 0.31, 0.63, 0.43, 0.05, 0.33, 0.77, 0.43, 0.11, 0.41, 0.19, 0.61, 0.89, 0.43, 0.15, 0.35, 0.43, 0.63, 0.59, 0.89, 0.23, 0.23, 0.33, 0.63, 0.69, 0.35, 0.79, 0.31, 0.69, 0.81, 0.15, 0.19, 0.27, 0.29, 0.17, 0.83, 0.85, 0.45];
    D_dev = [0.56, 0.26, 0.5, 0.5, 0.71, 0.77, 0.32, 0.53, 0.38, 0.2, 0.2, 0.29, 0.68, 0.59, 0.38, 0.29, 0.38, 0.41, 0.26, 0.35, 0.56, 0.68, 0.8, 0.8, 0.62, 0.44, 0.47, 0.53, 0.68, 0.44, 0.26, 0.35, 0.44, 0.59, 0.59, 0.41, 0.53, 0.8, 0.29, 0.62, 0.29, 0.59, 0.23, 0.56, 0.68, 0.2, 0.44, 0.74, 0.65, 0.62, 0.8, 0.71, 0.35, 0.56, 0.23, 0.44, 0.65, 0.35, 0.38, 0.38, 0.44, 0.47, 0.5, 0.38, 0.74, 0.29, 0.2, 0.29, 0.62, 0.23, 0.41, 0.8, 0.77, 0.2, 0.35, 0.65, 0.32, 0.23, 0.74, 0.38, 0.68, 0.56, 0.59, 0.32, 0.8, 0.74, 0.38, 0.2, 0.5, 0.68, 0.74, 0.8, 0.68, 0.53, 0.44, 0.59, 0.26, 0.68, 0.44, 0.38];
    Z_cap, C_y, C_z, R_x, D_nom, D_dev
end; C_ζ = 20 * C_z;
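# Data overview (roughly a capacitated source-demand structure with M sources and N demands):
#   Z_cap[i]  : the capacity that can be installed at source i once it is opened (1st stage)
#   C_y, C_z  : opening cost and unit-capacity cost of the sources (1st stage)
#   R_x[i, j] : unit cost of serving demand j from source i (2nd stage)
#   C_ζ[i]    : unit cost of the expensive substitute capacity ζ at source i (2nd stage)
#   the uncertain demand is d(g) = D_nom .+ D_dev .* g, with g ∈ [0, 1]^N and sum(g) ≤ ⭐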
function sm(p, x) return sum(p .* x) end;
function optimise(m)
JuMP.optimize!(m)
return (
JuMP.termination_status(m),
JuMP.primal_status(m),
JuMP.dual_status(m)
)
end;
macro set_objective_function(m, f) return esc(:(JuMP.set_objective_function($m, JuMP.@expression($m, $f)))) end;
function Model(name)
m = JuMP.direct_model(Gurobi.Optimizer(GRB_ENV))
JuMP.MOI.set(m, JuMP.MOI.Name(), name)
s = name[end-1]
s == 'm' && JuMP.set_objective_sense(m, JuMP.MIN_SENSE)
s == 'M' && JuMP.set_objective_sense(m, JuMP.MAX_SENSE)
last(name) == 's' && JuMP.set_silent(m)
m
end;
function solve_to_normality(m)
t, p, d = optimise(m)
name = JuMP.name(m)
t == JuMP.OPTIMAL || error("$name: $(string(t))")
p == JuMP.FEASIBLE_POINT || error("$name: (primal) $(string(p))")
(name[end-2] == 'l' && d != p) && error("$name: (dual) $(string(d))")
return JuMP.result_count(m)
end;
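# Naming convention relied on by `Model` and `solve_to_normality` above:
#   name[end]   == 's'       → the model is set silent
#   name[end-1] == 'm' / 'M' → the objective sense is MIN / MAX
#   name[end-2] == 'l'       → the model is an LP, so the dual status is checked as well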
function stage2_problem_primal(z, d) # reference: evaluates the true 2nd-stage cost for a given `z` and demand `d` (not called by the main algorithm)
    st2 = Model("st2_primal_lms")
    JuMP.@variable(st2, x[i = 1:M, j = 1:N] ≥ 0)
    JuMP.@variable(st2, ζ[i = 1:M] ≥ 0) # an expensive substitute for `z`
    JuMP.@constraint(st2, 𝙸[i = 1:M], z[i] + ζ[i] ≥ sum(x[i, :]))
    JuMP.@constraint(st2, 𝙹[j = 1:N], sum(x[:, j]) ≥ d[j])
    @set_objective_function(st2, sm(R_x, x) + sm(C_ζ, ζ))
    solve_to_normality(st2)
    return JuMP.objective_value(st2)
end;
function g2d(g) return D_nom .+ D_dev .* g end # the demand realized under scenario g
function set_𝙻_obj(z) return JuMP.set_objective_coefficient.(𝙻, ı, -z) end # a ★Partial★ setting: only the z-dependent part; the fixed part is set once below
function set_𝙳_obj(z, g) return JuMP.set_objective_coefficient.(𝙳, [𝙹; 𝙸], [g2d(g); -z]) end
function set_𝙶_obj(z, I, J) return @set_objective_function(𝙶, sm(J, g2d(𝚐)) - sm(I, z)) end # this one also involves a constant term, hence the full expression is set
function set_user_cut(m, c) return JuMP.MOI.set(JuMP.backend(m), Gurobi.ConstraintAttribute("Lazy"), JuMP.index(c), -1) end
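# Gurobi's per-constraint "Lazy" attribute: -1 puts the constraint into the user-cut pool (it is
# only used to strengthen LP relaxations), while 1/2/3 mark it as a lazy constraint; see Gurobi's
# documentation of the attribute for the exact semantics.
# The callback below fires whenever Gurobi finds a new MIP incumbent (MIPSOL) in Phase 2:
#   1. read the incumbent values of (common_expr, 𝚘, 𝚣);
#   2. solve 𝙻 to get a (sub)ub and a heuristic scenario g, possibly updating the global ub;
#   3. improve g by alternating 𝙳 (fix g, optimize the duals) and 𝙶 (fix the duals, optimize g);
#   4. if the resulting Benders cut is violated by more than COT, submit it as a lazy constraint.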
function mip_cb_function(cb_data, cb_where::Cint) # only used in Phase 2
    cb_where != Gurobi.GRB_CB_MIPSOL && return
    Gurobi.load_callback_variable_primal(cb_data, cb_where)
    common_value = JuMP.callback_value(cb_data, common_expr)
    o = JuMP.callback_value(cb_data, 𝚘)
    z = JuMP.callback_value.(cb_data, 𝚣)
    # now we seek an adverse demand scenario for the 1st-stage trial solution `z`
    set_𝙻_obj(z); Lˈs = (ul = [Inf, -Inf], g = Vector{Float64}(undef, N)); solve_to_normality(𝙻);
    Lˈs.ul[1], Lˈs.g[:] = JuMP.objective_bound(𝙻), 𝚟.(𝐠) # setting the (sub)ub is one-off; also grab a heuristic primal solution
    ub = common_value + Lˈs.ul[1]
    if ub < 𝙼ˈs.ul[1]
        𝙼ˈs.ul[1], 𝙼ˈs.z[:] = ub, z
        @info "update (global)ub to $ub"
    end
    while true # the BCA loop: alternate 𝙳 and 𝙶 until the (sub)lb stops improving
        set_𝙳_obj(z, Lˈs.g); solve_to_normality(𝙳);
        set_𝙶_obj(z, 𝚟.(𝙸), 𝚟.(𝙹)); solve_to_normality(𝙶);
        lb = JuMP.objective_value(𝙶)
        if lb > Lˈs.ul[2]
            Lˈs.ul[2], Lˈs.g[:] = lb, 𝚟.(𝚐)
        else
            @debug "After RLT-BCA, (sub)lb = $(Lˈs.ul[2]) < $(Lˈs.ul[1]) = (sub)ub"
            break
        end
    end
    set_𝙳_obj(z, Lˈs.g); solve_to_normality(𝙳); cn, pz = sm(𝚟.(𝙹), g2d(Lˈs.g)), -𝚟.(𝙸)
    if o < cn + sm(pz, z) - COT
        con = JuMP.@build_constraint(𝚘 >= cn + sm(pz, 𝚣))
        JuMP.MOI.submit(𝙼, JuMP.MOI.LazyConstraint(cb_data), con)
    end
end
๐ผ = Model("st1_bms"); JuMP.@variable(๐ผ, ๐);
JuMP.@variables(๐ผ, begin 0 โค ๐ข[1:M] โค 1, Int; ๐ฃ[1:M] โฅ 0 end); JuMP.@constraint(๐ผ, ๐ฃ .โค ๐บ .* ๐ข); JuMP.set_objective_coefficient.(๐ผ, [๐ข; ๐ฃ], [C_y; C_z]);
๐ณ = Model("st2_dual_lMs");
JuMP.@variable(๐ณ, 0 โค ๐ธ[i = 1:M] โค ๐[i]);
JuMP.@variable(๐ณ, 0 โค ๐น[j = 1:N]); JuMP.@constraint(๐ณ, [j = 1:N, i = 1:M],
๐น[j] โค ๐[i, j] + ๐ธ[i]
); ๐ถ = Model("opt_g_lMs"); JuMP.@variable(๐ถ, 0 โค ๐[n = 1:N] โค 1); JuMP.@constraint(๐ถ, sum(๐) โค โญ);
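# 𝙳 is the LP dual of the 2nd-stage problem for a fixed scenario g (its optimal value equals the
# recourse cost), while 𝙶 maximizes the dual objective over g for fixed duals (𝙸, 𝙹).
# Alternating the two ("BCA") gives a monotonically improving lower bound on the worst-case
# recourse cost; the model 𝙻 built next supplies the matching upper bound.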
๐ป = Model("lpr_lMs"); JuMP.@variable(๐ป, 0 โค ๐ [n = 1:N] โค 1); JuMP.@constraint(๐ป, sum(๐ ) โค โญ);
JuMP.@variable(๐ป, 0 โค ฤฑ[i = 1:M] โค ๐[i]);
JuMP.@variable(๐ป, 0 โค ศท[j = 1:N]); JuMP.@constraint(๐ป, ๐[i = 1:M, j = 1:N],
ศท[j] โค ๐[i, j] + ฤฑ[i]
);
JuMP.@variable(๐ป, 0 โค ๐ ศท[nj = 1:N]) # only diagonal, as it occurs in Obj
JuMP.@variable(๐ป, 0 โค ๐ Xฤฑ[n = 1:N, i = 1:M]) # needed to specify the upper_bound of `๐ ศท`
JuMP.set_objective_coefficient.(๐ป, [ศท; ๐ ศท], [๐ฑ; ๐
]) # Obj: the โ
Fixed โ
part
JuMP.@constraint(๐ป, ๐ ศท .โค ศท) # (๐ โค 1) โจ (0 โค ศท) [a part of 1/10] โ
JuMP.@constraint(๐ป, [n = 1:N, i = 1:M], ๐ ศท[n] โค ๐ [n] * ๐[i, n] + ๐ Xฤฑ[n, i]) # (g โฅ 0) โจ ๐ [a part of 1/10] โ
JuMP.@constraint(๐ป, [n = 1:N, i = 1:M], ๐ Xฤฑ[n, i] โค ๐[i] * ๐ [n]) # (g โฅ 0) โจ (๐ธ โค ๐) [1/10] ๐ฃ
JuMP.@constraint(๐ป, [n = 1:N, i = 1:M], ๐ Xฤฑ[n, i] โค ฤฑ[i]) # (๐ โค 1) โจ (0 โค ๐ธ) [1/10] ๐ฃ
JuMP.@constraint(๐ป, [i = 1:M], sum(๐ Xฤฑ[:, i]) โค โญ * ฤฑ[i]) # (sum(๐ ) โค โญ) โจ (0 โค ๐ธ) [1/10]
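# 𝙻 is an LP relaxation of the bilinear worst-case subproblem (max over the scenario g and the
# duals jointly): the products g[j]*ȷ[j] and g[n]*ı[i] are replaced by the variables 𝐠ȷ and 𝐠Xı,
# restricted only by the RLT/McCormick-style products of valid inequalities recorded in the
# comments above. Its objective bound is therefore a valid (sub)ub on the worst-case recourse
# cost, and 𝚟.(𝐠) serves as the starting scenario for the BCA loop.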
@info "๐ฃ Start Phase 0: adding an initial cut"
JuMP.unset_integer.(๐ข); ๐ผหs = (ul = [Inf, -Inf], z = Vector{Float64}(undef, M))
solve_to_normality(๐ผ); z = ๐.(๐ฃ); # โ๏ธ the initial solve of `st1_bms`, No cut yet, and lb is invalid
set_๐ป_obj(z); Lหs = (ul = [Inf, -Inf], g = Vector{Float64}(undef, N)); solve_to_normality(๐ป);
Lหs.ul[1], Lหs.g[:] = JuMP.objective_bound(๐ป), ๐.(๐ ) # setting upper bound is one-off, get a heuristic primal solution
while true
set_๐ณ_obj(z, Lหs.g); solve_to_normality(๐ณ)
set_๐ถ_obj(z, ๐.(๐ธ), ๐.(๐น)); solve_to_normality(๐ถ);
lb = JuMP.objective_value(๐ถ)
if lb > Lหs.ul[2]
Lหs.ul[2], Lหs.g[:] = lb, ๐.(๐)
else
@debug "After RLT_BCA, (sub)lb = $(Lหs.ul[2]) < $(Lหs.ul[1]) = (sub)ub"
break
end
end
𝙼ˈs.ul[1], 𝙼ˈs.z[:] = JuMP.objective_value(𝙼) + Lˈs.ul[1], z # the feasible value and solution at initialization
JuMP.@expression(𝙼, common_expr, JuMP.objective_function(𝙼)); JuMP.set_objective_coefficient(𝙼, 𝚘, 1); # ★ a one-shot turn-on: record the 1st-stage cost as `common_expr`, then add 𝚘 to the objective
set_𝙳_obj(z, Lˈs.g); solve_to_normality(𝙳); the_first_cut = JuMP.@constraint(𝙼, 𝚘 >= sm(𝚟.(𝙹), g2d(Lˈs.g)) - sm(𝚟.(𝙸), 𝚣))
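# Note: `the_first_cut` must be in place before 𝙼 is solved again, because once 𝚘 carries an
# objective coefficient the master LP would be unbounded without at least one cut on 𝚘.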
@info "๐ฃ Start Phase 1: adding some cuts for the continuous relaxation of the master problem"
from_LP_cut_vec = JuMP.ConstraintRef[]; push!(from_LP_cut_vec, the_first_cut)
solve_to_normality(𝙼); # ★ this is indeed a regular (LP) solve
𝙼ˈs.ul[2], z = JuMP.objective_bound(𝙼), 𝚟.(𝚣);
@info "The 1st (global)lb = $(𝙼ˈs.ul[2]) < $(𝙼ˈs.ul[1]) = (global)ub; now we start the main loop of Phase 1"
t0 = time()
for ite = 1:typemax(Int)
    global z, Lˈs, t0
    set_𝙻_obj(z); Lˈs = (ul = [Inf, -Inf], g = Vector{Float64}(undef, N)); solve_to_normality(𝙻);
    Lˈs.ul[1], Lˈs.g[:] = JuMP.objective_bound(𝙻), 𝚟.(𝐠) # setting the (sub)ub is one-off; also grab a heuristic primal solution
    ub = 𝚟(common_expr) + Lˈs.ul[1]
    if ub < 𝙼ˈs.ul[1]
        𝙼ˈs.ul[1], 𝙼ˈs.z[:] = ub, z
        @info "ite = $ite ▶ $(𝙼ˈs.ul[2]) < $(𝙼ˈs.ul[1]) (ub improved)"
    else
        @info "ite = $ite ▶ $(𝙼ˈs.ul[2]) < $(𝙼ˈs.ul[1]) = ub* | this ub = $ub"
    end
    while true # the BCA loop again
        set_𝙳_obj(z, Lˈs.g); solve_to_normality(𝙳);
        set_𝙶_obj(z, 𝚟.(𝙸), 𝚟.(𝙹)); solve_to_normality(𝙶);
        lb = JuMP.objective_value(𝙶)
        if lb > Lˈs.ul[2]
            Lˈs.ul[2], Lˈs.g[:] = lb, 𝚟.(𝚐)
        else
            @debug "After RLT-BCA, (sub)lb = $(Lˈs.ul[2]) < $(Lˈs.ul[1]) = (sub)ub"
            break
        end
    end
    set_𝙳_obj(z, Lˈs.g); solve_to_normality(𝙳); cn, pz = sm(𝚟.(𝙹), g2d(Lˈs.g)), -𝚟.(𝙸)
    if 𝚟(𝚘) < cn + sm(pz, z) - COT
        push!(from_LP_cut_vec, JuMP.@constraint(𝙼, 𝚘 >= cn + sm(pz, 𝚣)))
    else
        @info "▶ Phase 1: After $(time() - t0) seconds, we found $(length(from_LP_cut_vec)) valid cuts"
        break
    end
    solve_to_normality(𝙼); # ★ this is indeed a regular (LP) solve
    𝙼ˈs.ul[2], z = JuMP.objective_bound(𝙼), 𝚟.(𝚣);
end
@info "๐ฃ Start Phase 2: employ the callback+lazy_cut method for MIP"
JuMP.unset_silent(๐ผ)
set_user_cut.(๐ผ, from_LP_cut_vec)
JuMP.set_binary.(๐ข); ๐ผหs = (ul = [Inf, -Inf], z = Vector{Float64}(undef, M))
JuMP.set_attribute(๐ผ, "LazyConstraints", 1)
JuMP.MOI.set(๐ผ, Gurobi.CallbackFunction(), mip_cb_function)
solve_to_normality(๐ผ) # MIP with callback_lazyCut
@info "โถ Phase 2:After $(JuMP.solve_time(๐ผ)) seconds, (global)lb = $(JuMP.objective_bound(๐ผ)) < $(๐ผหs.ul[1]) = (global)ub"
- Directly executing my code above is the “with Phase 1” variant, which takes ≤ 1 min to return to the REPL.
- To revert to the conventional algorithm, delete the `set_user_cut` line and delete the code block of “📣 Start Phase 1” (a sketch of the resulting flow is given below). That way it takes ≥ 1 hour to return to the REPL.
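For clarity, this is roughly what the conventional (Phase-2-only) flow looks like; it keeps Phase 0 and the callback exactly as above and simply skips the LP-cut phase and the user-cut tagging:

```julia
# conventional variant: after Phase 0, go straight to the MIP with lazy cuts only
JuMP.unset_silent(𝙼)
JuMP.set_binary.(𝚢); 𝙼ˈs = (ul = [Inf, -Inf], z = Vector{Float64}(undef, M))
JuMP.set_attribute(𝙼, "LazyConstraints", 1)
JuMP.MOI.set(𝙼, Gurobi.CallbackFunction(), mip_cb_function)
solve_to_normality(𝙼)
```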
Yet the convergence quality (in terms of `lb` and `ub`) of the two methods is almost the same. (I am not expecting exact convergence, since the application is a 2SARO, which contains an NP-hard subproblem. I adopted an approximate algorithm, RLT-BCA, to generate the Benders cuts.)
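For reference, every cut generated this way (the Phase-1 cuts and the Phase-2 lazy cuts alike) has the usual Benders form. Writing $\theta$ for `𝚘`, $z$ for `𝚣`, $g^*$ for the final scenario `Lˈs.g`, and $(I^*, J^*)$ for the corresponding duals `𝚟.(𝙸)`, `𝚟.(𝙹)`, the cut reads

$$\theta \;\ge\; \sum_{j=1}^{N} J^*_j \left( D^{\mathrm{nom}}_j + D^{\mathrm{dev}}_j\, g^*_j \right) \;-\; \sum_{i=1}^{M} I^*_i\, z_i,$$

which is exactly `𝚘 >= cn + sm(pz, 𝚣)` with `cn = sm(𝚟.(𝙹), g2d(Lˈs.g))` and `pz = -𝚟.(𝙸)`.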
Update: I find that the constraint attribute `Lazy` can be changed from `-1` (user cut) to `1`, `2`, or `3` (lazy cut). It is faster at `1`, and even faster at modes `2` and `3`.
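Concretely, that only requires passing a different value to the same attribute. A minimal sketch (the helper name `set_cut_lazy_level` and the final broadcast are mine; everything else mirrors the `set_user_cut` helper above):

```julia
# same mechanism as `set_user_cut`, but with the Lazy level as a parameter:
#   -1 → user cut; 1/2/3 → lazy constraint (larger values pull the cut into the
#   LP relaxation more eagerly; see Gurobi's docs for the exact semantics)
function set_cut_lazy_level(m, c, level::Int)
    JuMP.MOI.set(JuMP.backend(m), Gurobi.ConstraintAttribute("Lazy"), JuMP.index(c), level)
end
# e.g. replace `set_user_cut.(𝙼, from_LP_cut_vec)` with
# set_cut_lazy_level.(𝙼, from_LP_cut_vec, 3)
```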