The inappropriateness of applying RLT to solve the DBP in ARO lacking RCR

Adaptive robust optimization (ARO) is typically NP-hard.
It is known that, under right-hand-side (RHS) uncertainty, the subproblem is a disjoint bilinear program (DBP).
A naive way to solve the DBP is to employ Gurobi's nonconvex bilinear solving capability.
Nonetheless, the computational performance is unsatisfactory.
The reformulation-linearization technique (RLT) might be applicable, but it becomes questionable when we do NOT have relatively complete recourse (RCR).
This tutorial explains why; the example is taken from doi.org/10.1016/j.orl.2013.05.003.
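To fix ideas, here is a sketch (my notation, matching the code below rather than the paper) of the subproblem once a trial first-stage capacity z is fixed. The slacked feasibility check is

    max_{d ∈ D}  max_{(Ds, DJ) ∈ Π}  dᵀDJ − zᵀDs,
    D = { B_d + 40g : 0 ≤ g ≤ 1,  g₁ + g₂ ≤ 1.2,  g₁ + g₂ + g₃ ≤ 1.8 },
    Π = { (Ds, DJ) ≥ 0 : Ds_i − DJ_j ≥ 0 ∀ i, j,  Ds ≤ 1 }.

The two feasible sets D and Π do not interact; d and DJ meet only in the bilinear term dᵀDJ, hence a disjoint bilinear program. Its optimal value is zero when z admits feasible recourse for every d in D, and strictly positive otherwise. So a feasibility cut is warranted only if we can certify positivity, and feasibility of z is certified only if we can close the gap at zero; a relaxation bound alone (such as the RLT bound below) can fail to do either.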

import JuMP, MosekTools, Ipopt
import Gurobi; const GRB_ENV = Gurobi.Env();
import LinearAlgebra.tr as tr; function sm(p, x) return sum(p .* x) end
# The inappropriateness of applying RLT to solve the DBP in ARO lacking RCR
function optimise(m) return (JuMP.optimize!(m); (JuMP.termination_status(m), JuMP.primal_status(m))) end
macro set_objective_function(m, f) return esc(:(JuMP.set_objective_function($m, JuMP.@expression($m, $f)))) end
function Model(solver, mode)
    solver == "Ipopt"  && (m = JuMP.Model(Ipopt.Optimizer))
    solver == "Mosek"  && (m = JuMP.Model(() -> MosekTools.Optimizer()))
    solver == "Gurobi" && (m = JuMP.Model(() -> Gurobi.Optimizer(GRB_ENV)))
    occursin("m", mode) && JuMP.set_objective_sense(m, JuMP.MIN_SENSE)
    occursin("M", mode) && JuMP.set_objective_sense(m, JuMP.MAX_SENSE)
    occursin("s", mode) && JuMP.set_silent(m)
    m
end
macro opt_ass_opt(m)
    n = string(m, ": ")
    return esc(quote
        let (s, p) = optimise($m)
            (
                s == ifelse(JuMP.solver_name($m) == "Ipopt", JuMP.LOCALLY_SOLVED, JuMP.OPTIMAL)
                && p == JuMP.FEASIBLE_POINT
            )   || error($n * string(s))
        end
    end)
end
# 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎
C_x, C_y, C_z, B_d = let # Data
    [22 33 24; 33 23 30; 20 25 27], [400, 414, 326], [18, 25, 20], [206, 274, 220]
end
st1 = Model("Mosek", "ms") # The 1st-stage problem, which picks a trial `z`
JuMP.@variable(st1, st1_y[1:3], Bin)
JuMP.@variable(st1, st1_z[1:3] >= 0); JuMP.@constraint(st1, st1_z .<= 800 * st1_y)
@set_objective_function(st1, sm(C_y, st1_y) + sm(C_z, st1_z))
function stage2_problem_primal(z, d)
    st2 = Model("Mosek", "ms")
    JuMP.@variable(st2, x[i = 1:3, j = 1:3] >= 0)
    JuMP.@constraint(st2, DI[i = 1:3], z[i]         >= sum(x[i, :]))
    JuMP.@constraint(st2, DJ[j = 1:3], sum(x[:, j]) >= d[j]        )
    @set_objective_function(st2, sm(C_x, x))
    @opt_ass_opt(st2)
    return JuMP.objective_value(st2)
end
function stage2_problem_dual(z, d)
    Dst2 = Model("Mosek", "Ms")
    JuMP.@variable(Dst2, DI[i = 1:3] >= 0)
    JuMP.@variable(Dst2, DJ[j = 1:3] >= 0)
    JuMP.@constraint(Dst2, x[i = 1:3, j = 1:3], C_x[i, j] + DI[i] - DJ[j] >= 0)
    @set_objective_function(Dst2, sm(DJ, d) - sm(DI, z))
    @opt_ass_opt(Dst2)
    return JuMP.objective_value(Dst2)
end
function slacked_stage2_problem_primal(z, d)
    sl2 = Model("Mosek", "ms")
    JuMP.@variable(sl2, s[i = 1:3] >= 0) # nonnegative slack variable
    JuMP.@variable(sl2, x[i = 1:3, j = 1:3] >= 0)
    JuMP.@constraint(sl2, Ds[i = 1:3], z[i] + s[i]  >= sum(x[i, :]))
    JuMP.@constraint(sl2, DJ[j = 1:3], sum(x[:, j]) >= d[j]        )
    @set_objective_function(sl2, sum(s))
    @opt_ass_opt(sl2)
    return JuMP.objective_value(sl2)
end
function slacked_stage2_problem_dual_objf(z, d, DJ, Ds) return sm(DJ, d) - sm(Ds, z) end
function slacked_stage2_problem_dual(z, d)
    Dsl2 = Model("Mosek", "Ms")
    JuMP.@variable(Dsl2, 0 <= Ds[i = 1:3] ≤ 1) # "≤ 1" ⇐ [s >= 0]
    JuMP.@variable(Dsl2, DJ[j = 1:3] >= 0)
    JuMP.@constraint(Dsl2, x[i = 1:3, j = 1:3], Ds[i] - DJ[j] >= 0)
    @set_objective_function(Dsl2, slacked_stage2_problem_dual_objf(z, d, DJ, Ds))
    @opt_ass_opt(Dsl2)
    objval = JuMP.objective_value(Dsl2)
    cn = JuMP.value(sm(DJ, d))
    pz = JuMP.value.(-Ds)
    return objval, cn, pz
end
function slacked_subproblem(z) # cheat via Gurobi
    BL = Model("Gurobi", "Ms") # disjoint bilinear program
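    # Note: depending on the Gurobi version, the bilinear objective may require
    # JuMP.set_attribute(BL, "NonConvex", 2) before it is accepted (recent versions handle it by default).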
    JuMP.@variable(BL, 0 <= g[1:3] <= 1)
    JuMP.@constraint(BL, g[1] + g[2] <= 1.2); JuMP.@constraint(BL, sum(g) <= 1.8);
    JuMP.@expression(BL, d, B_d .+ 40 * g);
    # 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎
    JuMP.@variable(BL, 0 <= Ds[i = 1:3] ≤ 1) # "≤ 1" ⇐ [s >= 0]
    JuMP.@variable(BL, DJ[j = 1:3] >= 0)
    JuMP.@constraint(BL, x[i = 1:3, j = 1:3], Ds[i] - DJ[j] >= 0)
    # 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎
    @set_objective_function(BL, slacked_stage2_problem_dual_objf(z, d, DJ, Ds))
    @opt_ass_opt(BL)
    @info JuMP.value.(d) # the most adverse scene
    JuMP.objective_bound(BL) # if this is > 0, the argument `z` should be cut off by a feasibility cut
end
function slacked_subproblem(z, g0, Ds0, DJ0) # suffix `0` means start values for the variables
    BL = Model("Ipopt", "Ms")
    JuMP.@variable(BL, 0 <= g[1:3] <= 1)
    JuMP.@constraint(BL, g[1] + g[2] <= 1.2); JuMP.@constraint(BL, sum(g) <= 1.8);
    JuMP.@expression(BL, d, B_d .+ 40 * g);
    # 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎
    JuMP.@variable(BL, 0 <= Ds[i = 1:3] ≤ 1) # "≤ 1" ⇐ [s >= 0]
    JuMP.@variable(BL, DJ[j = 1:3] >= 0)
    JuMP.@constraint(BL, x[i = 1:3, j = 1:3], Ds[i] - DJ[j] >= 0)
    # 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎
    @set_objective_function(BL, slacked_stage2_problem_dual_objf(z, d, DJ, Ds))
    JuMP.set_start_value.(g, g0)
    JuMP.set_start_value.(Ds, Ds0)
    JuMP.set_start_value.(DJ, DJ0)
    @opt_ass_opt(BL) # local optimal
    JuMP.objective_value(BL), JuMP.value.(d) # Ipopt can only provide a primal value
end
function slacked_subproblem(z, ::String) # TODO the dual bound is very loose currently due to partial RLT
    CR = Model("Mosek", "Ms")
    JuMP.@variable(CR, 0 <= g[1:3] <= 1)
    JuMP.@constraint(CR, g[1] + g[2] <= 1.2); JuMP.@constraint(CR, sum(g) <= 1.8);
    JuMP.@expression(CR, d, B_d .+ 40 * g);
    # 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎
    JuMP.@variable(CR, 0 <= Ds[i = 1:3] ≤ 1) # "≤ 1" ⇐ [s >= 0]
    JuMP.@variable(CR, DJ[j = 1:3] >= 0)
    JuMP.@constraint(CR, x[i = 1:3, j = 1:3], Ds[i] - DJ[j] >= 0)
    # 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎
    JuMP.@variable(CR, 0 <= gDJ[1:3, 1:3] <= 1)
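    # `gDJ[i, j]` stands in for the bilinear product g[i] * DJ[j]; it is only box-constrained here
    # (0 <= g <= 1 and DJ[j] <= Ds[i] <= 1 give a product in [0, 1]), with no RLT product constraints
    # tying it to g and DJ, which is why this is only a "partial RLT" with a loose dual bound (see the TODO above).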
    @set_objective_function(CR, sm(DJ, B_d) + 40 * tr(gDJ) - sm(Ds, z))
    @opt_ass_opt(CR)
    db = JuMP.objective_bound(CR)
    pb, dt = slacked_subproblem(z, JuMP.value.(g), JuMP.value.(Ds), JuMP.value.(DJ)) # via Ipopt
    @info "lb = $pb < $db = ub"
    return pb, db, dt
end

@opt_ass_opt(st1)
zt = JuMP.value.(st1_z) # generate the first trial `z`
pb, db, dt = slacked_subproblem(zt, "CR") # we see lb = 772, thus this `zt` needs to be cut off by a feasibility cut
# the following 2 lines add the feas. cut
_, cn, cz = slacked_stage2_problem_dual(zt, dt)
JuMP.@constraint(st1, cn + sm(cz, st1_z) <= 0)

@opt_ass_opt(st1)
zt = JuMP.value.(st1_z) # generate the second trial `z`
pb, db, dt = slacked_subproblem(zt, "CR") # we see lb = 7.653811934460464e-6 < 120.0 = ub
# this result is uninformative in that
# 1. lb is not strictly positive
# 2. ub is not zero
# 3. since the gap is not closed, `dt` may not be optimal

# By comparison
slacked_subproblem(zt) # if we cheat via Gurobi, we get ub = 0.000238, which in this example can be deemed zero
# This suggests that this trial `zt` is essentially feasible, so we could leave the "slacked" 2nd-stage (feasibility) problem and move on to the ordinary one

# TODO write the 3 `subproblem::Function` resembling `slacked_subproblem`
# use the dual bound to update upper bound, and use the primal feas. solution to add opt. cut to update lower bound

If we can ensure RCR, the LP-relaxation (RLT) algorithm for this 2SARO problem is as follows:
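The key device, sketched here in my own words (identifiers are those of the code below): stack the uncertainty-side constraints as A1 * [1; g] .≥ 0 and the second-stage-dual-side constraints as A2 * [1; Dv] .≥ 0 with Dv = [DI; DJ], and let the matrix variable cross_multrix = [1 Dvᵀ; g g_x_Dv] act as a surrogate for the rank-one matrix [1; g] * [1; Dv]ᵀ. For any row a1 of A1 and any row a2 of A2,

    (a1ᵀ[1; g]) * (a2ᵀ[1; Dv]) = a1ᵀ ([1; g][1; Dv]ᵀ) a2 ≥ 0,

so imposing a1ᵀ * cross_multrix * a2 ≥ 0 for every pair of rows is valid; dropping the rank-one requirement on cross_multrix gives the LP relaxation `lpr`. Its optimal value over-estimates the bilinear subproblem (an upper bound), and its (g, Dv) values seed the block-coordinate-ascent heuristic that recovers a feasible lower bound.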

import JuMP, Gurobi; GRB_ENV = Gurobi.Env();
import SparseArrays, Random; import LinearAlgebra.diag as diag
# Two-stage robust location-transportation with RCR
function sm(p, x) return sum(p .* x) end;
function optimise(m)
    JuMP.optimize!(m)
    return (
        JuMP.termination_status(m),
        JuMP.primal_status(m),
        JuMP.dual_status(m)
    )
end;
macro set_objective_function(m, f) return esc(:(JuMP.set_objective_function($m, JuMP.@expression($m, $f)))) end;
function Model(name) # e.g. "st2_lms" or "st1_bMD" or "supp_lMs"
    m = JuMP.Model(() -> Gurobi.Optimizer(GRB_ENV))
    JuMP.MOI.set(m, JuMP.MOI.Name(), name)
    s = name[end-1]
    s == 'm' && JuMP.set_objective_sense(m, JuMP.MIN_SENSE)
    s == 'M' && JuMP.set_objective_sense(m, JuMP.MAX_SENSE)
    last(name) == 's' && JuMP.set_silent(m)
    m
end;
function solve_to_normality(m)
    t, p, d = optimise(m)
    name = JuMP.name(m)
    t == JuMP.OPTIMAL || error("$name: $(string(t))")
    p == JuMP.FEASIBLE_POINT || error("$name: (primal) $(string(p))")
    (name[end-2] == 'l' && d != p) && error("$name: (dual) $(string(d))")
end;
function safe_upper_bound(ObjBnd) return max(0., ObjBnd + 9.99, 1.01 * ObjBnd) end;
function stage2_problem_primal(z, d)
    st2 = Model("st2_primal_lms")
    JuMP.@variable(st2, x[i = 1:M, j = 1:N] ≥ 0)
    JuMP.@variable(st2, ζ[i = 1:M] ≥ 0) # an expensive substitute for `z`
    JuMP.@constraint(st2, DI[i = 1:M], z[i] + ζ[i]  ≥ sum(x[i, :]))
    JuMP.@constraint(st2, DJ[j = 1:N], sum(x[:, j]) ≥ d[j]        )
    @set_objective_function(st2, sm(R_x, x) + sm(R_ζ, ζ))
    solve_to_normality(st2)
    return JuMP.objective_value(st2)
end;
function st2_dual_obj_f(z, d, DI, DJ) return sm(DJ, d) - sm(DI, z) end; # 1st-stage decision; scene; 2nd-stage dual decision
function stage2_problem_dual(z, d) # ✅
    st2 = Model("st2_dual_lMs")
    JuMP.@variable(st2, 0 <= DI[i = 1:M] <= R_ζ[i]) # c3 and c4 (in R2)
    JuMP.@variable(st2, 0 <= DJ[j = 1:N]) # c2 (in R2)
    JuMP.@constraint(st2, x[i = 1:M, j = 1:N], R_x[i, j] + DI[i] - DJ[j] >= 0) # c1 (in R2)
    @set_objective_function(st2, st2_dual_obj_f(z, d, DI, DJ))
    solve_to_normality(st2)
    return JuMP.objective_value(st2), JuMP.value.(DI), JuMP.value.(DJ)
end;
function get_st1_oz_cut(z, d) # `d` is the [quasi]-most-hazardous scene
    _, DI, DJ = stage2_problem_dual(z, d)
    return sm(DJ, d), -DI # cn, pz
end;
function g2d(g) return B_d + V_d .* g end;
function g_2_DI_DJ(z, g) return stage2_problem_dual(z, g2d(g)) end;
function DI_DJ_2_g(z, DI, DJ)
    oppo = Model("oppo_lMs")
    JuMP.@variable(oppo, 0 ≤ g[1:N] ≤ 1) # r2 and r3 (in R1)
    JuMP.@constraint(oppo, C_g_poly * g .≤ b_g_poly) # r1 (in R1)
    @set_objective_function(oppo, st2_dual_obj_f(z, g2d(g), DI, DJ))
    solve_to_normality(oppo)
    return JuMP.objective_value(oppo), JuMP.value.(g)
end;
function improve_g_by_BCA(z, g) # Block Coordinate Ascent
    _, DI, DJ    = g_2_DI_DJ(z, g)
    return DI_DJ_2_g(z, DI, DJ) # returns (lb, g)
end;
function get_A1_A2() # A1 is the uncertainty's; A2 is the st2_dual's
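    # Row layout (my reading of the constraints built below):
    #   A1 * [1; g]      .>= 0 encodes the uncertainty polytope:
    #     r1: b_g_poly - C_g_poly*g >= 0,   r2: g >= 0,   r3: 1 .- g >= 0
    #   A2 * [1; DI; DJ] .>= 0 encodes the 2nd-stage dual feasible set:
    #     c1: R_x[i,j] + DI[i] - DJ[j] >= 0,   c2: DJ >= 0,   c3: DI >= 0,   c4: R_ζ .- DI >= 0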
    A1 = SparseArrays.spzeros(R1, 1 + C1); A2 = SparseArrays.spzeros(R2, 1 + C2)
    A1[eachindex(b_g_poly), 1] .= b_g_poly; A1[eachindex(b_g_poly), 2:end] .= -C_g_poly # r1
    A1[length(b_g_poly)       .+ (1:N), 2:end] .=  SparseArrays.spdiagm(ones(N)) # r2
    A1[(length(b_g_poly) + N) .+ (1:N), 2:end] .= -SparseArrays.spdiagm(ones(N)) # r3
    A1[(length(b_g_poly) + N) .+ (1:N), 1] .= 1 # r3
    for i in 1:M, j in 1:N # c1
        A2[N*(i-1)+j, [1, 1+ i, (1+M)+ j]] .= [R_x[i, j], 1, -1]
    end
    A2[M*N .+ (1:N), (1+M) .+ (1:N)]    .=  SparseArrays.spdiagm(ones(N)); # c2
    A2[(M*N + N) .+ (1:M), 1 .+ (1:M)]  .=  SparseArrays.spdiagm(ones(M)); # c3
    A2[(M*N + N + M) .+ (1:M), 1 .+ (1:M)]  .= -SparseArrays.spdiagm(ones(M)); # c4
    A2[(M*N + N + M) .+ (1:M), 1] .= R_ζ # c4
    return A1, A2
end;
# 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎
M, N = 9, 10;
K_y, C_y, C_z, R_x, B_d, V_d, C_g_poly, b_g_poly = let
    R_x = [2.75 12.33 8.58 0.67 13.58 7.33 9.42 5.67 9.83 4.0; 1.08 5.67 6.5 14.42 0.67 4.0 12.75 6.08 4.0 9.42; 4.42 6.92 12.75 4.42 11.5 7.75 6.92 7.33 13.17 11.08; 4.0 11.08 4.0 13.58 12.33 5.25 4.83 4.0 11.5 14.0; 3.17 1.08 11.92 8.58 11.5 1.08 7.75 1.5 5.67 3.17; 3.58 2.75 11.5 2.75 2.33 14.42 4.83 5.67 3.17 10.25; 11.92 9.83 11.92 2.33 2.75 13.17 1.92 2.33 12.75 8.17; 4.0 14.0 4.0 12.33 1.08 9.83 1.92 10.67 5.67 3.58; 14.0 9.83 12.33 14.0 7.33 11.92 8.58 2.75 12.33 10.67]
    C_z = 2 * [4.0, 2.5, 4.0, 4.0, 2.5, 2.5, 3.0, 3.5, 3.0]
    C_y = [10, 12, 10, 9, 12, 9, 12, 9, 11] .* C_z
    K_y = [224, 224, 221, 223, 221, 226, 224, 219, 218]
    B_d = [394, 70, 238, 6, 225, 383, 65, 136, 13, 83]
    V_d = [40, 40, 40, 30, 10, 10, 40, 10, 30, 30]
    C_g_poly = SparseArrays.sparse([1, 4, 5, 1, 2, 3, 6, 1, 2, 5, 6, 3, 5, 2, 6, 1, 5, 1, 2, 5, 6, 2, 4, 6, 4, 6, 3, 4], [1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 10, 10], Bool[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 6, 10)
    b_g_poly = [4.41, 3.78, 2.66, 3.5, 4.62, 4.41]
    K_y, C_y, C_z, R_x, B_d, V_d, C_g_poly, b_g_poly
end; R_ζ = 20 * C_z;
R1 = length(b_g_poly) + N + N; C1 = N; # 1 ⇒ uncertainty_side, 2 ⇒ st2_dual side
R2 = M*N + N + M + M;          C2 = M + N;
# 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎
st1 = Model("st1_bms"); JuMP.@variable(st1, st1_o);
JuMP.@variables(st1, begin st1_y[1:M], Bin; st1_z[1:M] ≥ 0 end); JuMP.@constraint(st1, st1_z .≤ K_y .* st1_y);
JuMP.set_objective_coefficient(st1, [st1_y; st1_z], [C_y; C_z])
solve_to_normality(st1); z = JuMP.value.(st1_z); # 🍏 initial trial `z`
# 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎
best_lpr_ul = zeros(2); best_g = zeros(N); # 📙 global book
A1, A2 = get_A1_A2(); old_feas_val = zeros(1); # if the heuristic solution of BLP reaches a different value, then we deem it new
lpr = Model("lpr_lMs");
JuMP.@variables(lpr, begin lpr_g[1:C1]; lpr_Dv[1:C2]; g_x_Dv[1:C1, 1:C2] end);
JuMP.@expression(lpr, cross_multrix, [1 lpr_Dv'; lpr_g g_x_Dv]); JuMP.@expression(lpr, lpr_DI, lpr_Dv[1:M]);
JuMP.@expression(lpr, lpr_DJ, lpr_Dv[M .+ (1:N)]); JuMP.@expression(lpr, g_x_DJ, g_x_Dv[:, M .+ (1:N)]);
JuMP.@constraint(lpr,      [A1 * [1; lpr_g]; A2 * [1; lpr_Dv]] .>= 0); # [uncertainty_side; st2_dual_side] constraints
JuMP.@constraint(lpr, c_rlt[r in 1:R1, c in 1:R2], sm(A1[r, :] * transpose(A2[c, :]), cross_multrix) >= 0); # 📗
@set_objective_function(lpr, sm(B_d, lpr_DJ) + sm(V_d, diag(g_x_DJ))) # common static part
nothing; JuMP.set_objective_coefficient(lpr, lpr_DI, -z); # 🟡 1st-decision dependent
solve_to_normality(lpr); c_safe_ub = JuMP.@constraint(lpr, safe_upper_bound(JuMP.objective_bound(lpr)) >= JuMP.objective_function(lpr))
let # 🍅🍅🍅
    solve_to_normality(lpr)
    best_lpr_ul[1] = JuMP.objective_bound(lpr)
    g = JuMP.value.(lpr_g) # get a heuristic solution from `lpr`
    old_feas_val[1] = st2_dual_obj_f(z, g2d(g), JuMP.value.(lpr_DI), JuMP.value.(lpr_DJ)) # record the old value to help discovering new heuristic solution
    lb, g = improve_g_by_BCA(z, g)
    best_lpr_ul[2] = lb; best_g .= g
end;
JuMP.@expression(st1, common_expr, JuMP.objective_function(st1)); JuMP.set_objective_coefficient(st1, st1_o, 1); # 1️⃣ a one-shot turn on
cn, pz = get_st1_oz_cut(z, g2d(best_g)); JuMP.@constraint(st1, st1_o >= cn + sm(pz, st1_z))
solve_to_normality(st1); z = JuMP.value.(st1_z) # 🍏 ✅ At this line, the 1st-stage is auto bounded
# ✅ Make preparation for the main loop
# 🍅 To simplify, I currently don't remember the best-solution-so-far
best_ul = Inf * ones(2); best_z = deepcopy(z); # 📙 global book
best_ul[2], common_cost = JuMP.objective_bound(st1), JuMP.value(common_expr)
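# Main loop: for the current trial z, re-solve the RLT relaxation `lpr` (upper bound on the worst-case
# recourse cost), refine the heuristic scene by BCA, update the incumbent upper bound `best_ul[1]`,
# then add the optimality cut to `st1` and re-solve it to obtain a new trial z and the lower bound `best_ul[2]`.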
for ite in 1:9
    JuMP.delete(lpr, c_safe_ub); JuMP.set_objective_coefficient(lpr, lpr_DI, -z); # 🟡 1st-decision dependent
    solve_to_normality(lpr); c_safe_ub = JuMP.@constraint(lpr, safe_upper_bound(JuMP.objective_bound(lpr)) >= JuMP.objective_function(lpr))
    let # 🍅🍅🍅
        solve_to_normality(lpr)
        best_lpr_ul[1] = JuMP.objective_bound(lpr)
        g = JuMP.value.(lpr_g) # get a heuristic solution from `lpr`
        old_feas_val[1] = st2_dual_obj_f(z, g2d(g), JuMP.value.(lpr_DI), JuMP.value.(lpr_DJ)) # record the old value to help discovering new heuristic solution
        lb, g = improve_g_by_BCA(z, g)
        best_lpr_ul[2] = lb; best_g .= g
    end
    @debug "see the performance of RLT" best_lpr_ul
    ub = common_cost + best_lpr_ul[1] # the new feasible value
    if ub < best_ul[1]
        best_ul[1] = ub
        best_z .= z
        @info "ite = $ite ▶ $(best_ul[2]) < $(best_ul[1]) ✪"
    else
        @info "ite = $ite ▶ $(best_ul[2]) < $(best_ul[1]) = ubs | ub = $ub"
    end
    cn, pz = get_st1_oz_cut(z, g2d(best_g)); JuMP.@constraint(st1, st1_o >= cn + sm(pz, st1_z))
    solve_to_normality(st1); z = JuMP.value.(st1_z) # 🍏
    best_ul[2], common_cost = JuMP.objective_bound(st1), JuMP.value(common_expr)
end

When the instance is enlarged to (M, N) = (69, 100), building the RLT constraints becomes the bottleneck:

import JuMP, Gurobi; GRB_ENV = Gurobi.Env();
import SparseArrays, Random; import LinearAlgebra.diag as diag

function sm(p, x) return sum(p .* x) end;
function optimise(m)
    JuMP.optimize!(m)
    return (
        JuMP.termination_status(m),
        JuMP.primal_status(m),
        JuMP.dual_status(m)
    )
end;
macro set_objective_function(m, f) return esc(:(JuMP.set_objective_function($m, JuMP.@expression($m, $f)))) end;
function Model(name) # e.g. "st2_lms" or "st1_bMD" or "supp_lMs"
    m = JuMP.Model(() -> Gurobi.Optimizer(GRB_ENV))
    JuMP.MOI.set(m, JuMP.MOI.Name(), name); s = name[end-1]
    s == 'm' && JuMP.set_objective_sense(m, JuMP.MIN_SENSE)
    s == 'M' && JuMP.set_objective_sense(m, JuMP.MAX_SENSE)
    last(name) == 's' && JuMP.set_silent(m)
    m
end;
function solve_to_normality(m)
    t, p, d = optimise(m)
    name = JuMP.name(m)
    t == JuMP.OPTIMAL || error("$name: $(string(t))")
    p == JuMP.FEASIBLE_POINT || error("$name: (primal) $(string(p))")
    (name[end-2] == 'l' && d != p) && error("$name: (dual) $(string(d))")
end;
function safe_upper_bound(ObjBnd) return max(0., ObjBnd + 9.99, 1.01 * ObjBnd) end;
function stage2_problem_primal(z, d)
    st2 = Model("st2_primal_lms")
    JuMP.@variable(st2, x[i = 1:M, j = 1:N] ≥ 0)
    JuMP.@variable(st2, ζ[i = 1:M] ≥ 0) # an expensive substitute for `z`
    JuMP.@constraint(st2, DI[i = 1:M], z[i] + ζ[i]  ≥ sum(x[i, :]))
    JuMP.@constraint(st2, DJ[j = 1:N], sum(x[:, j]) ≥ d[j]        )
    @set_objective_function(st2, sm(R_x, x) + sm(R_ζ, ζ))
    solve_to_normality(st2)
    return JuMP.objective_value(st2)
end;
function st2_dual_obj_f(z, d, DI, DJ) return sm(DJ, d) - sm(DI, z) end; # 1st-stage decision; scene; 2nd-stage dual decision
function stage2_problem_dual(z, d) # ✅
    st2 = Model("st2_dual_lMs")
    JuMP.@variable(st2, 0 <= DI[i = 1:M] <= R_ζ[i]) # c3 and c4 (in R2)
    JuMP.@variable(st2, 0 <= DJ[j = 1:N]) # c2 (in R2)
    JuMP.@constraint(st2, x[i = 1:M, j = 1:N], R_x[i, j] + DI[i] - DJ[j] >= 0) # c1 (in R2)
    @set_objective_function(st2, st2_dual_obj_f(z, d, DI, DJ))
    solve_to_normality(st2)
    return JuMP.objective_value(st2), JuMP.value.(DI), JuMP.value.(DJ)
end;
function get_st1_oz_cut(z, d) # `d` is the [quasi]-most-hazardous scene
    _, DI, DJ = stage2_problem_dual(z, d)
    return sm(DJ, d), -DI # cn, pz
end;
function g2d(g) return B_d + V_d .* g end;
function g_2_DI_DJ(z, g) return stage2_problem_dual(z, g2d(g)) end;
function DI_DJ_2_g(z, DI, DJ)
    oppo = Model("oppo_lMs")
    JuMP.@variable(oppo, 0 ≤ g[1:N] ≤ 1) # r2 and r3 (in R1)
    JuMP.@constraint(oppo, C_g_poly * g .≤ b_g_poly) # r1 (in R1)
    @set_objective_function(oppo, st2_dual_obj_f(z, g2d(g), DI, DJ))
    solve_to_normality(oppo)
    return JuMP.objective_value(oppo), JuMP.value.(g)
end;
function improve_g_by_BCA(z, g) # Block Coordinate Ascent
    _, DI, DJ    = g_2_DI_DJ(z, g)
    return DI_DJ_2_g(z, DI, DJ) # returns (lb, g)
end;
function get_A1_A2() # A1 is the uncertainty's; A2 is the st2_dual's
    A1 = SparseArrays.spzeros(R1, 1 + C1); A2 = SparseArrays.spzeros(R2, 1 + C2)
    A1[eachindex(b_g_poly), 1] .= b_g_poly; A1[eachindex(b_g_poly), 2:end] .= -C_g_poly # r1
    A1[length(b_g_poly)       .+ (1:N), 2:end] .=  SparseArrays.spdiagm(ones(N)) # r2
    A1[(length(b_g_poly) + N) .+ (1:N), 2:end] .= -SparseArrays.spdiagm(ones(N)) # r3
    A1[(length(b_g_poly) + N) .+ (1:N), 1] .= 1 # r3
    for i in 1:M, j in 1:N # c1
        A2[N*(i-1)+j, [1, 1+ i, (1+M)+ j]] .= [R_x[i, j], 1, -1]
    end
    A2[M*N .+ (1:N), (1+M) .+ (1:N)]    .=  SparseArrays.spdiagm(ones(N)); # c2
    A2[(M*N + N) .+ (1:M), 1 .+ (1:M)]  .=  SparseArrays.spdiagm(ones(M)); # c3
    A2[(M*N + N + M) .+ (1:M), 1 .+ (1:M)]  .= -SparseArrays.spdiagm(ones(M)); # c4
    A2[(M*N + N + M) .+ (1:M), 1] .= R_ζ # c4
    return A1, A2
end;
# 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎
M, N = 69, 100; # it was (9, 10)
# (The original sparse 8×100 Bool C_g_poly for this enlarged instance was commented out here;
#  it and the b_g_poly below are superseded by the single budget constraint that follows.)
# b_g_poly = [40.5, 35.88, 37.41, 46.8, 38.64, 41.16, 32.4, 48.6];
C_g_poly = ones(1, N)
b_g_poly = N/2 * ones(1)
B_d = [0.11, 0.57, 0.77, 0.35, 0.49, 0.25, 0.25, 0.45, 0.05, 0.77, 0.39, 0.83, 0.47, 0.55, 0.43, 0.47, 0.29, 0.49, 0.47, 0.75, 0.09, 0.59, 0.11, 0.09, 0.11, 0.49, 0.29, 0.61, 0.23, 0.53, 0.15, 0.85, 0.17, 0.23, 0.25, 0.57, 0.65, 0.47, 0.65, 0.11, 0.09, 0.13, 0.57, 0.29, 0.19, 0.39, 0.31, 0.63, 0.61, 0.51, 0.75, 0.41, 0.57, 0.59, 0.47, 0.05, 0.29, 0.49, 0.53, 0.79, 0.21, 0.83, 0.13, 0.31, 0.63, 0.43, 0.05, 0.33, 0.77, 0.43, 0.11, 0.41, 0.19, 0.61, 0.89, 0.43, 0.15, 0.35, 0.43, 0.63, 0.59, 0.89, 0.23, 0.23, 0.33, 0.63, 0.69, 0.35, 0.79, 0.31, 0.69, 0.81, 0.15, 0.19, 0.27, 0.29, 0.17, 0.83, 0.85, 0.45];
V_d = [0.56, 0.26, 0.5, 0.5, 0.71, 0.77, 0.32, 0.53, 0.38, 0.2, 0.2, 0.29, 0.68, 0.59, 0.38, 0.29, 0.38, 0.41, 0.26, 0.35, 0.56, 0.68, 0.8, 0.8, 0.62, 0.44, 0.47, 0.53, 0.68, 0.44, 0.26, 0.35, 0.44, 0.59, 0.59, 0.41, 0.53, 0.8, 0.29, 0.62, 0.29, 0.59, 0.23, 0.56, 0.68, 0.2, 0.44, 0.74, 0.65, 0.62, 0.8, 0.71, 0.35, 0.56, 0.23, 0.44, 0.65, 0.35, 0.38, 0.38, 0.44, 0.47, 0.5, 0.38, 0.74, 0.29, 0.2, 0.29, 0.62, 0.23, 0.41, 0.8, 0.77, 0.2, 0.35, 0.65, 0.32, 0.23, 0.74, 0.38, 0.68, 0.56, 0.59, 0.32, 0.8, 0.74, 0.38, 0.2, 0.5, 0.68, 0.74, 0.8, 0.68, 0.53, 0.44, 0.59, 0.26, 0.68, 0.44, 0.38];
# @assert sum(K_y) >= sum(g2d(ones(100)))
K_y = [2.42, 2.42, 1.78, 2.7, 2.06, 1.78, 1.14, 3.5, 3.1, 2.02, 1.82, 3.42, 2.42, 0.9, 3.06, 1.18, 1.7, 3.46, 1.46, 1.62, 1.26, 1.3, 3.46, 2.66, 1.82, 2.34, 1.54, 1.82, 2.58, 2.9, 2.66, 3.14, 2.5, 1.62, 2.78, 3.3, 1.38, 1.26, 2.74, 1.82, 3.14, 1.38, 2.74, 0.94, 3.06, 3.22, 2.62, 2.42, 3.02, 3.18, 1.58, 2.7, 3.06, 1.7, 3.22, 2.74, 2.54, 3.26, 1.3, 2.38, 3.3, 1.02, 2.42, 2.38, 3.1, 1.3, 2.02, 0.94, 3.46];
Random.seed!(1); R_x = rand(0.3:.00117:8.7, M, N);
C_z = rand(2:0.00117:5, M);
C_y = rand(9:0.017:15, M) .* C_z;
R_ζ = 20 * C_z;
R1 = length(b_g_poly) + N + N; C1 = N; # 1 ⇒ uncertainty_side, 2 ⇒ st2_dual side
R2 = M*N + N + M + M;          C2 = M + N;
# 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎
st1 = Model("st1_bms"); JuMP.@variable(st1, st1_o);
JuMP.@variables(st1, begin st1_y[1:M], Bin; st1_z[1:M] ≥ 0 end); JuMP.@constraint(st1, st1_z .≤ K_y .* st1_y);
JuMP.set_objective_coefficient(st1, [st1_y; st1_z], [C_y; C_z])
solve_to_normality(st1); z = JuMP.value.(st1_z); # 🍏 initial trial `z`
# 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎
best_lpr_ul = zeros(2); best_g = zeros(N); # 📙 global book
A1, A2 = get_A1_A2(); old_feas_val = zeros(1); # if the heuristic solution of BLP reaches a different value, then we deem it new
lpr = Model("lpr_lMD");
JuMP.@variables(lpr, begin lpr_g[1:C1]; lpr_Dv[1:C2]; g_x_Dv[1:C1, 1:C2] end);
JuMP.@expression(lpr, cross_multrix, [1 lpr_Dv'; lpr_g g_x_Dv]); JuMP.@expression(lpr, lpr_DI, lpr_Dv[1:M]);
JuMP.@expression(lpr, lpr_DJ, lpr_Dv[M .+ (1:N)]); JuMP.@expression(lpr, g_x_DJ, g_x_Dv[:, M .+ (1:N)]);
JuMP.@constraint(lpr,      [A1 * [1; lpr_g]; A2 * [1; lpr_Dv]] .>= 0); # [uncertainty_side; st2_dual_side] constraints
ite = 0
t0 = time()
for r in eachrow(A1)
    ite += 1
    println("ite = $ite <= 201, time_elapsed = $(time() - t0)")
    JuMP.@constraint(lpr, [c in eachrow(A2)], transpose(r) * cross_multrix * c >= 0)
end

The bottom for-loop is slow.
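For scale, with the sizes set above (M = 69, N = 100), the loop adds one scalar RLT constraint per pair of a row of A1 and a row of A2, so the total count is

R1 = 1 + 100 + 100            # length(b_g_poly) + N + N = 201
R2 = 69*100 + 100 + 69 + 69   # M*N + N + M + M = 7138
R1 * R2                       # = 1_434_738

i.e. roughly 1.4 million constraints, which by itself largely explains the slow model build.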

Please keep to a single thread for a single topic. We don’t need to split our attention across multiple threads: I added 1 Million constraints to an LP unwittingly - #8 by WalterMadelim

I thought the code might be lengthy.
(Only the code in the post above, i.e. the last block, is relevant here; the earlier blocks do not matter for the current performance discussion.)