Columns and Constraints generation

Hello, I’m looking to implement two-stage robust optimization using column and constraint generation. I’ve encountered an issue within while loop related to the optimize!() function when trying to implement the algorithm. Thank you for your help. Here’s the relevant code:

using JuMP
using Gurobi

# Constant creation
f = [400, 414, 326]
a = [18, 25, 20]
C = [22 33 24;
     33 23 30;
     20 25 27]
D = [206 + 40, 274 + 40, 220 + 40]
dl = [206, 274, 220]
du = [40, 40, 40]
k = 0  # Iterative counting

MP = Model(Gurobi.Optimizer)

# Variables
@variable(MP, x[1:3, 1:3] >= 0)
@variable(MP, y[1:length(f)], Bin)
@variable(MP, z[1:length(a)] >= 0)
@variable(MP, d[1:3] >= 0)
@variable(MP, eta >= 0)
@variable(MP, 0<=g[1:3]<=1)

# Objective function
@objective(MP, Min, sum(f[i] * y[i] for i in 1:length(f)) + sum(a[i] * z[i] for i in 1:length(a)) + eta)

# Constraints
@constraint(MP, MP_Cons_1[i in 1:3], z[i] <= 800 * y[i])
@constraint(MP, MP_Cons_2, sum(z[i] for i in 1:3) >= 772)

# Iteration constraints
@constraint(MP, MP_Cons_3[i in 1:3], sum(x[i, j] for j in 1:3) <= z[i])

@constraint(MP, MP_Cons_4[j in 1:3], sum(x[i, j] for i in 1:3) >= d[j])
@constraint(MP, MP_Cons_eta, eta >= sum(x[i, j]*C[i,j] for i in 1:3 for j in 1:3))
# Master-problem uncertainty constraints
@constraint(MP, MP_Cons_uncertainty_1[i in 1:3], d[i] == dl[i] + du[i] * g[i])
@constraint(MP, MP_Cons_uncertainty_2, sum(g[i] for i in 1:3) <= 1.8)
@constraint(MP, MP_Cons_uncertainty_3, sum(g[i] for i in 1:2) <= 1.2)

# Solve the problem
optimize!(MP)

# Get the lower bound
LB = objective_value(MP)

# Display the results if needed
println("Lower Bound: $LB")


SP = Model(optimizer_with_attributes(Gurobi.Optimizer, "OutputFlag" => 1))

# Variables
@variable(SP, x_sub[1:3, 1:3] >= 0)
@variable(SP, d_sub[1:3] >= 0)
@variable(SP, g_sub[1:3], Bin)
@variable(SP, pi[1:6] >= 0)
@variable(SP, v[1:6], Bin)
@variable(SP, w[1:3, 1:3], Bin)
M = 10000

# Objective function
@objective(SP, Max, sum(C[i,j] * x_sub[i, j] for i in 1:3, j in 1:3))
# Constraints
@constraint(SP, SP_Cons_1[i in 1:3], sum(x_sub[i, j] for j in 1:3) <= value.(z[i]))

@constraint(SP, SP_Cons_2[j in 1:3], sum(x_sub[i, j] for i in 1:3) >= d_sub[j])

@constraint(SP, SP_Cons_3[i in 1:3, j in 1:3], - pi[i] + pi[j + 3] <= C[i,j])
# Slack constraints part 1
@constraint(SP, SP_SLACK_CONS_1[i in 1:3], value.(z[i]) - sum(x_sub[i, j] for j in 1:3) <= M * (1 - v[i]))
@constraint(SP, SP_SLACK_CONS_2[j in 1:3], sum(x_sub[i, j] for i in 1:3) - d_sub[j] <= M * (1 - v[j + 3]))
@constraint(SP, SP_SLACK_CONS_3[i in 1:6], pi[i] <= M * v[i])

# Slack constraints part 2
@constraint(SP, SP_SLACK_CONS_4[i in 1:3, j in 1:3], C[i,j] + pi[i] - pi[j + 3] <= M * (1 - w[i, j]))
@constraint(SP, SP_SLACK_CONS_5[i in 1:3, j in 1:3], x_sub[i, j] <= M * w[i, j])

# Uncertainty
@constraint(SP, SP_Cons_uncertainty_1[i in 1:3], d_sub[i] == dl[i] + du[i] * g_sub[i])
@constraint(SP, SP_Cons_uncertainty_2, sum(g_sub[i] for i in 1:3) <= 1.8)
@constraint(SP, SP_Cons_uncertainty_3, sum(g_sub[i] for i in 1:2) <= 1.2)


# Solve the sub-problem
optimize!(SP)

# Get the sub-problem objective value
SP_objval = objective_value(SP)


# Calculate the upper bound
UB = LB - value.(eta) + SP_objval

println("Upper Bound: $UB")

while abs(UB - LB) > 1e-5
    k = k + 1
    # Master-problem
    @variable(MP, x_new[1:3, 1:3] >= 0)
    @constraint(MP, MP_Cons_3_new[i in 1:3], sum(x_new[i, j] for j in 1:3) <= value(z[i]))
    @constraint(MP, MP_Cons_4_new[j in 1:3], sum(x_new[i, j] for i in 1:3) >= value(d_sub[j]))
    @constraint(MP, MP_Cons_eta, eta >= sum(x_new[i, j] * C[i,j] for i in 1:3, j in 1:3))
  optimize!(MP)
    LB = max(LB, objective_value(MP))
    println("Lower Bound: $LB")
    # Sub-problem update
    # Remove old constraints related to z
    for i in 1:3
        delete(MP_Cons_3[i])
        delete(SP_SLACK_CONS_1[i])
    end
    # Add new constraints related to z
    for i in 1:3
        @constraint(MP, MP_Cons_3_new[i], sum(x_sub[i, j] for j in 1:3) <= value(z[i]))
        @constraint(SP, SP_Cons_1[i], sum(x_sub[i, j] for j in 1:3) <= value(z[i]))
        @constraint(SP, SP_SLACK_CONS_1[i], value(z[i]) - sum(x_sub[i, j] for j in 1:3) <= M * (1 - value(v[i])))
    end
   optimize!(SP)
    SP_objval = objective_value(SP)
    UB = LB - value(eta) + SP_objval
    println("Upper Bound: $UB")
end

Hi @Salma, welcome to the forum.

The key part of the output is this warning:

┌ Warning: The model has been modified since the last call to `optimize!` (or `optimize!` has not been called yet). If you are iteratively querying solution information and modifying a model, query all the results first, then modify the model.
└ @ JuMP ~/.julia/dev/JuMP/src/optimizer_interface.jl:694

See this section of the docs: Solutions · JuMP

I don’t think your algorithm is quite right. But here’s how I would write it for now. I left a few TODO which need fixing before it’ll work.

using JuMP
using Gurobi

f = [400, 414, 326]
a = [18, 25, 20]
C = [22 33 24; 33 23 30; 20 25 27]
D = [206 + 40, 274 + 40, 220 + 40]
dl = [206, 274, 220]
du = [40, 40, 40]
k = 0

MP = Model(Gurobi.Optimizer)
@variables(MP, begin
    x[1:3, 1:3] >= 0
    y[1:length(f)], Bin
    z[1:length(a)] >= 0
    d[1:3] >= 0
    eta >= 0
    0 <= g[1:3] <= 1
end)
@objective(MP, Min, f' * y + a' * z + eta)
@constraints(MP, begin
    [i in 1:3], z[i] <= 800 * y[i]
    sum(z) >= 772
    MP_Cons_3[i in 1:3], sum(x[i, :]) <= z[i]
    [j in 1:3], sum(x[:, j]) >= d[j]
    eta >= sum(C .* x)
    [i in 1:3], d[i] == dl[i] + du[i] * g[i]
    sum(g[i] for i in 1:3) <= 1.8
    sum(g[i] for i in 1:2) <= 1.2
end)
optimize!(MP)
LB = objective_value(MP)
println("Lower Bound: $LB")

z_star = value.(z)

M = 10_000
SP = Model(Gurobi.Optimizer)
set_silent(SP)
@variables(SP, begin
    x_sub[1:3, 1:3] >= 0
    d_sub[1:3] >= 0
    g_sub[1:3], Bin
    pi[1:6] >= 0
    v[1:6], Bin
    w[1:3, 1:3], Bin
end)
@objective(SP, Max, sum(C .* x_sub))
@constraints(SP, begin
    [i in 1:3], sum(x_sub[i, :]) <= z_star[i]
    [j in 1:3], sum(x_sub[:, j]) >= d_sub[j]
    [i in 1:3, j in 1:3], -pi[i] + pi[j + 3] <= C[i,j]
    SP_SLACK_CONS_1[i in 1:3], z_star[i] - sum(x_sub[i, :]) <= M * (1 - v[i])
    [j in 1:3], sum(x_sub[:, j]) - d_sub[j] <= M * (1 - v[j + 3])
    [i in 1:6], pi[i] <= M * v[i]
    [i in 1:3, j in 1:3], C[i,j] + pi[i] - pi[j + 3] <= M * (1 - w[i, j])
    [i in 1:3, j in 1:3], x_sub[i, j] <= M * w[i, j]
    [i in 1:3], d_sub[i] == dl[i] + du[i] * g_sub[i]
    sum(g_sub[i] for i in 1:3) <= 1.8
    sum(g_sub[i] for i in 1:2) <= 1.2
end)
UB = Inf
while abs(UB - LB) > 1e-5
    optimize!(SP)
    SP_objval = objective_value(SP)
    UB = LB - value(eta) + SP_objval
    println("Upper Bound: $UB")
    # New: query values before modification
    d_sub_star = value.(SP)
    k = k + 1
    x_new = @variable(MP, [1:3, 1:3] >= 0)
    # TODO: do you mean z[i] here, or the previous value of `z-star`?
    @constraint(MP, [i in 1:3], sum(x_new[i, :]) <= value(z[i]))
    @constraint(MP, [j in 1:3], sum(x_new[:, j]) >= d_sub_star[j])
    @constraint(MP, eta >= sum(C .* x_new))
    optimize!(MP)
    LB = max(LB, objective_value(MP))
    println("Lower Bound: $LB")
    # New: query values before modification
    z_star = value.(z)
    v_star = value.(v)
    # What's going on here?
    delete.(MP_Cons_3)
    unregister(MP, :MP_Cons_3)
    delete.(SP_SLACK_CONS_1)
    unregister(SP, :SP_SLACK_CONS_1)
    # TODO: which variables do you mean? `x_sub` is from SP
    @constraint(MP, MP_Cons_3[i in 1:3], sum(x_sub[i, j] for j in 1:3) <= value(z[i]))
    @constraints(SP, begin
        [i in 1:3], sum(x_sub[i, j] for j in 1:3) <= z_star[i]
        SP_SLACK_CONS_1[i in 1:3], z_star[i] - sum(x_sub[i, j] for j in 1:3) <= M * (1 - v_star[i])
    end)
end
2 Likes

Dear @odow thank you for your help.

1 Like

Hi, Dowson. I’m also studying this C&CG algorithm right now (and the same case study as the one Salma provided). Do you have some ideas where can I find a related package or code snippet about this algorithm, in julia? I wrote one myself, and I’d like to make some comparisons, because I’m not 100% sure that my program is correct.

Did you accomplish this small case? What’s your final result?
I get an optimal cost at 30536, which is different from that in the paper (33680).
I’m now inspecting, is there anything wrong.

Hi @WalterMadelim, welcome to the forum.

I don’t know if there are any off-the-shelf implementations of row and column generation algorithms.

The closest thing might be GitHub - atoptima/Coluna.jl: Branch-and-Price-and-Cut in Julia

Perhaps start a new post with a reproducible example of your code.

small_case_in_BoZeng2012
C&CG algorithm

Because new users is limited to comment (<= 3 replies). I update as follows:

I know how the data in Table 1 comes now.
In the initialization phase, I’ve just used a gurobi-default initialization scenario (which happens to be the most advantageous case).
If just to change the initialization scenario, we can recover precisely those 4 numbers (w.r.t. 2 iterations) in Table 1.

I might know where the problem is. This algorithm is a bit involved.

1 Like

updated

The possible explanation: the subproblem is a Max-Min problem originally. For a given 1st-stage decision y, it may happens that there is a scenario u such that the resulting value is ∞. In this case, we need to incorporate this significant u into the master problem at next iteration.

But if we cast the original Max-Min problem to a single-level Max problem with the KKT condition, we might miss these significant u’s which might have rendered the 2nd-decision phase infeasible.

Is there an actionable question about JuMP here?

If you’re trying to reproduce results from a paper, then you likely need to code the algorithm exactly the same. If you get different results, then either you have a mistake in your code, your reformulation is not applicable, or there is a typo in the original paper.

and the same case study as the one Salma provided
I get an optimal cost at 30536, which is different from that in the paper (33680).
I know how the data in Table 1 comes now.

I don’t know what case study, paper, or table you are referring to :smile:

Hey. Here’s the code that works : :slight_smile:

#MP:

using JuMP
using Gurobi

f = [400, 414, 326]
a = [18, 25, 20]
C = [[22 33 24];[33 23 30];[20 25 27]]
D = [206 + 40, 274 + 40, 220 + 40]
dl = [206, 274, 220]
du = [40, 40, 40]
k = 0

MP = Model(Gurobi.Optimizer)
set_silent(MP)
@variables(MP, begin
x[1:3, 1:3] >= 0
y[1:length(f)], Bin
z[1:length(a)] >= 0
d[1:3] >= 0
eta >= 0
0 <= g[1:3] <= 1
end)

@objective(MP, Min, f’ * y + a’ * z + eta)
@constraints(MP, begin
[i in 1:3], z[i] <= 800 * y[i]
sum(z) >= 772
[i in 1:3], sum(x[i, :]) <= z[i]
[j in 1:3], sum(x[:, j]) >= d[j]
eta >= sum(C .* x)
[i in 1:3], d[i] == dl[i] + du[i] * g[i]
sum(g[i] for i in 1:3) <= 1.8
sum(g[i] for i in 1:2) <= 1.2
end)

optimize!(MP)
LB = objective_value(MP)
println(“Lower Bound: $LB”)

#Recourse:
z_star = value.(z)
M = 10000
SP = Model(Gurobi.Optimizer)
set_silent(SP)
@variables(SP, begin
x_sub[1:3, 1:3] >= 0;
d_sub[1:3] >= 0;
0<=g_sub[1:3]<=1;
pi[1:6] >= 0;
v[1:6], Bin;
w[1:3, 1:3], Bin;
end)
@objective(SP, Max, sum(C .* x_sub));
@constraints(SP, begin
SP_Cons_1[i in 1:3], sum(x_sub[i, :]) <= z_star[i];
[j in 1:3], sum(x_sub[:, j]) >= d_sub[j];
[i in 1:3, j in 1:3], -pi[i] + pi[j + 3] <= C[i,j];

SP_SLACK_CONS_1[i in 1:3], z_star[i] - sum(x_sub[i, :]) <= M * (1 - v[i]);
[j in 1:3], sum(x_sub[:, j]) - d_sub[j] <= M * (1 - v[j + 3]);
[i in 1:6], pi[i] <= M * v[i];
    
[i in 1:3, j in 1:3], C[i,j] + pi[i] - pi[j + 3] <= M * (1 - w[i, j]);
[i in 1:3, j in 1:3], x_sub[i, j] <= M * w[i, j];
    
[i in 1:3], d_sub[i] == dl[i] + du[i] * g_sub[i];

sum(g_sub[i] for i in 1:3)<=1.8;
sum(g_sub[i] for i in 1:2)<=1.2;
end)
optimize!(SP)
println(“objective:”, objective_value(SP))

#CCG

UB = Inf
while abs(UB - LB) > 1e-5
optimize!(SP)
SP_objval = objective_value(SP)
println(“SP_objval: $SP_objval”)
UB = LB - value(eta) + SP_objval
println(“Upper Bound: $UB”)
# New: query values before modification
d_sub_star = value.(d_sub)
println(“d_sub=”,value.(d_sub_star))
unregister(MP,:x_new)
@variable(MP, x_new[1:3, 1:3] >= 0)
k = k + 1
@constraint(MP, [i in 1:3], sum(x_new[i, :]) <= z[i])
@constraint(MP, [j in 1:3], sum(x_new[:, j]) >= d_sub_star[j])
@constraint(MP, eta >= sum(C .* x_new))
optimize!(MP)
LB = max(LB, objective_value(MP))
println(“Lower Bound: $LB”)
# New: query values before modification
z_star = value.(z)
v_star = value.(v)
unregister(SP,:x_new)
delete.(SP, SP_Cons_1)
unregister(SP,:SP_Cons_1)
delete.(SP,SP_SLACK_CONS_1)
unregister(SP,:SP_SLACK_CONS_1)
@constraints(SP, begin
SP_Cons_1[i in 1:3], sum(x_sub[i, j] for j in 1:3) <= z_star[i]
SP_SLACK_CONS_1[i in 1:3], z_star[i] - sum(x_sub[i, j] for j in 1:3) <= M * (1 - v[i])
end)
end
println(“Iteration finished! We found the optimal solution!”)
println(“Final Objective:{0}”,UB)

1 Like

I have implemeted the CCG using strong duality. I can share it too.

The paper is “Solving two-stage robust optimization problems using a
column-and-constraint generation method” Redirecting

Thank you both. Don’t worry, I’ve figured it out, and have revised the code in my repository as the link I’ve mentioned.

1 Like