Here is my full implementation of Benders decomposition on a toy example:
using JuMP, Gurobi, Random
"""
    test(S, Maxiter)

Run Benders decomposition on a toy 24-hour, 10-unit commitment problem with
`S` demand scenarios, for at most `Maxiter` master iterations or until the
LB/UB gap is below 1%.

The second-stage (dispatch) problem is handled through its LP dual, so cut
coefficients are read with `value.()` instead of `dual()`.

Returns a named tuple `(LB, UB, iterations)`.
"""
function test(S, Maxiter)
    T = 24   # hours in the horizon
    N = 10   # number of generators
    # First-stage cost per on-hour of each generator.
    cost_1 = [4.3169962, 4.5086696, 4.0460142, 4.9007914, 4.554301799999999, 6.3083598, 5.9217375, 6.4922892, 6.1188277, 5.4346844]
    Pmin = [32.0, 37.0, 48.0, 48.0, 47.0, 50.0, 62.0, 55.0, 85.0, 84.0]
    Pmax = [118.0, 119.0, 105.0, 113.0, 102.0, 190.0, 208.0, 211.0, 282.0, 323.0]
    # Ramping data — not used by this model, kept for completeness.
    delta_up = [42.003808, 31.325072, 20.632301, 27.869072, 22.204262, 51.694535, 67.064482, 64.139573, 89.618227, 101.140357]
    delta_down = [32.599073, 36.390287, 21.831385, 27.091832, 27.417263, 61.702851, 49.715649, 69.178653, 78.751405, 99.597226]
    Demand = [953.170832, 902.233658, 736.221518, 730.912573, 743.969707, 801.50719, 958.479777, 1170.120146, 1366.551107, 1427.675715, 1434.849965, 1453.913382, 1428.536626, 1313.892113, 1354.641852, 1366.120652, 1360.237768, 1322.644699, 1270.70313, 1287.921329, 1323.649093, 1255.637205, 1141.566632, 1034.95728]
    # One multiplicative demand perturbation per scenario (±~1%).
    uncertainty = [randn(T) / 100 for s in 1:S]

    # ---- Master problem: commitment decisions + per-scenario recourse estimates.
    master = Model(Gurobi.Optimizer)
    set_silent(master)
    @variable(master, is_on[i in 1:N, t in 1:T], Bin)
    # z[s] under-approximates the scenario-s dispatch cost via Benders cuts.
    @variable(master, z[s in 1:S] >= 0)
    # Expected second-stage cost + first-stage commitment cost.
    @objective(master, Min,
        sum(z[s] for s in 1:S) / S +
        sum(is_on[i, t] * cost_1[i] for i in 1:N for t in 1:T))

    # ---- Dual of the dispatch subproblem (built once, objective updated per scenario).
    subproblem = Model(Gurobi.Optimizer)
    set_silent(subproblem)
    @variable(subproblem, μₘᵢₙ[i in 1:N, t in 1:T] >= 0)   # dual of power ≥ Pmin·is_on
    @variable(subproblem, μₘₐₓ[i in 1:N, t in 1:T] >= 0)   # dual of power ≤ Pmax·is_on
    @variable(subproblem, ν[t in 1:T])                      # dual of the power balance
    # Dual feasibility for EVERY primal variable power[i,t] (10 = dispatch cost).
    # BUGFIX: this ranged over t in 1:T-1, leaving μ[i,T] unconstrained while it
    # still appeared in the max-objective — the dual was unbounded whenever
    # is_on[i,T] = 1.
    @constraint(subproblem, [i in 1:N, t in 1:T], μₘᵢₙ[i, t] - μₘₐₓ[i, t] + ν[t] == 10)
    # |ν| ≤ 70: duals of the shedding/curtailment columns (their unit cost).
    @constraint(subproblem, [t in 1:T], ν[t] <= 70)
    @constraint(subproblem, [t in 1:T], ν[t] >= -70)

    LB = 1      # any positive placeholder; overwritten at the first master solve
    UB = 1e9
    k = 0
    while k <= Maxiter && 100 * (UB - LB) / LB > 1   # stop at a 1% optimality gap
        optimize!(master)
        k += 1
        LB = objective_value(master)
        is_onₖ = JuMP.value.(is_on)
        z_val = JuMP.value.(z)
        # Scenario-independent part of the dual objective; the ν coefficients
        # (scenario demands) are injected per scenario below.
        @objective(subproblem, Max,
            sum(μₘᵢₙ[i, t] * Pmin[i] * is_onₖ[i, t] -
                μₘₐₓ[i, t] * Pmax[i] * is_onₖ[i, t] for i in 1:N for t in 1:T))
        mumin = [zeros(N, T) for s in 1:S]
        mumax = [zeros(N, T) for s in 1:S]
        nu = [zeros(T) for s in 1:S]
        cost = zeros(S)
        for s in 1:S
            # Swap in scenario-s demand as the coefficients of ν.
            set_objective_coefficient(subproblem, ν,
                [Demand[t] * (1 + uncertainty[s][t]) for t in 1:T])
            optimize!(subproblem)
            mumin[s] = JuMP.value.(μₘᵢₙ)
            mumax[s] = JuMP.value.(μₘₐₓ)
            nu[s] = JuMP.value.(ν)
            cost[s] = objective_value(subproblem)
        end
        # The committed schedule is feasible, so its total cost bounds from above.
        UB = min(UB, sum(cost) / S +
                     sum(is_onₖ[i, t] * cost_1[i] for i in 1:N for t in 1:T))
        # Add optimality cuts only when the recourse estimate is violated (0.1% tol).
        if sum(cost) / S > 1.001 * sum(z_val) / S
            @constraint(master, [s in 1:S],
                z[s] >= sum(mumin[s][i, t] * Pmin[i] * is_on[i, t] -
                            mumax[s][i, t] * Pmax[i] * is_on[i, t]
                            for i in 1:N for t in 1:T) +
                        sum(nu[s][t] * Demand[t] * (1 + uncertainty[s][t]) for t in 1:T))
        end
    end
    # Expose convergence information to the caller.
    return (LB = LB, UB = UB, iterations = k)
end
In this implementation, I solve the LP dual of the subproblem directly, so the Benders cut coefficients can be read with `value()` on the dual variables instead of querying `dual()` on primal constraints. The corresponding primal formulation is:
# Primal dispatch subproblem for one scenario `s` (shown for reference; the
# Benders loop above solves its dual). Assumes `T`, `N`, `Pmin`, `Pmax`,
# `Demand`, `uncertainty`, `s`, and the committed schedule `is_onₖ` are in scope.
primal = Model(Gurobi.Optimizer)
set_silent(primal)  # BUGFIX: was set_silent(subproblem) — silenced the wrong model
@variable(primal, power[i in 1:N, t in 1:T] >= 0)
@variable(primal, power_shedding[t in 1:T] >= 0)     # unmet demand, penalized at 70
@variable(primal, power_curtailment[t in 1:T] >= 0)  # excess supply, penalized at 70
# Capacity bounds for EVERY hour (was 1:T-1), attached to `primal` (was
# `subproblem`), and scaled by the committed is_onₖ so they match the dual
# objective used in the Benders loop.
@constraint(primal, [i in 1:N, t in 1:T], power[i, t] <= Pmax[i] * is_onₖ[i, t])
@constraint(primal, [i in 1:N, t in 1:T], power[i, t] >= Pmin[i] * is_onₖ[i, t])
# Power balance. BUGFIX: the original closed the parenthesis right after
# Demand[t], multiplying the returned constraint object by (1+uncertainty)
# instead of scaling the demand inside the constraint.
@constraint(primal, [t in 1:T],
    sum(power[i, t] for i in 1:N) + power_shedding[t] + power_curtailment[t] ==
    Demand[t] * (1 + uncertainty[s][t]))
# 10 = per-MWh dispatch cost, 70 = shedding/curtailment penalty (matches the
# ν bounds in the dual).
@objective(primal, Min,
    sum(power[i, t] * 10 for i in 1:N for t in 1:T) +
    sum(power_shedding[t] * 70 for t in 1:T) +
    sum(power_curtailment[t] * 70 for t in 1:T))
For this small example, solving the deterministic equivalent is actually more efficient. However, in larger, more realistic instances, Benders decomposition proves to be the better approach.
I haven’t explicitly formulated the primal to generate cuts using dual(constraint), but I suspect that querying dual() wouldn’t be significantly faster than using value().
If you run @profview test(20, 10), you’ll notice that about 25% of the total runtime is spent retrieving variable values, with some runtime dispatch.