I've found a new (perhaps better) way to write this NormOneCone constraint for the case where the model is intended to be revised and re-solved iteratively. @odow, is this good? Any advice is appreciated.
import LinearAlgebra: norm
import JuMP, Gurobi
I = 2; # `I` is the length of the decision vector `x`
x_old = rand(I); # after `optimize!`, the new solution `value.(x)` should stay close to `x_old`
# In this code file, I propose using formulation `reg2` ✅ instead of the existing `reg0`
# `reg0` is the original version of the regularization subproblem
reg0 = JuMP.Model(Gurobi.Optimizer); JuMP.set_silent(reg0);
JuMP.@variable(reg0, x[1:I])
JuMP.@variable(reg0, adx[1:I]) # adx[i] represents abs(x[i] - x_old[i]), ∀i (tight at the optimum)
JuMP.@constraint(reg0, c1[i = 1:I], x[i] - x_old[i] ≤ adx[i])
JuMP.@constraint(reg0, c2[i = 1:I], x_old[i] - x[i] ≤ adx[i])
JuMP.@objective(reg0, Min, 2 * sum(adx)) # multiplying by a positive coefficient `2` is immaterial
JuMP.optimize!(reg0); JuMP.assert_is_solved_and_feasible(reg0; allow_local = false)
distance = norm(JuMP.value.(x) - x_old, 1)
println("distance = $distance")
# Next, we introduce 2 new decision variables (m, p) based on the above formulation
reg1 = JuMP.Model() # `reg1` is only a derivation step, so no optimizer is attached and it is never solved
JuMP.@variable(reg1, x[1:I])
JuMP.@variable(reg1, adx[1:I])
JuMP.@variable(reg1, m[1:I])
JuMP.@variable(reg1, p[1:I])
JuMP.@constraint(reg1, [i = 1:I], adx[i] == m[i] + x[i]) # m[i] := adx[i] - x[i], ∀i
JuMP.@constraint(reg1, [i = 1:I], adx[i] == p[i] - x[i]) # p[i] := adx[i] + x[i], ∀i
# rewrite the 2 constraints `c1` and `c2` in terms of `m` and `p`
JuMP.@constraint(reg1, c1[i = 1:I], -x_old[i] ≤ m[i])
JuMP.@constraint(reg1, c2[i = 1:I], x_old[i] ≤ p[i])
JuMP.@objective(reg1, Min, sum(adx + adx)) # == 2 * sum(adx) == sum(m + p), matching reg0's objective
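# A quick numeric spot-check (a sketch) of the identity that justifies the objectives:
# from m = adx - x and p = adx + x we get m + p == 2 * adx, so sum(adx + adx) == sum(m + p).
let adx = rand(I), x = rand(I)
    @assert (adx - x) + (adx + x) ≈ 2 * adx
end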
# Finally, we eliminate the free decision variable `adx` and derive the following ✅
reg2 = JuMP.Model(Gurobi.Optimizer); JuMP.set_silent(reg2);
JuMP.@variable(reg2, x[1:I])
JuMP.@variable(reg2, m[1:I])
JuMP.@variable(reg2, p[1:I])
JuMP.@expression(reg2, new_obj_expr, sum(m + p))
JuMP.@constraint(reg2, [i = 1:I], m[i] + x[i] == p[i] - x[i]) # both sides equal adx[i], ∀i
JuMP.set_lower_bound.(m, -x_old) # in place of `c1`
JuMP.set_lower_bound.(p, x_old) # in place of `c2`
JuMP.@objective(reg2, Min, new_obj_expr)
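# A quick sanity check (a sketch, not part of the proposal): with the initial `x_old`,
# `reg2` should attain the same optimal value as `reg0` (both are 0 here).
JuMP.optimize!(reg2); JuMP.assert_is_solved_and_feasible(reg2; allow_local = false)
isapprox(JuMP.objective_value(reg2), JuMP.objective_value(reg0); atol = 1e-8) || error("objective mismatch")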
# 🖥️ Let's do some tests on `reg2`
for t in 1:100
    x_old = rand(I)
    let # update `reg2` in place; only the variable bounds depend on `x_old`
        JuMP.set_lower_bound.(m, -x_old)
        JuMP.set_lower_bound.(p, x_old)
        JuMP.@objective(reg2, Min, new_obj_expr) # the objective itself is unchanged, so this reset is optional
    end
    JuMP.optimize!(reg2); JuMP.assert_is_solved_and_feasible(reg2; allow_local = false)
    distance = norm(JuMP.value.(x) - x_old, 1)
    iszero(distance) || error("distance is not zero!")
end
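# For contrast, a sketch of the rebuild-from-scratch alternative that `reg2` avoids;
# `solve_reg0` is a hypothetical helper, not part of the proposal above. Rebuilding
# allocates a fresh model and reloads it into Gurobi on every call, whereas `reg2`
# only updates variable bounds in place, which can also let the solver warm-start.
function solve_reg0(x_old)
    model = JuMP.Model(Gurobi.Optimizer); JuMP.set_silent(model)
    JuMP.@variable(model, x[1:I])
    JuMP.@variable(model, adx[1:I])
    JuMP.@constraint(model, [i = 1:I], x[i] - x_old[i] ≤ adx[i])
    JuMP.@constraint(model, [i = 1:I], x_old[i] - x[i] ≤ adx[i])
    JuMP.@objective(model, Min, sum(adx))
    JuMP.optimize!(model); JuMP.assert_is_solved_and_feasible(model; allow_local = false)
    return JuMP.value.(x)
end
# usage: solve_reg0(rand(I))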