A semicolon (;) fails to suppress output in the Julia REPL

The behavior described in the title can be observed like this:

julia> JuMP.@constraint(lpr, c_rlt[r in 1:R1, c in 1:R2], sm(A1[r, :] * A2[c, :]', cross_multrix) >= 0); #
8×18 Matrix{JuMP.ConstraintRef{JuMP.Model, MathOptInterface.ConstraintIndex{MathOptInterface.ScalarAffineFunction{Float64}, MathOptInterface.GreaterThan{Float64}}, JuMP.ScalarShape}}:
 c_rlt[1,1] : -22 lpr_g[1] - 22 lpr_g[2] + 1.2 lpr_Dv[1] - 1.2 lpr_Dv[4] - g_x_Dv[1,1] - g_x_Dv[2,1] + g_x_Dv[1,4] + g_x_Dv[2,4] >= -26.4  …  c_rlt[1,18] : 1.2 lpr_Dv[6] - g_x_Dv[1,6] - g_x_Dv[2,6] >= 0
 c_rlt[2,1] : -22 lpr_g[1] - 22 lpr_g[2] - 22 lpr_g[3] + 1.8 lpr_Dv[1] - 1.8 lpr_Dv[4] - g_x_Dv[1,1] - g_x_Dv[2,1] - g_x_Dv[3,1] + g_x_Dv[1,4] + g_x_Dv[2,4] + g_x_Dv[3,4] >= -39.6  …  c_rlt[2,18] : 1.8 lpr_Dv[6] - g_x_Dv[1,6] - g_x_Dv[2,6] - g_x_Dv[3,6] >= 0
 c_rlt[3,1] : 22 lpr_g[1] + g_x_Dv[1,1] - g_x_Dv[1,4] >= 0  …  c_rlt[3,18] : g_x_Dv[1,6] >= 0
 c_rlt[4,1] : 22 lpr_g[2] + g_x_Dv[2,1] - g_x_Dv[2,4] >= 0  …  c_rlt[4,18] : g_x_Dv[2,6] >= 0
 c_rlt[5,1] : 22 lpr_g[3] + g_x_Dv[3,1] - g_x_Dv[3,4] >= 0  …  c_rlt[5,18] : g_x_Dv[3,6] >= 0
 c_rlt[6,1] : -22 lpr_g[1] + lpr_Dv[1] - lpr_Dv[4] - g_x_Dv[1,1] + g_x_Dv[1,4] >= -22  …  c_rlt[6,18] : lpr_Dv[6] - g_x_Dv[1,6] >= 0
 c_rlt[7,1] : -22 lpr_g[2] + lpr_Dv[1] - lpr_Dv[4] - g_x_Dv[2,1] + g_x_Dv[2,4] >= -22  …  c_rlt[7,18] : lpr_Dv[6] - g_x_Dv[2,6] >= 0
 c_rlt[8,1] : -22 lpr_g[3] + lpr_Dv[1] - lpr_Dv[4] - g_x_Dv[3,1] + g_x_Dv[3,4] >= -22  …  c_rlt[8,18] : lpr_Dv[6] - g_x_Dv[3,6] >= 0

I'm using the latest JuMP and Julia. Is this a bug (although not a serious one)?
The code to reproduce this is in the next post.

(The following code is an example of using LP relaxation to solve a 2-stage ARO problem.
The case is an RCR variant of the small case in CCG2012.)
The bug in the title occurs at line 104, the `c_rlt` RLT constraint.
Since no complex numbers are involved here, the adjoint can be rewritten as a transpose;
after doing so, the bug no longer appears.
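
Here is a minimal reproduction without JuMP (a sketch; the trigger seems to be just the postfix adjoint ' combined with a trailing comment):

julia> A = [1 2; 3 4];

julia> A'; # the trailing comment defeats the suppression
2×2 adjoint(::Matrix{Int64}) with eltype Int64:
 1  3
 2  4

julia> transpose(A); # no postfix ', so the output is suppressed as expected

The full script: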

import JuMP, Gurobi; GRB_ENV = Gurobi.Env();
import SparseArrays; import LinearAlgebra.tr as tr
function sm(p, x) return sum(p .* x) end
function optimise(m)
    JuMP.optimize!(m)
    return (
        JuMP.termination_status(m),
        JuMP.primal_status(m),
        JuMP.dual_status(m)
    )
end
macro set_objective_function(m, f) return esc(:(JuMP.set_objective_function($m, JuMP.@expression($m, $f)))) end
function Model(name) # e.g. "st2_lms" or "st1_0M0" or "supp_l0s"
    m = JuMP.Model(() -> Gurobi.Optimizer(GRB_ENV))
    JuMP.MOI.set(m, JuMP.MOI.Name(), name)
    s = name[end-1]
    s == 'm' && JuMP.set_objective_sense(m, JuMP.MIN_SENSE)
    s == 'M' && JuMP.set_objective_sense(m, JuMP.MAX_SENSE)
    last(name) == 's' && JuMP.set_silent(m)
    m
end
function solve_to_normality(m)
    t, p, d = optimise(m)
    name = JuMP.name(m)
    t == JuMP.OPTIMAL || error("$name: $(string(t))")
    p == JuMP.FEASIBLE_POINT || error("$name: (primal) $(string(p))")
    (name[end-2] == 'l' && d != p) && error("$name: (dual) $(string(d))")
end
function safe_upper_bound(ObjBnd) return max(0., ObjBnd + 9.99, 1.01 * ObjBnd) end
function g2d(g) return B_d .+ 40 * g end
function stage2_problem_primal(z, d)
    st2 = Model("st2_primal_lms")
    JuMP.@variable(st2, x[i = 1:3, j = 1:3] >= 0)
    JuMP.@variable(st2, ζ[i = 1:3] >= 0) # an expensive substitute for `z`
    JuMP.@constraint(st2, DI[i = 1:3], z[i] + ζ[i]  >= sum(x[i, :]))
    JuMP.@constraint(st2, DJ[j = 1:3], sum(x[:, j]) >= d[j]        )
    @set_objective_function(st2, sm(R_x, x) + sm(R_ζ, ζ))
    solve_to_normality(st2);
    return JuMP.objective_value(st2)
end
function st2_dual_obj_f(z, d, DI, DJ) return sm(DJ, d) - sm(DI, z) end # 1st-stage decision; scene; 2nd-stage dual decision
function stage2_problem_dual(z, d) # ✅
    st2 = Model("st2_dual_lMs")
    JuMP.@variable(st2, 0 <= DI[i = 1:3] <= R_ζ[i]) # this dual variable is bounded above because its associated primal constraint is "price-relaxed"; the value of `ζ` can be recovered through `-dual(UpperBoundRef(DI))`
    JuMP.@variable(st2, 0 <= DJ[j = 1:3]) # implicitly upper bounded
    JuMP.@constraint(st2, x[i = 1:3, j = 1:3], R_x[i, j] + DI[i] - DJ[j] >= 0)
    @set_objective_function(st2, st2_dual_obj_f(z, d, DI, DJ))
    solve_to_normality(st2)
    return JuMP.objective_value(st2), JuMP.value.(DI), JuMP.value.(DJ)
end
function get_st1_oz_cut(z, d)
    _, DI, DJ = stage2_problem_dual(z, d)
    return sm(DJ, d), -DI # cn, pz
end
function g_2_DI_DJ(z, g) return stage2_problem_dual(z, g2d(g)) end
function DI_DJ_2_g(z, DI, DJ) # This function embodies the uncertainty set, if OBJ were vacant
    adversarial = Model("adversarial_lMs")
    JuMP.@variable(adversarial, 0 <= g[1:3] <= 1)
    JuMP.@constraint(adversarial, sum(first(g, 2)) <= 1.2); JuMP.@constraint(adversarial, sum(g) <= 1.8)
    @set_objective_function(adversarial, st2_dual_obj_f(z, g2d(g), DI, DJ))
    solve_to_normality(adversarial)
    return JuMP.objective_value(adversarial), JuMP.value.(g)
end
function get_A1_A2() # A1 is the uncertainty's; A2 is the st2_dual's
    A1 = SparseArrays.spzeros(R1, 1 + C1); A2 = SparseArrays.spzeros(R2, 1 + C2)
    A1[1, 1] = 1.2; A1[1, 2] = -1; A1[1, 3] = -1; # r1
    A1[2, 1] = 1.8; A1[2, 2] = -1; A1[2, 3] = -1; A1[2, 4] = -1; # r2
    A1[3, 2] = 1; A1[4, 3] = 1; A1[5, 4] = 1; # r3
    A1[6:8, 1] .= 1; A1[6, 2] = -1; A1[7, 3] = -1; A1[8, 4] = -1; # r4
    for i in 1:3, j in 1:3 # constraint c1
        r = (i-1) * 3 + j
        A2[r, 0+ 1] = R_x[i, j]
        A2[r, 1+ i] =  1
        A2[r, 1+3+ j] = -1
    end
    for (i, j) in zip(10:12, 2:4); A2[i, j] = 1; end # constraint c2
    for (i, j) in zip(13:15, 2:4); A2[i, j] = -1; end; A2[13:15, 1] .= R_ζ # constraint c3
    for (i, j) in zip(16:18, 5:7); A2[i, j] = 1; end # constraint c4
    return A1, A2
end
function improve_g_by_BCA(z, g) # Block Coordinate Ascent
    _, DI, DJ    = g_2_DI_DJ(z, g)
    return DI_DJ_2_g(z, DI, DJ) # returns the enhanced ObjVal (lb) and scene (g)
end
R_x, C_y, C_z, B_d = let # Data
    [22 33 24; 33 23 30; 20 25 27], [400, 414, 326], [18, 25, 20], [206, 274, 220]
end; R_ζ = 20 * C_z; # recourse cost for z, designed by me
# 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎
st1 = Model("st1_bms"); JuMP.@variable(st1, st1_o);
JuMP.@variables(st1, begin st1_y[1:3], Bin; st1_z[1:3] >= 0 end); JuMP.@constraint(st1, st1_z .<= 800 * st1_y);
JuMP.set_objective_coefficient(st1, st1_y, C_y); JuMP.set_objective_coefficient(st1, st1_z, C_z);
solve_to_normality(st1); z = JuMP.value.(st1_z); # 🍏
# 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎
best_lpr_ul = zeros(2); best_g = zeros(3); # global book
old_feas_val = zeros(1); # if the heuristic solution of BLP reaches a different value, then we deem it new
R1 = 1+1+3+3; C1 = 3; # 1 ⇒ uncertainty_side, 2 ⇒ st2_dual side
R2 = 3*3+3+3+3; C2 = 3+3; A1, A2 = get_A1_A2();
# 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎 🥎
lpr = Model("lpr_lMs");
JuMP.@variables(lpr, begin lpr_g[1:C1]; lpr_Dv[1:C2]; g_x_Dv[1:C1, 1:C2] end);
JuMP.@expression(lpr, cross_multrix, [1 lpr_Dv'; lpr_g g_x_Dv]); JuMP.@expression(lpr, lpr_DI, lpr_Dv[1:3]);
JuMP.@expression(lpr, lpr_DJ, lpr_Dv[4:6]); JuMP.@expression(lpr, g_x_DJ, g_x_Dv[:, 4:6]);
JuMP.@constraint(lpr, [A1 * [1; lpr_g]; A2 * [1; lpr_Dv]] .>= 0); # [uncertainty_side; st2_dual_side] constraints
JuMP.@constraint(lpr, c_rlt[r in 1:R1, c in 1:R2], sm(A1[r, :] * A2[c, :]', cross_multrix) >= 0); # First, add all RLT constrs
@set_objective_function(lpr, sm(B_d, lpr_DJ) + 40 * tr(g_x_DJ)) # common static part
JuMP.set_objective_coefficient(lpr, lpr_DI, -z); # 🟡 1st-decision dependent
solve_to_normality(lpr); 
c_safe_ub = JuMP.@constraint(lpr, safe_upper_bound(JuMP.objective_bound(lpr)) >= JuMP.objective_function(lpr));
let # initialize global book
    solve_to_normality(lpr)
    best_lpr_ul[1] = JuMP.objective_bound(lpr)
    g = JuMP.value.(lpr_g) # get a heuristic solution from `lpr`
    old_feas_val[1] = st2_dual_obj_f(z, g2d(g), JuMP.value.(lpr_DI), JuMP.value.(lpr_DJ)) # record the old value to help discover a new heuristic solution
    lb, g = improve_g_by_BCA(z, g)
    best_lpr_ul[2] = lb; best_g .= g
end;
JuMP.set_objective_coefficient(st1, st1_o, 1); # 1️⃣ a one-shot turn-on
cn, pz = get_st1_oz_cut(z, g2d(best_g)); JuMP.@constraint(st1, st1_o >= cn + sm(pz, st1_z));
solve_to_normality(st1); z = JuMP.value.(st1_z); # 🍏✅ At this line, the 1st-stage is auto bounded
for i in 1:13 # after these iterations, we can prove that it converges to global optimality
    JuMP.delete(lpr, c_safe_ub)
    JuMP.set_objective_coefficient(lpr, lpr_DI, -z); # 🟡 1st-decision variant
    solve_to_normality(lpr); 
    c_safe_ub = JuMP.@constraint(lpr, safe_upper_bound(JuMP.objective_bound(lpr)) >= JuMP.objective_function(lpr))
    let # update global book
        solve_to_normality(lpr)
        best_lpr_ul[1] = JuMP.objective_bound(lpr)
        g = JuMP.value.(lpr_g) # get a heuristic solution from `lpr`
        old_feas_val[1] = st2_dual_obj_f(z, g2d(g), JuMP.value.(lpr_DI), JuMP.value.(lpr_DJ)) # record the old value to help discover a new heuristic solution
        lb, g = improve_g_by_BCA(z, g)
        best_lpr_ul[2] = lb; best_g .= g
    end
    cn, pz = get_st1_oz_cut(z, g2d(best_g)); JuMP.@constraint(st1, st1_o >= cn + sm(pz, st1_z))
    solve_to_normality(st1); z = JuMP.value.(st1_z) # 🍏
    @info "lb = $(JuMP.objective_bound(st1))"
end

This is indeed a bug, but it has almost nothing to do with JuMP or your code; it’s just the single postfix ' that’s confusing things.
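
For context, ' plays two roles in Julia syntax, which is presumably what trips up a simplistic scan of the input (an illustrative sketch, not the REPL’s actual logic):

A = [1 2; 3 4]
c = ';'   # here ' delimits a character literal containing a semicolon
y = A'    # here the same token is the postfix adjoint operator

A scanner that only tracks whether it is inside a '...' literal would treat everything after the postfix ', including the final ; and the # comment, as unterminated character-literal content, and so conclude the line does not end with a semicolon.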


Oh, yes. I replaced the adjoint with transpose, and then it doesn’t show anymore.
Do the developers have a plan to fix this issue?

You could! The ; suppression is a heuristic based on an overly simplistic parsing of the input. It lives here, in one pageful of Julia code: julia/stdlib/REPL/src/REPL.jl at 13311f324e850fefddfcdf43d6c93b9365e2cf46 · JuliaLang/julia · GitHub
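
You can poke at the heuristic directly (a sketch; REPL.ends_with_semicolon is the internal predicate defined in the linked file, not public API, so results may vary across versions):

julia> import REPL

julia> REPL.ends_with_semicolon("1 + 1; # comment") # a plain trailing ; is detected
true

julia> REPL.ends_with_semicolon("A'; # comment") # the postfix ' hides the ; on affected versions
false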

In the meantime, you can work around it by deleting the trailing comment.
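
That is, on affected versions the output is suppressed as expected once the comment is gone:

julia> A = [1 2; 3 4];

julia> A';

julia> transpose(A); # a trailing comment is harmless when no postfix ' precedes the ;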


I believe I’ve fixed this. It was a sort of drive-by fix (I’m leaving in a few minutes, and I no longer follow up much on my PRs). I didn’t mean to add a test, not being in the habit… but I added one after seeing the “test needed” label (I believe one CI failure just means the label needs to go away, and the other, “build i686-w64-mingw32”, must be a false alarm).
