In the context of my example, this is not the case.
In my case the cuts are RLT cuts: finitely many (but numerous) explicit constraints.
I ran a test to check whether there is any need to manually promote a pure LP to a MIP by introducing a dummy integer decision (in order to piggyback on Gurobi's Lazy constraint attribute).
The result: it is needless; this contrived trick is not faster, whether the problem scale is small, moderate, or large.
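For reference, the trick in isolation amounts to the two calls that are left commented out in the full test script. A minimal standalone sketch (a working Gurobi license is assumed; the tiny LP here is invented purely for illustration):

import JuMP, Gurobi
m = JuMP.direct_model(Gurobi.Optimizer())   # direct mode, so Gurobi-specific attributes can be set
JuMP.@variable(m, 0 <= y[1:3] <= 1)         # a toy LP
c = JuMP.@constraint(m, sum(y) <= 2)
JuMP.@objective(m, Max, sum(y))
# the "promotion": a dummy binary fixed to 0 makes Gurobi treat the model as a MIP
dummy_decision = JuMP.@variable(m, binary = true, upper_bound = 0)
# mark the constraint as Lazy (value 1); the attribute only takes effect in a MIP solve, hence the promotion
JuMP.MOI.set(JuMP.backend(m), Gurobi.ConstraintAttribute("Lazy"), JuMP.index(c), 1)
JuMP.optimize!(m)

The full test script: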
import JuMP, Random, Gurobi; GRB_ENV = Gurobi.Env();
function optimise(m)
    JuMP.optimize!(m)
    return (
        JuMP.termination_status(m),
        JuMP.primal_status(m),
        JuMP.dual_status(m)
    )
end;
# convenience macro: build f as a JuMP expression in m, then set it as the objective function
macro set_objective_function(m, f)
    return esc(:(JuMP.set_objective_function($m, JuMP.@expression($m, $f))))
end;
function Model(name)
    # the trailing characters of name encode settings (sense, silence, and the dual check in solve_to_normality)
    m = JuMP.direct_model(Gurobi.Optimizer(GRB_ENV))
    JuMP.MOI.set(m, JuMP.MOI.Name(), name)
    s = name[end-1]                              # second-to-last character: objective sense
    s == 'm' && JuMP.set_objective_sense(m, JuMP.MIN_SENSE)
    s == 'M' && JuMP.set_objective_sense(m, JuMP.MAX_SENSE)
    last(name) == 's' && JuMP.set_silent(m)      # trailing 's': silent mode
    m
end;
function solve_to_normality(m)
    t, p, d = optimise(m)
    name = JuMP.name(m)
    t == JuMP.OPTIMAL || error("$name: $(string(t))")
    p == JuMP.FEASIBLE_POINT || error("$name: (primal) $(string(p))")
    (name[end-2] == 'l' && d != p) && error("$name: (dual) $(string(d))")  # 'l': also require a feasible dual point
    return JuMP.result_count(m)
end;
N = 22500
Random.seed!(1)
model = Model("test_bmD")
JuMP.set_attribute(model, "Presolve", 0)
JuMP.set_attribute(model, "TimeLimit", 60 * 9)
JuMP.@variable(model, rand(-9:.17:-1) <= x[1:N] <= rand(0.7:.173:9))
# dummy_decision = JuMP.@variable(model, binary = true, upper_bound = 0)
JuMP.@constraint(model, non_lazy_constr, sum(x) >= -5)
for i in 1:(N ÷ 2)
    c = JuMP.@constraint(model, rand(-2:2, N)' * x <= rand(-1:1))
    # JuMP.MOI.set(JuMP.backend(model), Gurobi.ConstraintAttribute("Lazy"), JuMP.index(c), 1)
end
@set_objective_function(model, rand(-1:.17:1, N)' * x)
solve_to_normality(model)
JuMP.objective_bound(model) # [Test Result] LP: -52262.27784988559 > MIP: -53552.45952117307;
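The script as shown is the pure-LP run; to reproduce the MIP side of the comparison, enable the two commented-out lines (the dummy_decision variable and the Lazy constraint attribute).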