I am dealing with a projection problem in a space defined by multiple linear constraints, with a problem dimension of about 1000, and I have 1000 such problems in a ‘for’ loop. I initially used the OSQP solver with JuMP, but then it was too slow. Then I removed JuMP and used SparseArrays to construct and solve the problem, but it’s now taking 60-120 seconds, which is faster than before but still slow. Is there a way to speed up the process? Or is there a better solver I could use for this projection problem?
The constraints are A_eq * x = b_eq, A_upper * x >= p.clipping_threshold .* ones(n_upper) and A_lower * x <= -p.clipping_threshold .* ones(n_lower), and I am trying to minimize norm(x - x_ellipsoid), where x is the decision variable. Originally, I had an ellipsoidal constraint as well, but it seems that whenever I first project onto the ellipsoid by scaling and then onto the linear-constraint set above, the result still satisfies the ellipsoidal constraint, so I am not including it in the optimization problem.
Please see the following code for the problem construction:
"""
    build_osqp_solver(A_eq, A_upper, A_lower, n, p)

Set up a reusable OSQP model for the projection QP

    min ||x - x_ellipsoid||^2   s.t.   A_eq x = b_eq,
                                       A_upper x >= p.clipping_threshold,
                                       A_lower x <= -p.clipping_threshold,

with constraint rows stacked in the order eq / upper / lower. The equality
right-hand side is left at zero here; callers overwrite those rows of the
returned `l`/`u` vectors per sample and call `OSQP.update!`.

Returns `(solver, n_eq, l, u)` where `l`/`u` are the cached bound vectors
that are mutated in place on each solve.
"""
function build_osqp_solver(
    A_eq::Matrix{Float64},
    A_upper::Matrix{Float64},
    A_lower::Matrix{Float64},
    n::Int,
    p::ModelParams
)
    n_eq, n_upper, n_lower = size(A_eq, 1), size(A_upper, 1), size(A_lower, 1)
    n_con = n_eq + n_upper + n_lower

    # ||x - x_e||^2 = x'x - 2 x_e'x + const  =>  P = 2I, q supplied per sample.
    P = sparse(2.0 * I, n, n)

    # Stack only the non-empty constraint groups, keeping eq/upper/lower order.
    A_con = if n_con == 0
        spzeros(0, n)
    else
        parts = Matrix{Float64}[M for M in (A_eq, A_upper, A_lower) if size(M, 1) > 0]
        sparse(vcat(parts...))
    end

    # Bound vectors are cached and mutated per sample (equality rows only);
    # inequality rows are fixed once here.
    l = zeros(n_con)
    u = zeros(n_con)
    if n_upper > 0
        rows = n_eq+1 : n_eq+n_upper
        l[rows] .= p.clipping_threshold   # A_upper x >= threshold
        u[rows] .= Inf
    end
    if n_lower > 0
        rows = n_eq+n_upper+1 : n_con
        l[rows] .= -Inf
        u[rows] .= -p.clipping_threshold  # A_lower x <= -threshold
    end

    solver = OSQP.Model()
    OSQP.setup!(solver;
        P = P,
        q = zeros(n),               # placeholder; overwritten each solve
        A = A_con,
        l = l,
        u = u,
        warm_starting = true,       # reuse previous solution across samples
        verbose = false,
        eps_abs = 1e-6,
        eps_rel = 1e-6,
        max_iter = 10000,
        polish = true
    )

    return solver, n_eq, l, u
end
"""
    project_coeff_fast(x0, b_eq, solver, n_eq, l, u, p)

Project `x0` onto the linear-constraint set by first projecting onto the
ellipsoid (`project_onto_ellipsoid`) and then solving the cached OSQP QP
with objective `||x - x_ellipsoid||^2` (i.e. `q = -2 x_ellipsoid`).

`l` and `u` are the bound vectors cached by `build_osqp_solver`; their first
`n_eq` entries are overwritten in place with `b_eq` before each solve.

Returns the QP solution, or the raw ellipsoid projection (with a warning)
when OSQP does not report a solved status.
"""
function project_coeff_fast(
    x0::Vector{Float64},
    b_eq::Vector{Float64},
    solver::OSQP.Model,
    n_eq::Int,
    l::Vector{Float64}, # mutated in place
    u::Vector{Float64}, # mutated in place
    p::ModelParams
)
    x_ellipsoid = project_onto_ellipsoid(x0, p)
    # min ||x - x_e||^2 with P = 2I fixed at setup => only q changes per sample.
    q = -2.0 .* x_ellipsoid
    if n_eq > 0
        # Equality rows: l = u = b_eq. OSQP.update! needs the full vectors.
        l[1:n_eq] .= b_eq
        u[1:n_eq] .= b_eq
        OSQP.update!(solver; q=q, l=l, u=u)
    else
        OSQP.update!(solver; q=q)
    end
    results = OSQP.solve!(solver)
    # BUG FIX: OSQP.jl reports inaccurate convergence as :Solved_inaccurate,
    # not :Almost_Solved, so the old check rejected usable near-solutions.
    if results.info.status != :Solved && results.info.status != :Solved_inaccurate
        @warn "OSQP status: $(results.info.status). Returning ellipsoid projection."
        return x_ellipsoid
    end
    return results.x
end