I’m having issues using MadNLPGPU.jl, while a similar code works for MadNLP.jl. I initially tried a direct conversion, then I read that Examodels is better for GPUs. I am not attached to the particular method to solve on GPUs. Whatever is fast and easy to use is good.
The dataset for the problem is not particularly large (hundreds of generators).
CPU code, works great!
function economic_dispatch(coef_a, coef_b, coef_c, max_mw, min_mw, demand, demand_std, start_p, on_cost, gen_config)
@assert length(coef_a) == length(coef_b) == length(coef_c) == length(max_mw) == length(min_mw) == length(start_p)
num_gen = length(coef_a)
model = Model(()->MadNLP.Optimizer(print_level=MadNLP.ERROR, max_iter=100, tol=1e-6))
@variable(model, min_mw[i] <= p[i=1:num_gen] <= max_mw[i], start = start_p[i])
@constraint(model, demand_constraint, sum(p[i] for i in 1:num_gen) >= demand)
@NLobjective(model, Min, sum(coef_a[i]*p[i]^2 + coef_b[i]*p[i] + coef_c[i] for i in 1:num_gen))
optimize!(model)
return value.(p), objective_value(model)
end
GPU code (defaults to CPU always, objective slightly worse in the Examodels framework).
function economic_dispatch_exa(coef_a, coef_b, coef_c, max_mw, min_mw, demand, demand_std, start_p, on_cost, gen_config)
@assert length(coef_a) == length(coef_b) == length(coef_c) == length(max_mw) == length(min_mw) == length(start_p)
num_gen = length(coef_a)
# 1. Define the ExaModel
em = ExaModels.ExaModel()
# Define variables and their bounds (p)
ExaModels.variable(
em,
n=num_gen,
lvar=min_mw,
uvar=max_mw,
start=start_p,
name="p"
)
# 2. Define the Objective Function (Cost)
# The variables are accessed by their name ("p") and index (i)
ExaModels.objective(
em,
sum(
coef_a[i] * em[:p, i]^2 + coef_b[i] * em[:p, i] + coef_c[i]
for i=1:num_gen
)
)
# 3. Define the Constraint (Demand)
# The constraint is now applied as an inequality: sum(p) >= demand
ExaModels.constraint(
em,
sum(em[:p, i] for i=1:num_gen),
lcon=[demand], # lower bound
ucon=[Inf], # upper bound (as it's an inequality >= demand)
name="demand_constraint"
)
# 4. Instantiate the Solver with MadNLPGPU and solve
# Note: ExaModels returns an object containing the solution, not a JuMP model
solver_options = (
print_level=MadNLP.ERROR,
max_iter=100,
tol=1e-6,
linear_solver=MadNLPGPU.CUDSSSolver,
device_type="cuda" # This option is usually taken care of by MadNLPGPU
)
result = MadNLP.madnlp(em; solver_options...)
# 5. Return the results
# The primal solution (p values) is in the `primal` field of the result object
# The objective value is in the `objval` field
return result.primal, result.objval
end
The error is:
┌ Warning: GPU solve failed: UndefVarError(:economic_dispatch_gpu, 0x000000000000982b, Main), falling back to CPU
└ @ Main