I did some experiments with parallel processing, i.e. solving models in parallel. But the result is not what I expected:
# Load Distributed on the master process and bring in JuMP for model building.
using Distributed
import JuMP
# Spawn 4 worker processes. NOTE(review): addprocs must run before the
# @everywhere lines below, otherwise the workers won't receive those definitions.
addprocs(4)
"""
    simple_model(a, b)

Build (without solving) a small JuMP LP:
maximize `a*x + b*y` subject to `x + 5y <= 3`, `0 <= x <= 2`, `0 <= y <= 30`.
No optimizer is attached here; see `solve_model`.
"""
function simple_model(a, b)
    m = JuMP.Model()
    JuMP.@variable(m, 0 <= x <= 2)
    JuMP.@variable(m, 0 <= y <= 30)
    JuMP.@objective(m, Max, a * x + b * y)
    JuMP.@constraint(m, x + 5y <= 3.0)
    return m
end
@everywhere import Clp, JuMP
"""
    solve_model(model)

Attach a silenced Clp optimizer to `model` and solve it in place.
Defined with `@everywhere` so `pmap` can call it on worker processes.

Fixes vs. the original:
- `set_optimizer` / `set_optimizer_attribute` are called directly — a JuMP
  `Model` is a scalar, so the broadcast dots (`set_optimizer.(model, …)`)
  were incorrect, and splatting a `Dict` into a broadcast call
  (`set_optimizer_attributes.(model, params...)`) was malformed.
- The attribute is set inline instead of rebuilding a `Dict` per call.
"""
@everywhere function solve_model(model)
    JuMP.set_optimizer(model, Clp.Optimizer)
    # LogLevel 0 silences Clp's per-solve output, which would otherwise
    # dominate runtime and garble output from 1000 solves.
    JuMP.set_optimizer_attribute(model, "LogLevel", 0)
    JuMP.optimize!(model)
    return
end
# Build 1000 random LPs on the master process.
models = [simple_model(rand(), rand()) for i in 1:1000];

# --- Serial baseline: solve every model on the master, 10 repetitions. ---
t0 = time()
for i in 1:10
    map(solve_model, models)
end
t1 = time()
println("Serial execution: elapsed time: ", (t1 - t0) / 10, " seconds")

# --- Parallel: pmap ships each model to a worker, solves it there, and
# ships the result back. NOTE(review): this serialization round-trip per
# tiny model is why pmap is slower here — the solve itself is far cheaper
# than the communication. Larger batch_size amortizes but doesn't remove it.
t0 = time()
for i in 1:10
    pmap(solve_model, models, batch_size=1)
end
t1 = time()
# Fixed: the original was missing the comma before " seconds" (syntax error).
println("Parallel execution: elapsed time: ", (t1 - t0) / 10, " seconds")
Serial execution: elapsed time: 1.6002999782562255 seconds
Parallel execution: elapsed time: 3.769099998474121 seconds
If I increase the `batch_size` to 10, I get better results:
Serial execution: elapsed time: 1.470799994468689 seconds
Parallel execution: elapsed time: 1.8813000202178956 seconds
But still, the parallel processing approach is slower.
Am I doing something wrong?