Hi,

I have a question about JuMP multithreading performance and would appreciate your help. I wanted to solve a specific optimization problem 4 times, and I did it in 2 ways:

- Solve it repeatedly 4 times on a single thread.
- Solve it on 4 threads simultaneously, one time on each thread. (my laptop has 4 physical cores, and I set the number of threads to 4)

Here is the code I used in the two cases:

- Case 1

```
function model_solve()
    # Build the Rosenbrock test problem once, then re-solve it
    # repeatedly on the current (single) thread.
    model = Model(Ipopt.Optimizer)
    set_silent(model)
    @variable(model, x)
    @variable(model, y)
    @NLobjective(model, Min, (1 - x)^2 + 100 * (y - x^2)^2)
    for batch in 1:4
        # Accumulate wall-clock time (nanoseconds) over 50 solves.
        total_ns = 0
        for _ in 1:50
            t0 = time_ns()
            optimize!(model)
            total_ns += time_ns() - t0
        end
        println(total_ns)
    end
end
```

- Case 2:

```
function model_solve311(i)
    # Construct one independent copy of the Rosenbrock test problem.
    # The index `i` is accepted so each task can build its own model,
    # but it is not otherwise used.
    m = Model(Ipopt.Optimizer)
    set_silent(m)
    @variable(m, x)
    @variable(m, y)
    @NLobjective(m, Min, (1 - x)^2 + 100 * (y - x^2)^2)
    return m
end
function model_solve312(model)
    # Solve `model` 50 times and print the accumulated wall-clock
    # time in nanoseconds.
    elapsed = 0
    for _ in 1:50
        start = time_ns()
        optimize!(model)
        elapsed += time_ns() - start
    end
    println(elapsed)
end
function model_solve31(i)
    # Build a fresh model for task `i`, then time 50 solves of it.
    model_solve312(model_solve311(i))
end
function tmap(f)
    # Launch one task per index, calling `f(i)` for i = 1:4, then
    # collect the four task results in order.
    tasks = [Threads.@spawn f(i) for i in 1:4]
    return map(fetch, tasks)
end
```

The time it takes to solve the problem once in case 2 is significantly higher than in case 1 (`time_run` in case 1 — `model_solve()` — is about **0.65 seconds**, while in case 2 — `tmap(model_solve31)` — it is around **2 seconds**). I would really appreciate it if you could help me understand this behavior and suggest some ways to reduce the multithreaded solving time.

Regards