Multithreading unstable with JuMP

Hi, I am trying to use multithreading to make a loop parallel. That loop calls a function to build the model and then optimise it. So individual iterations are independent, except that results are stored in arrays. Essentially, inside the loop the bound on one of the variables is changed and the problem is optimised at each iteration.

The actual problem is complicated and I have not been able to produce a MWE that reproduces that exact error. Below is a simplified example to give an idea of the code for the actual problem and what may be causing an error. This code sometimes produces an error (i.e. if you run it a few times), and I am not sure whether it is related to the problem being infeasible or to multithreading.

I have also given the error message that I get on the actual problem at the bottom.
What may be causing the error on the actual problem (message shown at the bottom)? How can I fix it?

using DataFrames, JuMP, Clp, CSV
# Draw `m` synthetic cost coefficients, sampled uniformly from the range
# 20.0:140.0 (whole-number Float64 values).
function generate_data(m)
    return rand(20.0:140.0, m)
end

# Build (but do not solve) the LP
#     min  cᵀx - y
#     s.t. Σᵢ xᵢ = 1,  cᵀx ≤ 100,  x ≥ 0,  y ≤ 10
# Returns the model together with the variable references `x` and `y`.
function build(c)
    n = length(c)
    model = Model(Clp.Optimizer)
    set_optimizer_attribute(model, MOI.Silent(), true)
    @variable(model, x[1:n] >= 0)
    @variable(model, y <= 10)
    @objective(model, Min, sum(c[i] * x[i] for i in 1:n) - y)
    @constraint(model, con1, sum(x[i] for i in 1:n) == 1)
    @constraint(model, con2, sum(c[i] * x[i] for i in 1:n) <= 100)
    return model, x, y
end
# Cumulative sum of `Values` along the first dimension.
summation(Values) = cumsum(Values, dims = 1)
# Total of `Values` along the first dimension; for a vector this yields a
# 1-element vector (callers index the result with `[1]`).
some_thing(Values) = sum(Values, dims = 1)

# Solve `n` independent LPs (each with `m` random cost coefficients) in
# parallel, store per-iteration results column-wise, and dump them to CSV.
# Each iteration builds its own model via `build`, so no solver object is
# shared between threads.
function multithread_run(n, m)
    A_id = Array{Float64,2}(undef, m, n)
    B_id = Vector{Float64}(undef, n)
    x_id = Array{Float64,2}(undef, m, n)
    Threads.@threads for i in 1:n
        Values = generate_data(m)
        bound = (i - 1) * 30 / n
        model, x, y = build(Values)
        set_upper_bound(y, bound)
        optimize!(model)
        # Only query the primal solution when the solver proved optimality;
        # querying an infeasible/unbounded solve is what raised the
        # "Primal solution not available" error.  Failed iterations are
        # recorded as NaN instead of crashing the whole run.
        if termination_status(model) == MOI.OPTIMAL
            x1 = JuMP.value.(x)
            A_id[:, i] = summation(x1)
            B_id[i] = some_thing(x1)[1]
            x_id[:, i] = x1
        else
            A_id[:, i] .= NaN
            B_id[i] = NaN
            x_id[:, i] .= NaN
        end
    end
    df1 = DataFrame(x_id)
    df2 = DataFrame(A_id)
    df3 = DataFrame(ID = 1:n, some_thing = B_id)
    CSV.write("DataFrame1.csv", df1)
    CSV.write("DataFrame2.csv", df2)
    CSV.write("DataFrame3.csv", df3)
    return A_id, x_id, B_id
end

Running multithread_run(10,3) a few times gives the following error. I don’t know whether it arises from multithreading or just because the problem is infeasible.

ERROR: TaskFailedException:
Primal solution not available
Stacktrace:
 [1] error(::String) at .\error.jl:33
 [2] get(::Clp.Optimizer, ::MathOptInterface.VariablePrimal, ::MathOptInterface.VariableIndex) at C:\Users\.julia\packages\Clp\3ZgbR\src\MOI_wrapper\MOI_wrapper.jl:467
 [3] get(::MathOptInterface.Utilities.CachingOptimizer{Clp.Optimizer,MathOptInterface.Utilities.UniversalFallback{MathOptInterface.Utilities.Model{Float64}}}, ::MathOptInterface.VariablePrimal, ::MathOptInterface.VariableIndex) at C:\Users \.julia\packages\MathOptInterface\k7UUH\src\Utilities\cachingoptimizer.jl:605
 [4] get(::MathOptInterface.Bridges.LazyBridgeOptimizer{MathOptInterface.Utilities.CachingOptimizer{Clp.Optimizer,MathOptInterface.Utilities.UniversalFallback{MathOptInterface.Utilities.Model{Float64}}}}, ::MathOptInterface.VariablePrimal, ::MathOptInterface.VariableIndex) at C:\Users \.julia\packages\MathOptInterface\k7UUH\src\Bridges\bridge_optimizer.jl:808
 [5] get(::MathOptInterface.Utilities.CachingOptimizer{MathOptInterface.AbstractOptimizer,MathOptInterface.Utilities.UniversalFallback{MathOptInterface.Utilities.Model{Float64}}}, ::MathOptInterface.VariablePrimal, ::MathOptInterface.VariableIndex) at C:\Users\.julia\packages\MathOptInterface\k7UUH\src\Utilities\cachingoptimizer.jl:605
 [6] _moi_get_result(::MathOptInterface.Utilities.CachingOptimizer{MathOptInterface.AbstractOptimizer,MathOptInterface.Utilities.UniversalFallback{MathOptInterface.Utilities.Model{Float64}}}, ::MathOptInterface.VariablePrimal, ::Vararg{Any,N} where N) at C:\Users \.julia\packages\JuMP\e0Uc2\src\JuMP.jl:848
 [7] get(::Model, ::MathOptInterface.VariablePrimal, ::VariableRef) at C:\Users\.julia\packages\JuMP\e0Uc2\src\JuMP.jl:878
 [8] value(::VariableRef; result::Int64) at C:\Users\.julia\packages\JuMP\e0Uc2\src\variables.jl:767
 [9] value at C:\Users\.julia\packages\JuMP\e0Uc2\src\variables.jl:767 [inlined]
 [10] _broadcast_getindex_evalf at .\broadcast.jl:648 [inlined]
 [11] _broadcast_getindex at .\broadcast.jl:621 [inlined]
 [12] getindex at .\broadcast.jl:575 [inlined]
 [13] macro expansion at .\broadcast.jl:932 [inlined]
 [14] macro expansion at .\simdloop.jl:77 [inlined]
 [15] copyto! at .\broadcast.jl:931 [inlined]
 [16] copyto! at .\broadcast.jl:886 [inlined]
 [17] copy at .\broadcast.jl:862 [inlined]
 [18] materialize at .\broadcast.jl:837 [inlined]
 [19] macro expansion at C:\Users\Documents\Julia\Multithreading\multithreadingOpt.jl:66 [inlined]
 [20] (::var"#128#threadsfor_fun#15"{Int64,Int64,Array{Float64,2},Array{Float64,1},Array{Float64,2},UnitRange{Int64}})(::Bool) at .\threadingconstructs.jl:81
 [21] (::var"#128#threadsfor_fun#15"{Int64,Int64,Array{Float64,2},Array{Float64,1},Array{Float64,2},UnitRange{Int64}})() at .\threadingconstructs.jl:48

Error on the actual problem
Usually I get the following error when I make the for loop multithreaded

ERROR: TaskFailedException:
OptimizeNotCalled()
Stacktrace:
 [1] _moi_get_result(::MathOptInterface.Utilities.CachingOptimizer{MathOptInterface.AbstractOptimizer,MathOptInterface.Utilities.UniversalFallback{MathOptInterface.Utilities.Model{Float64}}}, ::MathOptInterface.VariablePrimal, ::Vararg{Any,N} where N) at C:\Users\.julia\packages\JuMP\e0Uc2\src\JuMP.jl:846
 [2] get(::Model, ::MathOptInterface.VariablePrimal, ::VariableRef) at C:\Users\.julia\packages\JuMP\e0Uc2\src\JuMP.jl:878
 [3] value(::VariableRef; result::Int64) at C:\Users\.julia\packages\JuMP\e0Uc2\src\variables.jl:767
 [4] value at C:\Users\.julia\packages\JuMP\e0Uc2\src\variables.jl:767 [inlined]
 [5] _broadcast_getindex_evalf at .\broadcast.jl:648 [inlined]
 [6] _broadcast_getindex at .\broadcast.jl:621 [inlined]
 [7] getindex at .\broadcast.jl:575 [inlined]
 [8] macro expansion at .\broadcast.jl:932 [inlined]
 [9] macro expansion at .\simdloop.jl:77 [inlined]
 [10] copyto! at .\broadcast.jl:931 [inlined]
 [11] copyto! at .\broadcast.jl:886 [inlined]
 [12] copy at .\broadcast.jl:862 [inlined]
 [13] materialize(::Base.Broadcast.Broadcasted{Base.Broadcast.DefaultArrayStyle{1},Nothing,typeof(value),Tuple{Array{VariableRef,1}}}) at .\broadcast.jl:837
 [14] macro expansion at C:\Users\Documents\Julia\Op.jl:150 [inlined]
 [15] (::var"#238#threadsfor_fun#32"{Adjoint{Float64,Array{Float64,2}},Adjoint{Float64,Array{Float64,2}},Adjoint{Float64,Array{Float64,2}},Adjoint{Float64,Array{Float64,2}},Array{Float64,1},Array{Float64,2},Float64,Int64,Int64,Array{Float64,2},Float64,Int64,Array{Float64,1},Array{Float64,1},Array{Float64,2},UnitRange{Int64}})(::Bool) at .\threadingconstructs.jl:81
 [16] (::var"#238#threadsfor_fun#32"{Adjoint{Float64,Array{Float64,2}},Adjoint{Float64,Array{Float64,2}},Adjoint{Float64,Array{Float64,2}},Adjoint{Float64,Array{Float64,2}},Array{Float64,1},Array{Float64,2},Float64,Int64,Int64,Array{Float64,2},Float64,Int64,Array{Float64,1},Array{Float64,1},Array{Float64,2},UnitRange{Int64}})() at .\threadingconstructs.jl:48
Stacktrace:
 [1] wait at .\task.jl:267 [inlined]
 [2] threading_run(::Function) at .\threadingconstructs.jl:34
 [3] macro expansion at .\threadingconstructs.jl:93 [inlined]
 [4] run_eff_front(::String, ::String, ::String, ::String, ::String, ::String; test1_threshold::Float64) at C:\Users\Documents\Julia\Op.jl:143
 [5] top-level scope at none:1

Sometimes, I get a different error on the actual code. See example below:

Exception: EXCEPTION_ACCESS_VIOLATION at 0x70a1cb6a --  at 0x70a1cb6a -- g report with steps to reproduce this fault, and any error messages that follow (in their entirety). Thanks.
Exception: EXCEPTION_ACCESS_VIOLATION at 0x70a1cb6a --  at 0x70a1cb6a -- OLATION with steps to reproduce this fault, and any error messages that follow (in their entirety). Thanks.
Exception: EXCEPTION_ACCESS_VIOLATION at 0x70a1cb6a --  at 0x70a1cb6a -- OLATIONClp_logLevel at C:\Users\Clp_logLevel at C:\Users\.julia\artifacts\b6212337a44c46db8ea6bd090bff66ffe4b3d3d3\bin\libClp-1.dll (unknown line)
in expression starting at none:1

Please submit a bug report with steps to reproduce this fault, and any error messages that follow (in their entirety). Thanks.
Exception: EXCEPTION_ACCESS_VIOLATION at 0x391e0d0 -- (julia) realloc: Invalid argument

signal (22): SIGABRT
port with steps to reproduce this fault, and any error messages that follow (in their entirety). Thanks.
Exception: EXCEPTION_ACCESS_VIOLAin expression starting at  at 0x7ffd9b93557a --  at 0x7ffd9b93557a -- at none:1
memset at C:\WINDOWS\SYSTEM32\ntdll.dll (unknown line)
in expression starting at none:1
memset at C:\WINDOWS\SYSTEM32\ntdll.dll (unknown line)
_ZN4llvm15DWARFUnitVector12addUnitsImplERNS_12DWARFContextERKNS_11DWARFObjectERKNS_12DWARFSectionEPKNS_16DWARFDebugAbbrevEPS7_SC_NS_9StringRefES8_SC_S8_bbbNS_16DWARFSectionKindE at C:\Users\AppData\Local\Programs\Julia 1.5.2\bin\LLVM.dll (unknown line)
in expression starting at none:1
EM32\ntdll.dll (unknown line)
in expression starting at none:1
RtlAllocateHeap at C:\WINDOWS\SYSTEM32\ntdll.dll (unknown line)
malloc at C:\WINDOWS\System32\msvcrt.dll (unknown line)
Znwy at C:\Users\AppData\Local\Programs\Julia 1.5.2\bin\libstdc++-6.dll (unknown line)
ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE9_M_mutateEyyPKcy at C:\Users\AppData\Local\Programs\Julia 1.5.2\bin\libstdc++-6.dll (unknown line)
ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_replaceEyyPKcy at C:\Users\ \AppData\Local\Programs\Julia 1.5.2\bin\libstdc++-6.dll (unknown line)
ZNKSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE3strEv at C:\Users\AppData\Local\Programs\Julia 1.5.2\bin\libstdc++-6.dll (unknown line)
crt_sig_handler at /cygdrive/d/buildbot/worker/package_win64/build/src\signals-win.c:92
raise at C:\WINDOWS\System32\msvcrt.dll (unknown line)
abort at C:\WINDOWS\System32\msvcrt.dll (unknown line)
_ZN4llvm15DWARFUnitVector12addUnitsImplERNS_12DWARFContextERKNS_11DWARFObjectERKNS_12DWARFSectionEPKNS_16DWARFDebugAbbrevEPS7_SC_NS_9StringRefES8_SC_S8_bbbNS_16DWARFSectionKindE at C:\Users\AppData\Local\Programs\Julia 1.5.2\bin\LLVM.dll (unknown line)
_ZN4llvm15DWARFUnitVector18addUnitsForSectionERNS_12DWARFContextERKNS_12DWARFSectionENS_16DWARFSectionKindE at C:\Users\AppData\Local\Programs\Julia 1.5.2\bin\LLVM.dll (unknown line)
realloc_s at /cygdrive/d/buildbot/worker/package_win64/build/src/support\dtypes.h:379 [inlined]
realloc_s at /cygdrive/d/buildbot/worker/package_win64/build/src/support\dtypes.h:371 [inlined]
jl_copy_str at /cygdrive/d/buildbot/worker/package_win64/build/src\julia_internal.h:818 [inlined]
jl_dylib_DI_for_fptr at /cygdrive/d/buildbot/worker/package_win64/build/src\debuginfo.cpp:1098
jl_getDylibFunctionInfo at /cygdrive/d/buildbot/worker/package_win64/build/src\debuginfo.cpp:1176 [inlined]
jl_getFunctionInfo at /cygdrive/d/buildbot/worker/package_win64/build/src\debuginfo.cpp:1243
_ZNK12_GLOBAL__N_116DWARFObjInMemory19forEachInfoSectionsEN4llvm12function_refIFvRKNS1_12DWARFSectionEEEE at C:\Users\AppData\Local\Programs\Julia 1.5.2\bin\LLVM.dll (unknown line)
jl_print_native_codeloc at /cygdrive/d/buildbot/worker/package_win64/build/src\stackwalk.c:652
jl_critical_error at /cygdrive/d/buildbot/worker/package_win64/build/src\signal-handling.c:239 [inlined]
jl_exception_handler at /cygdrive/d/buildbot/worker/package_win64/build/src\signals-win.c:302
ams\Julia 1.5.2\bin\LLVM.dll (unknown line)
jl_exception_handler at /cygdrive/d/buildbot/worker/package_win64/build/src\signals-win.c:302
__julia_personality at /cygdrive/d/buildbot/worker/package_win64/build/src/support\win32_ucontext.c:28
_chkstk at C:\WINDOWS\SYSTEM32\ntdll.dll (unknown line)
RtlRaiseException at C:\WINDOWS\SYSTEM32\ntdll.dll (unknown line)
KiUserExceptionDispatcher at C:\WINDOWS\SYSTEM32\ntdll.dll (unknown line)

The essential steps in the actual problem are similar to the question I asked here. The difference is that in this question multithreading is being used for an optimisation problem.

Sometimes your model is infeasible and does not have a solution.

After optimize!, you should check termination_status(model) == MOI.OPTIMAL and/or primal_status(model) == MOI.FEASIBLE_POINT before accessing value.(x).

Documentation: https://jump.dev/JuMP.jl/stable/solutions/#Obtaining-solutions-1

That makes sense for the example of the code I provided. The bit I am struggling is with the error I am getting on my actual problem for which I have not been able to put together a MWE.

I get this error on the actual problem when I use the @threads macro before the for loop. The code for this optimisation problem is more complex but the loop where I am trying to use multithreading is doing operations in a very similar manner to the example code provided above.

Does this error message suggest a problem with the use of @threads macro? In serial the same code runs without any issues.

I don’t think we can provide advice without a MWE.

OptimizeNotCalled suggests you are attempting querying the solution of a variable before calling optimize!.

Note that every thread should have a separate copy of the model, and that you cannot pass a model between threads.

Ok. I will try to create a MWE that reproduces this error and post it here.

Errors similar to the one I get have been reported by others. I think it may be a bug in multithreading.

The error is almost certainly due to how you are calling Clp, and not a bug in multithreading. We need a MWE.

Hi @odow, I have finally managed to create a MWE that reproduces the error! If I get rid of Threads.@threads before the for loop in run_modelthreading(;α=0.05) function then the error goes away.

Please could you advise on this?

Source of the error
The lines 2-4 of the function run_modelthreading(;α=0.05) apparently are causing this error. If I get rid of them and replace with Limit = 50 then the code runs fine.

model, x, y, γ = build_model(FutureValue,Loss,Demand,nproducts,prob,nscen,α)
optimize!(model)
Limit = objective_value(model)

MWE

using JuMP
using Clp
# Generate a random problem instance: initial prices (1×5), future values
# for 1000 scenarios × 5 products, per-scenario demands, uniform scenario
# probabilities, and the scenario loss matrix.
function generate_data()
    # Generate random data
    InitialValue = float(rand(80:140, 1, 5))
    FutureValue = float(rand(40:150, 1000, 5))
    Demand = float(rand(40:50, 1000))
    nscen = size(FutureValue, 1)
    nproducts = size(FutureValue, 2)
    prob = 1 / nscen * ones(nscen)
    # Loss per scenario/product.  (The original pre-allocated a `zeros`
    # matrix here that was immediately overwritten — dead code, removed.)
    Loss = InitialValue .- FutureValue
    return FutureValue, Loss, Demand, nproducts, prob, nscen
end

# Build (but do not solve) the portfolio LP:
#     min  γ + (1/α) Σⱼ probⱼ yⱼ
#     s.t. Σᵢ xᵢ = 1
#          yⱼ ≥ Σᵢ Loss[j,i] xᵢ − γ          ∀ scenarios j
#          Σᵢ FutureValue[j,i] xᵢ ≥ Demand[j] ∀ scenarios j
#          x, y, γ ≥ 0
# Returns the model plus references to x, y and γ.
function build_model(FutureValue, Loss, Demand, nproducts, prob, nscen, α)
    model = Model(Clp.Optimizer)
    @variable(model, x[1:nproducts] >= 0)
    @variable(model, y[1:nscen] >= 0)
    @variable(model, γ >= 0)
    @constraint(model, budget, sum(x[i] for i in 1:nproducts) == 1)
    @constraint(model, constraint2[j in 1:nscen],
        y[j] - sum(Loss[j, i] * x[i] for i in 1:nproducts) + γ >= 0)
    @constraint(model, constraint3[j in 1:nscen],
        sum(FutureValue[j, i] * x[i] for i in 1:nproducts) - Demand[j] >= 0)
    @objective(model, Min, γ + 1 / α * sum(prob[j] * y[j] for j in 1:nscen))
    return model, x, y, γ
end

# Solve the base model once to obtain `Limit`, then re-solve in parallel for
# a range of α values.
#
# Fix: the original reassigned `model, x, y, γ` (and the keyword argument
# `α`) inside the `@threads` loop while the same names were already bound in
# the enclosing function scope.  Julia's scoping makes such bindings shared
# across all threads, so threads raced on the same model object — the source
# of the `OptimizeNotCalled()` errors and solver crashes.  The loop body now
# uses fresh names, which are local to each thread's execution.
function run_modelthreading(; α = 0.05)
    FutureValue, Loss, Demand, nproducts, prob, nscen = generate_data()
    model, x, y, γ = build_model(FutureValue, Loss, Demand, nproducts, prob, nscen, α)
    optimize!(model)
    Limit = objective_value(model)
    n = 10
    VaR = Vector{Float64}(undef, n)
    CVaR = Vector{Float64}(undef, n)
    Product = Array{Float64,2}(undef, nproducts, n)
    Threads.@threads for i in 1:n
        # Fresh, loop-local names — deliberately NOT `α`/`model`/`x`/`y`/`γ`.
        α_i = (10 - (i - 1)) / Limit
        model_i, x_i, y_i, γ_i =
            build_model(FutureValue, Loss, Demand, nproducts, prob, nscen, α_i)
        set_optimizer_attribute(model_i, MOI.Silent(), true)
        optimize!(model_i)
        VaR[i] = JuMP.value(γ_i)
        CVaR[i] = objective_value(model_i)
        Product[:, i] = JuMP.value.(x_i)
    end
    return VaR, CVaR, Product
end

Runtime code
# Entry point: base solve followed by the threaded α sweep.
run_modelthreading(;α=0.05)

Try it with https://github.com/jump-dev/Clp.jl/pull/108

] add Clp#od/c

Otherwise, try encapsulating the optimize call in the buildmodel function, and just return the data you need, rather than the model and variables.

Maybe there is something weird:

using GLPK

# Minimal reproduction of the finalizer/threading issue: wraps a raw GLPK
# problem pointer and frees it from a finalizer at GC time.
mutable struct Foo
    ptr::Ptr{Cvoid}
    function Foo()
        ptr = glp_create_prob()
        @info "Creating $(ptr)"
        foo = new(ptr)
        # The finalizer reports the pointer via jl_safe_printf (chosen here
        # because it is callable from finalizer context) and then frees the
        # underlying C problem object.
        finalizer(foo) do f
            ccall(:jl_safe_printf, Cvoid, (Cstring, Cstring), "Finalizing %s.\n", repr(f.ptr))
            glp_delete_prob(f)
        end
        return foo
    end
end

# Let a `Foo` be passed directly where ccall expects a `Ptr{Cvoid}`:
# `cconvert` returns the object itself (keeping it rooted for the call),
# and `unsafe_convert` extracts the raw pointer.
Base.cconvert(::Type{Ptr{Cvoid}}, f::Foo) = f
Base.unsafe_convert(::Type{Ptr{Cvoid}}, f::Foo) = f.ptr

# Construct (and discard) two GLPK wrappers sequentially; their finalizers
# run later, at GC time.
function main_pass()
    for i = 1:2
        Foo()
    end
end

# Identical construction, but performed from a `Threads.@threads` loop so
# each object may be created on a different thread.  This is the variant
# whose later GC triggers the `glp_free` abort shown below.
function main_fail()
    Threads.@threads for i = 1:2
        Foo()
    end
end

julia> main_pass()
[ Info: Creating Ptr{Nothing} @0x00007ff24a155980
[ Info: Creating Ptr{Nothing} @0x00007ff24a380d70

julia> GC.gc()
Finalizing Ptr{Nothing} @0x00007ff24a380d70.
Finalizing Ptr{Nothing} @0x00007ff24a155980.

julia> main_fail()
[ Info: Creating Ptr{Nothing} @0x00007ff24a389140
[ Info: Creating Ptr{Nothing} @0x00007ff24a13cad0

julia> GC.gc()
Finalizing Ptr{Nothing} @0x00007ff24a13cad0.
Finalizing Ptr{Nothing} @0x00007ff24a389140.
glp_free: memory allocation error
Error detected in file env/alloc.c at line 72

signal (6): Abort trap: 6
in expression starting at REPL[10]:1
__pthread_kill at /usr/lib/system/libsystem_kernel.dylib (unknown line)
Allocations: 8019758 (Pool: 8017483; Big: 2275); GC: 9

I don’t fully understand the ramifications of external C libraries and threads.

Thanks.

I agree there is something weird happening. I don’t think the error is specific to Clp solver, as GLPK also gives an error.

glp_free: ptr = 0000000036172DF0; invalid pointer
glp_set_col_bnds: j = 3; column number out of range
Error detected in file api/prob1.c at line 632

signal (22): SIGABRT
in expression starting at none:1
Error detected in file env/alloc.c at line 59

Julia has exited.
Press Enter to start a new session.

My workaround is similar to the one you suggested: return values by defining an extra function build_model_t(.......). I have given my code below for the benefit of others in case it helps with fixing the issue or documenting how to use multithreading with JuMP.

using JuMP
using Clp
# Generate a random instance (1000 scenarios × 5 products): initial prices,
# future values, demands, uniform scenario probabilities, and losses.
function generate_data()
    # Generate random data
    InitialValue = float(rand(80:140, 1, 5))
    FutureValue = float(rand(40:150, 1000, 5))
    Demand = float(rand(40:50, 1000))
    nscen = size(FutureValue, 1)
    nproducts = size(FutureValue, 2)
    prob = 1 / nscen * ones(nscen)
    # The original `zeros(Float64, nscen, nproducts)` pre-allocation was
    # immediately overwritten by the next assignment — dead code, removed.
    Loss = InitialValue .- FutureValue
    return FutureValue, Loss, Demand, nproducts, prob, nscen
end

# Build (but do not solve) the portfolio LP.
# Arguments: scenario future values and losses (nscen×nproducts), demands,
# sizes, scenario probabilities, and the tail level α.
# Returns the model plus references to the weights x, the per-scenario
# auxiliary variables y, and the scalar γ.
function build_model(FutureValue,Loss,Demand,nproducts,prob,nscen,α)
    model = Model(Clp.Optimizer)
    # Decision variables: weights, per-scenario excess losses, threshold γ.
    @variable(model,x[b=1:nproducts]>=0)
    @variable(model,y[b=1:nscen]>=0)
    @variable(model,γ>=0)
    # Weights form a convex combination.
    @constraint(model,budget,sum(x[i] for i =1:nproducts) == 1)
    # y[j] must cover the scenario-j loss in excess of γ.
    @constraint(model,constraint2[j in 1:nscen], y[j]-sum(Loss[j,i]*x[i] for i in 1:nproducts) + γ >= 0)
    # Scenario-j portfolio value must meet demand.
    @constraint(model,constraint3[j in 1:nscen], sum(FutureValue[j,i]*x[i] for i in 1:nproducts)-Demand[j] >= 0)
    @objective(model,Min,γ+1/(α)*sum(y[j]*prob[j] for j =1:nscen))
    return model, x, y, γ
end
# Build AND solve the portfolio LP, returning only the optimal objective
# value (`Limit`).  Because no JuMP model or variable reference escapes this
# function, nothing solver-related is shared with any threaded caller.
function build_model_t(FutureValue,Loss,Demand,nproducts,prob,nscen,α)
    model = Model(Clp.Optimizer)
    @variable(model,x[b=1:nproducts]>=0)
    @variable(model,y[b=1:nscen]>=0)
    @variable(model,γ>=0)
    @constraint(model,budget,sum(x[i] for i =1:nproducts) == 1)
    @constraint(model,constraint2[j in 1:nscen], y[j]-sum(Loss[j,i]*x[i] for i in 1:nproducts) + γ >= 0)
    @constraint(model,constraint3[j in 1:nscen], sum(FutureValue[j,i]*x[i] for i in 1:nproducts)-Demand[j] >= 0)
    @objective(model,Min,γ+1/(α)*sum(y[j]*prob[j] for j =1:nscen))
    optimize!(model)
    # Guard before querying: `objective_value` errors if the solve did not
    # produce a solution (infeasible/unbounded instance).
    if termination_status(model) != MOI.OPTIMAL
        error("build_model_t: solver stopped with status $(termination_status(model))")
    end
    Limit = objective_value(model)
    return Limit
end

# Workaround version: the initial solve is fully encapsulated in
# `build_model_t`, so no JuMP model object is bound in this scope before the
# threaded loop starts.
function run_modelthreading(;α=0.05)
    FutureValue, Loss, Demand,nproducts, prob, nscen = generate_data()
    # Only the plain Float64 `Limit` crosses into this scope.
    Limit = build_model_t(FutureValue,Loss,Demand,nproducts,prob,nscen,α)
    n = 10
    VaR = Vector{Float64}(undef,n)
    CVaR = Vector{Float64}(undef,n)
    Product = Array{Float64,2}(undef, nproducts,n)
    Threads.@threads for i in 1:n
        # NOTE(review): `α` is this function's keyword argument, so this
        # assignment rebinds a variable captured from the enclosing scope —
        # it is shared between all threads and looks like a data race; a
        # fresh local name would be safer.  TODO confirm.
        α = (10-(i-1))/Limit
        # `model, x, y, γ` are assigned only inside the loop body here,
        # unlike the failing version that also bound them outside the loop.
        model, x, y, γ = build_model(FutureValue,Loss,Demand,nproducts,prob,nscen,α)
        set_optimizer_attribute(model, MOI.Silent(), true)
        optimize!(model)
        VaR[i] = JuMP.value.(γ)
        CVaR[i] = objective_value(model)
        Product[:,i] = JuMP.value.(x)
    end
    return VaR, CVaR, Product
end

You may be having a race condition due to the assignment of the model variable (and others) outside the @threads loop. That can cause a thread to use a model on which optimize! has not yet been called, as pointed out by @odow.

That could explain also this:

It can be fixed (I think) in your MWE by making the model assignment inside the @threads loop local:

function run_modelthreading(;α=0.05)
    [...]
    # This make all this variables visibles between all threads
    model, x, y, γ = build_model(FutureValue,Loss,Demand,nproducts,prob,nscen,α)
    [...]
    Threads.@threads for i in 1:n
       [...]
       # This will force this variables to be local for each thread
       local model, x, y, γ = build_model(FutureValue,Loss,Demand,nproducts,prob,nscen,α)
       [...]
    end
    return [...]
end

In my MWE, I am querying the solution after calling optimize.

I do not understand how a race condition can arise for assignment outside the @threads loop. My workaround is to not call the build_model function before the parallel loop, but to return the optimisation results from another function. Inside the parallel loop I have not made the variable/model assignment local (see code above).

I hit this same issue recently, see the link. Julia's scoping rules are not very intuitive in this case. An assignment to a variable, before or after the @threads for loop, will make it have a scope outside the threaded section.

Run this example with julia -t3 test.jl

# test.jl
import Base.Threads: @threads, nthreads

# Demonstrates the @threads scoping surprise: because `a` is also assigned
# after the loop (last line), it belongs to the enclosing function's scope
# and is captured by the closure @threads builds — so it is SHARED between
# all threads rather than being loop-local.
function test()
    @threads :static for i in 1:100

        d = Dict(:a => Dict())
        a = d[:a]
        @assert a === d[:a]

        # This assertion can fail: another thread may rebind the shared `a`
        # between the assignment above and the reads below.
        for j in 1:100; @assert a === d[:a] end 
    end
    a = [] # If this line is commented the problem disappears 
end

# Run the demo repeatedly; with more than one thread (-t) the shared-`a`
# assertion eventually fires.
println("Running test (-t$(nthreads()))")
for i = 1:500; test() end

But now you are not assigning any of the variables used inside the @threads loop in the outside.

function run_modelthreading(;α=0.05)
    [...]
    # Not the sames variables (not as before)
    Limit = build_model_t(FutureValue,Loss,Demand,nproducts,prob,nscen,α)
    [...]
    Threads.@threads for i in 1:n
        α = (10-(i-1))/Limit
        # The only place where you assign this variables (not as before)
        model, x, y, γ = build_model(FutureValue,Loss,Demand,nproducts,prob,nscen,α)
        set_optimizer_attribute(model, MOI.Silent(), true)
        optimize!(model)
        [...]
    end
    return VaR, CVaR, Product
end

I have tested your suggestion of making the variable assignment inside the loop local. On another model with actual data this gives incorrect results. If a race condition arises then the use of local may not be a good idea. It may have worked for your specific case, but in the MWE above it is mixing something up.

Locks can be used to handle a race condition, but this tends to slow down the program. If possible, I would prefer to code it in a way that avoids the race condition altogether. I also ran into problems with my workaround code above when I tried to use it on another problem.

This is my code for the MWE that works well and avoids race condition. I believe this is exactly what @odow suggested using.

using JuMP
using Clp
# Generate a random instance (100 scenarios × 5 products): initial prices,
# future values, demands, uniform scenario probabilities, and losses.
function generate_data()
    # Generate random data
    InitialValue = float(rand(80:140, 1, 5))
    FutureValue = float(rand(40:150, 100, 5))
    Demand = float(rand(40:50, 100))
    nscen = size(FutureValue, 1)
    nproducts = size(FutureValue, 2)
    prob = 1 / nscen * ones(nscen)
    # The original `zeros` pre-allocation here was dead code (immediately
    # overwritten), so it has been removed.
    Loss = InitialValue .- FutureValue
    return FutureValue, Loss, Demand, nproducts, prob, nscen
end

# Build, solve, and extract plain results from the portfolio LP.  Only
# Float64 data is returned (γ value, objective, decision vector), so no JuMP
# object crosses thread boundaries — the pattern @odow recommended.
function build_model(FutureValue,Loss,Demand,nproducts,prob,nscen,α)
    model = Model(Clp.Optimizer)
    set_optimizer_attribute(model, MOI.Silent(), true)
    @variable(model,x[b=1:nproducts]>=0)
    @variable(model,y[b=1:nscen]>=0)
    @variable(model,γ>=0)
    @constraint(model,budget,sum(x[i] for i =1:nproducts) == 1)
    @constraint(model,constraint2[j in 1:nscen], y[j]-sum(Loss[j,i]*x[i] for i in 1:nproducts) + γ >= 0)
    @constraint(model,constraint3[j in 1:nscen], sum(FutureValue[j,i]*x[i] for i in 1:nproducts)-Demand[j] >= 0)
    @objective(model,Min,γ+1/(α)*sum(y[j]*prob[j] for j =1:nscen))
    optimize!(model)
    # Guard before querying the solution: value/objective_value error when
    # no primal solution exists (infeasible or unbounded instance).
    if termination_status(model) != MOI.OPTIMAL
        error("build_model: solver stopped with status $(termination_status(model))")
    end
    γ_result = JuMP.value.(γ)
    Objective_result = objective_value(model)
    DecisionVariable_result = JuMP.value.(x)
    return γ_result, Objective_result, DecisionVariable_result
end

# Thread-safe variant: each loop iteration builds, solves, and extracts
# results entirely inside `build_model`, so no solver object is shared.
function run_modelthreading(;n=10,α=0.05)
    FutureValue, Loss, Demand,nproducts, prob, nscen = generate_data()
    γ_result, Objective_result, DecisionVariable_result = build_model(FutureValue,Loss,Demand,nproducts,prob,nscen,α)
    Limit = Objective_result
    VaR = Vector{Float64}(undef,n)
    CVaR = Vector{Float64}(undef,n)
    Product = Array{Float64,2}(undef, nproducts,n)
    Threads.@threads for i in 1:n
        # NOTE(review): `α` is the keyword argument and `γ_result` /
        # `Objective_result` / `DecisionVariable_result` were already bound
        # above, so these loop-body assignments rebind variables shared
        # between threads — one thread could overwrite them between another
        # thread's write and read.  Fresh local names would remove that
        # hazard; confirm before relying on this pattern.
        α = (n-(i-1))/Limit
        γ_result, Objective_result, DecisionVariable_result = 
        build_model(FutureValue,Loss,Demand,nproducts,prob,nscen,α)
        VaR[i] = γ_result
        CVaR[i] = Objective_result
        Product[:,i] = DecisionVariable_result
    end
    return VaR, CVaR, Product
end

# Entry point for the thread-safe version.
run_modelthreading(;n=10,α=0.05)