I am running multithreaded parameter tuning for some ML algorithms. I can avoid allocation in the input by using CartesianIndices (thanks, stevengj!), but how can I avoid allocation in the output? In the example below I would presumably need to lock the variables bestError, bestPar1 and bestPar2, but how? (A tentative lock-based sketch follows Version 2 below.)
par1 = [1,2]
par2 = [0.1,0.2,0.3]
doMyStuff(par1,par2) = abs(0-(-par1^3+par1^2+par1+par2^3-par2^2+par2+10))
# Version 1: Preallocation, computation and comparison
# Step A : preallocation
errorMatrix = fill(Inf64,length(par1),length(par2))
# Step B: computation
function tuneParameters!(errorMatrix, par1, par2)
    Threads.@threads for ij in CartesianIndices((length(par1), length(par2)))
        p1i, p2j = par1[ij[1]], par2[ij[2]]   # a CartesianIndex can be indexed componentwise
        errorMatrix[ij] = doMyStuff(p1i, p2j)
    end
    return errorMatrix
end
tuneParameters!(errorMatrix,par1,par2)
# Step C: comparison
bestIndex = argmin(errorMatrix)   # compute the CartesianIndex of the minimum only once
bestError = errorMatrix[bestIndex]
bestPar1, bestPar2 = par1[bestIndex[1]], par2[bestIndex[2]]
# Version 2: computation and comparison inside the loop
function tuneParametersB(par1, par2)
    # Step A: initialisation
    bestError = Inf64
    bestPar1  = nothing
    bestPar2  = nothing
    # Step B: computation and comparison
    Threads.@threads for ij in CartesianIndices((length(par1), length(par2)))
        p1i, p2j = par1[ij[1]], par2[ij[2]]
        attempt = doMyStuff(p1i, p2j)
        if attempt < bestError   # data race: this update would need a lock, I presume...
            bestError = attempt
            bestPar1  = p1i
            bestPar2  = p2j
        end
    end
    return (bestError, bestPar1, bestPar2)
end
bestError, bestPar1, bestPar2 = tuneParametersB(par1, par2)
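One approach I am considering, shown below as an untested sketch, is to guard the shared update with a ReentrantLock from Base; tuneParametersLocked is just a placeholder name, and the expensive doMyStuff call stays outside the lock so only the comparison and update are serialised:

function tuneParametersLocked(par1, par2)
    bestError = Inf64
    bestPar1  = nothing
    bestPar2  = nothing
    lk = ReentrantLock()   # protects the three best-so-far variables
    Threads.@threads for ij in CartesianIndices((length(par1), length(par2)))
        p1i, p2j = par1[ij[1]], par2[ij[2]]
        attempt  = doMyStuff(p1i, p2j)   # expensive work, done without holding the lock
        lock(lk) do                      # serialise only the comparison and update
            if attempt < bestError
                bestError = attempt
                bestPar1  = p1i
                bestPar2  = p2j
            end
        end
    end
    return (bestError, bestPar1, bestPar2)
end

bestError, bestPar1, bestPar2 = tuneParametersLocked(par1, par2)

An alternative that avoids taking a lock inside the hot loop would be for each thread to track its own local best and merge the per-thread results afterwards; Version 1's preallocated matrix is essentially that idea, with the reduction done serially at the end.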
Edit: crossposted on Stack Overflow: "How to 'lock' variables in @threads?"