SharedArray - not updated

I have a problem with the `ekwg_monte_carlo_bias` function: the `SharedArray`s used in it are never updated. Any suggestions?

Note: The function works as expected when `@parallel for i in 1:m` is replaced with `for i in 1:m` and `addprocs(Sys.CPU_CORES)` is deleted.

Julia Code:

@everywhere using Distributions
@everywhere using Optim

# Exponential density f(x) = λ·exp(-λx); the rate λ is read from par[1].
@everywhere function gexp(x, par)
    rate = par[1]
    return rate * exp(-rate * x)
end

# Exponential CDF F(x) = 1 - exp(-λx); the rate λ is read from par[1].
@everywhere function Gexp(x, par)
    rate = par[1]
    return 1 - exp(-rate * x)
end

# Exponential quantile function, broadcast over x.
# Distributions.Exponential is parameterized by its *scale*, hence 1/λ.
@everywhere function QGexp(x, par)
    rate = par[1]
    return quantile.(Exponential(1 / rate), x)
end

# Draw a sample of size n from the exponentiated Kumaraswamy-G (EKw-G)
# distribution via inverse-transform sampling.
#
#   QG   : quantile function of the baseline distribution G
#   n    : sample size
#   par0 : EKw shape parameters [a, b, c]
#   par1 : extra parameters forwarded to QG
#
# NOTE(review): the post contained the mojibake `par1…` (a literal ellipsis
# character) instead of the splat `par1...`, plus stray `````` fence residue
# inside the body — both are syntax errors and are fixed here.
@everywhere function sample_ekwg(QG, n, par0, par1...)
    a = par0[1]
    b = par0[2]
    c = par0[3]

    u = rand(n)

    # Inverse of the EKw CDF applied to uniforms. Fully dotted so the
    # subtraction broadcasts over the vector u (required on Julia >= 1.0;
    # `1 - u.^x` was only a deprecation on 0.6).
    p = (1 .- (1 .- u.^(1 / c)).^(1 / b)).^(1 / a)

    return QG(p, par1...)
end

# EKw-G cumulative distribution function:
#   F(x) = (1 - (1 - G(x)^a)^b)^c
# where G is the baseline CDF and par0 = [a, b, c].
# (Mojibake `…` replaced by the real splat `...`.)
@everywhere function cdf_ekwg(cdf, x, par0, par1...)
    a = par0[1]
    b = par0[2]
    c = par0[3]

    # Fully dotted so x may be a scalar or an array.
    return (1 .- (1 .- cdf.(x, par1...).^a).^b).^c
end

# EKw-G probability density function:
#   f(x) = a·b·c·g(x)·G(x)^(a-1)·(1-G(x)^a)^(b-1)·(1-(1-G(x)^a)^b)^(c-1)
# where g/G are the baseline PDF/CDF and par0 = [a, b, c].
#
# Fixes (review): mojibake `…` -> `...`; the original mixed dotted powers
# with undotted `*`, which errors for array x — now fully broadcast, so x
# may be a scalar (as called from loglike) or an array.
@everywhere function pdf_ekwg(cdf, pdf, x, par0, par1...)
    a = par0[1]
    b = par0[2]
    c = par0[3]

    g = pdf.(x, par1...)
    G = cdf.(x, par1...)

    return a .* b .* c .* g .* G.^(a - 1) .* (1 .- G.^a).^(b - 1) .*
           (1 .- (1 .- G.^a).^b).^(c - 1)
end

# Negative log-likelihood of the EKw-G model for the data vector x.
# Returned negated so it can be fed straight to Optim's minimizer.
# Fixes (review): mojibake `…` -> `...`; accumulator initialized to 0.0
# instead of 0 so its type does not change inside the loop.
@everywhere function loglike(cdf, pdf, x, par0, par1...)
    soma = 0.0
    for xi in x
        soma += log(pdf_ekwg(cdf, pdf, xi, par0, par1...))
    end
    return -soma
end

# Fit the EKw-G model to sample_boot with Optim; returns the Optim result,
# or the sentinel -1 when optimization throws.
#
# NOTE(review): `G`, `g`, `starts` and `par1` are read as *globals* here,
# and nothing in this script ever defines globals named `G`, `g` or
# `starts` — so on the workers this throws UndefVarError on every call.
# Fixes (review): mojibake `→` -> `->` and `…` -> `...`.
@everywhere function myoptimize(sample_boot)
    try
        return optimize(par0 -> loglike(G, g, sample_boot, par0, par1...), starts,
                        Optim.Options(g_tol = 1e-2))
    catch
        # WARNING(review): this bare catch swallows *every* error, including
        # the UndefVarError above, silently turning real bugs into the -1
        # sentinel — which is why the SharedArrays appear "not updated".
        # Consider logging the caught exception before returning.
        return -1
    end
end

# Resampling bootstrap bias correction for the EKw-G fit.
#
#   B                  : number of bootstrap replicates
#   G, g               : baseline CDF / PDF (currently unused directly —
#                        myoptimize reads globals; see NOTE there)
#   data               : original sample to resample from
#   original_estimates : parameter estimates on the original sample
#   starts             : optimizer starting values
#
# Returns (std of bootstrap estimates, bias-corrected estimates).
# Fixes (review): mojibake `…` -> `...`, smart quotes `’` -> `'`,
# deprecated `2.*` -> `2 .*`, stray `````` paste residue removed, and the
# hard-coded parameter count 3 generalized to length(original_estimates).
@everywhere function ekwg_bootstrap_bias(B, G, g, data, original_estimates, starts, par1...)
    p = length(original_estimates)  # number of model parameters (was hard-coded 3)
    result_boot = SharedArray{Float64}(p * B)

    j = 1
    while j <= B
        sample_boot = sample(data, length(data), replace = true)

        result = myoptimize(sample_boot)

        # Skip replicates that failed or did not converge.
        # WARNING(review): if every fit fails, this loop never terminates.
        if (result == -1) || (result.g_converged == false)
            continue
        end
        result_boot[(p * (j - 1) + 1):(p * j)] = result.minimizer
        j = j + 1
    end
    estimates_matrix = convert.(Float64, reshape(result_boot, length(starts), B))'

    error = std(estimates_matrix, 1)

    # Bias-corrected estimate: 2·θ̂ − mean of the bootstrap estimates.
    return error, (2 .* original_estimates' .- mean(estimates_matrix, 1))'
end

# Monte Carlo study of the bootstrap bias correction: M replications of
# (simulate a sample of size n -> fit -> bootstrap-correct), run in
# parallel, results collected in SharedArrays.
# (`seed` is accepted but currently unused — TODO: wire it to the RNG.)
# Fixes (review): mojibake `…` -> `...`, stray `````` residue removed,
# hard-coded 3 generalized to length(true_parameters).
function ekwg_monte_carlo_bias(M, B, n, true_parameters, seed, par1...)
    p = length(true_parameters)
    result_mc_correct_vector = SharedArray{Float64}(p * M)
    result_mc_vector = SharedArray{Float64}(p * M)
    result_error_boot = SharedArray{Float64}(p * M)

    # @sync makes the master wait for all workers before reading the
    # SharedArrays below.
    @sync @parallel for i in 1:M
        true_sample = sample_ekwg(QGexp, n, true_parameters, par1...)
        result_mc = myoptimize(true_sample)

        if result_mc != -1
            result_mc_vector[(p * i - p + 1):(p * i)] = result_mc.minimizer
            # BUG FIX(review): the original call was
            #     ekwg_bootstrap_bias(b, g, g, ...)
            # which read the *globals* b and g — `g` is undefined (the pdf
            # function is `gexp`) and the CDF slot received a pdf. On the
            # workers the resulting UndefVarError is swallowed by
            # myoptimize's bare catch, every fit returns -1, and the
            # SharedArrays are never written — exactly the reported
            # "SharedArray not updated" symptom. Use the function parameter
            # B and the actual Gexp/gexp functions instead.
            result_error_boot[(p * i - p + 1):(p * i)], result_mc_correct_vector[(p * i - p + 1):(p * i)] =
                ekwg_bootstrap_bias(B, Gexp, gexp, true_sample,
                                    result_mc.minimizer, true_parameters, par1...)
        end
    end

    output1 = convert.(Float64, reshape(result_mc_vector, p, M))'
    output2 = convert.(Float64, reshape(result_mc_correct_vector, p, M))'
    output3 = convert.(Float64, reshape(result_error_boot, p, M))'
    return (mean(output1, 1), mean(output2, 1), mean(output3, 1))
end

# True EKw shape parameters (a, b, c) used to generate each Monte Carlo sample.
true_parameters = [1.0,1.0,1.0];
# Rate parameter of the baseline exponential, forwarded as par1... downstream.
par1 = 1.5;
# m: Monte Carlo replications; b: bootstrap replicates per replication;
# n: sample size per replication.
m = 10;
b = 50;
n = 100;

# Run the study; seed argument (0) is currently unused by the function.
@time mc_estimates, mc_estimates_boot, mc_error_boot = ekwg_monte_carlo_bias(m, b, n, true_parameters, 0, par1)