Suppress the error message and draw another random number during the simulation

Thank you for the response. I think the minimal code is as follows. The problem I’m talking about is basically running the last line many times and calculating the mean of the results. To do this, I need to ignore the error messages raised during the for loop.

I’m not very knowledgeable about the issue you mentioned. Could you elaborate more?

using DataFrames, Random, LinearAlgebra, Distributions, LineSearches, Optim, NLSolversBase

"""
    CRRA_bernoulli(ω::Real, x::Real)

CRRA (constant relative risk aversion) Bernoulli utility of a monetary
outcome `x` with risk-aversion parameter `ω`.

Returns `log(x)` in the limiting case `ω == 1` and `x^(1-ω)/(1-ω)` otherwise.
Like the underlying `log`/`^` functions, this throws a `DomainError` when
`x ≤ 0` and the exponent is non-integer.

Note: `x` is widened from `Integer` to `Real` so the function also accepts
fractional payoffs; integer callers are unaffected.
"""
function CRRA_bernoulli(ω::Real, x::Real)
    if ω == 1.00
        # log(exp(1), x) in the original was just an unidiomatic spelling of
        # the natural log — log(x) is the direct equivalent.
        return log(x)
    else
        return x ^ (1 - ω) / (1 - ω)
    end
end

"""
    CRRA_EU(ω, DP_monetary, p)

Expected CRRA utility of a binary lottery that pays `DP_monetary[1]` with
probability `p` and `DP_monetary[2]` with probability `1 - p`.
"""
function CRRA_EU(ω::Real, DP_monetary::Vector, p::Real)
    u_first = CRRA_bernoulli(ω, DP_monetary[1])
    u_second = CRRA_bernoulli(ω, DP_monetary[2])
    return p * u_first + (1 - p) * u_second
end

"""
    CE_EU(ω, EU)

Certainty equivalent of an expected CRRA utility `EU` with parameter `ω`,
i.e. the inverse of `x ↦ x^(1-ω)/(1-ω)`. Not defined for `ω == 1`
(division by zero in the exponent), matching the original behavior.
"""
function CE_EU(ω, EU)
    scaled = (1 - ω) * EU
    return scaled ^ (1 / (1 - ω))
end

"""
    LogitProbx(λ, U_x, U_y)

Logit probability of choosing option x over option y given utilities
`U_x`, `U_y` and scale parameter `λ`.

Computed in the numerically stable logistic form
`1 / (1 + exp(λ * (U_y - U_x)))`, which is mathematically identical to
`exp(λU_x) / (exp(λU_x) + exp(λU_y))` but cannot overflow to `NaN` when
`λ * U_x` is large: it saturates cleanly to 0.0 or 1.0 instead.
"""
function LogitProbx(λ::Real, U_x::Real, U_y::Real)
    return 1 / (1 + exp(λ * (U_y - U_x)))
end

"""
    loglikelihood_RPM_Logit(ω, λ, Data, ω⃰)

Binomial log-likelihood of the random-parameter (RPM) logit model: for each
question, the probability of choosing x is a logit comparison of the
question's indifference parameter `ω⃰[q]` with the candidate `ω`.
"""
function loglikelihood_RPM_Logit(ω::Real, λ::Real, Data::Array, ω⃰::Vector)
    n_obs = size(Data, 1)
    aggData = sum(Data, dims = 1) # aggregate choice counts per question
    loglik = 0.0
    # prob_index == 10 (p = 1.0) is skipped: it yields choice probabilities of
    # exactly 0 or 1, which break the log-likelihood.
    for DPindex = 1:4, prob_index = 1:9
        q = (DPindex - 1) * 10 + prob_index
        chose_x = aggData[q]
        Px = LogitProbx(λ, ω⃰[q], ω)
        loglik += chose_x * log(exp(1), Px) +
                  (n_obs - chose_x) * log(exp(1), 1 - Px)
    end
    return loglik
end

"""
    loglikelihood_RUM_Gumbel(ω, λ, Data, DP)

Binomial log-likelihood of the random-utility (RUM) model with Gumbel
errors: choice probabilities are logit comparisons of the two lotteries'
expected CRRA utilities for each decision problem in `DP`.
"""
function loglikelihood_RUM_Gumbel(ω::Real, λ::Real, Data::Array, DP::Matrix)
    p = [0.1:0.1:1;]
    n_obs = size(Data, 1)
    aggData = sum(Data, dims = 1) # aggregate choice counts per question
    loglik = 0.0
    # prob_index == 10 (p = 1.0) is skipped: degenerate choice probabilities
    # (exactly 0 or 1) would spoil the log-likelihood.
    for DPindex = 1:4, prob_index = 1:9
        q = (DPindex - 1) * 10 + prob_index
        rows = 2 * (DPindex - 1) + 1 : 2 * (DPindex - 1) + 2 # payoff rows of this DP
        chose_x = aggData[q]
        chose_y = n_obs - chose_x
        Ux = CRRA_EU(ω, DP[rows, 1], p[prob_index])
        Uy = CRRA_EU(ω, DP[rows, 2], p[prob_index])
        Px = LogitProbx(λ, Ux, Uy)
        loglik += chose_x * log(exp(1), Px) + chose_y * log(exp(1), 1 - Px)
    end
    return loglik
end

"""
    LeastSquareCE_ver1(ω_1, λ, Data, DP)

Least-squares objective for the certainty-equivalent (CE) model: sum of
squared differences between the logit choice probability implied by the
certainty equivalents and the empirical choice frequency.
"""
function LeastSquareCE_ver1(ω_1::Real, λ::Real, Data::Array, DP::Matrix)
    p = [0.1:0.1:1;]
    n_obs = size(Data, 1)
    aggData = sum(Data, dims = 1) # aggregate choice counts per question
    ssq = 0.0
    for DPindex = 1:4, prob_index = 1:9 # p = 1.0 omitted, as in the likelihood fits
        q = (DPindex - 1) * 10 + prob_index
        rows = 2 * (DPindex - 1) + 1 : 2 * (DPindex - 1) + 2 # payoff rows of this DP
        chose_x = aggData[q]
        Ux = CRRA_EU(ω_1, DP[rows, 1], p[prob_index])
        Uy = CRRA_EU(ω_1, DP[rows, 2], p[prob_index])
        CEx = CE_EU(ω_1, Ux)
        CEy = CE_EU(ω_1, Uy)
        shift = max(CEx, CEy) # recentre the CEs so exp() in the logit cannot overflow
        Px = LogitProbx(λ, CEx - shift, CEy - shift)
        ssq += (Px - chose_x / n_obs) ^ 2
    end
    return ssq
end

"""
    DecisionProblemGenerator()

Build the four binary decision problems used in the experiment.

Returns a 10-tuple: the eight individual payoff vectors (high/low payoff of
the two lotteries in each of the 4 problems), the stacked 8×2 payoff matrix
`DP` (rows 2k-1:2k belong to problem k; column 1 is lottery 1, column 2 is
lottery 2), and the probability grid `p`.
"""
function DecisionProblemGenerator()
    # High/low payoffs of each lottery (lottery 1 and lottery 2 per problem).
    DP1_monetary1 = [3850; 100]
    DP1_monetary2 = [2000; 1600]
    DP2_monetary1 = [4000; 500]
    DP2_monetary2 = [2250; 1500]
    DP3_monetary1 = [4000; 150]
    DP3_monetary2 = [2000; 1750]
    DP4_monetary1 = [4500; 50]
    DP4_monetary2 = [2500; 1000]
    # Stack all four problems into one 8×2 matrix.
    DP = vcat(hcat(DP1_monetary1, DP1_monetary2),
              hcat(DP2_monetary1, DP2_monetary2),
              hcat(DP3_monetary1, DP3_monetary2),
              hcat(DP4_monetary1, DP4_monetary2))
    p = [0.1:0.1:1.0;] # 10 questions per gamble, one for each winning probability
    return DP1_monetary1, DP1_monetary2, DP2_monetary1, DP2_monetary2,
           DP3_monetary1, DP3_monetary2, DP4_monetary1, DP4_monetary2, DP, p
end


"""
    Experiment_RUM(n, ω, μ_ε, λ, init_RPM, init_RUM, init_CE)

Simulate `n` subjects' binary lottery choices under a random-utility model
with Gumbel errors (location `μ_ε`, scale `λ`, CRRA parameter `ω`), then
estimate (ω, λ) with three specifications — random-parameter logit (RPM),
random-utility Gumbel (RUM), and a certainty-equivalent least-squares fit
(CE) — each via Nelder-Mead, simulated annealing, and LBFGS starting from
`init_RPM`, `init_RUM`, `init_CE` respectively. Prints a LaTeX table of the
results to `stdout` and returns the corresponding `DataFrame`.

NOTE(review): `CRRA_EU_Gradient` is called below but is not defined anywhere
in this file chunk — presumably defined elsewhere; verify it is in scope.
"""
function Experiment_RUM(n::Integer,ω::Real,μ_ε::Real,λ::Real,
    init_RPM::Vector,init_RUM::Vector,init_CE::Vector)
    # n: number of observations, λ: scale parameter
    # Generate Decision Problem
    DP1_monetary1,DP1_monetary2,DP2_monetary1,DP2_monetary2,
    DP3_monetary1,DP3_monetary2,DP4_monetary1,DP4_monetary2,DP,p = 
    DecisionProblemGenerator()
    # Generate data: one Gumbel error per lottery (2 lotteries × 4 problems = 8 per subject).
    U = rand(Uniform(0,1),n * 8) # error occurs for each lotteries.
    # Inverse-CDF transform of uniforms into Gumbel(μ_ε, 1/λ) draws.
    ε = μ_ε .- log.(exp(1), log.(exp(1), U) .* (-1)) ./ λ # Generated true data from Gumbel
    n_p = length(p); n_D = n_p * 4;
    # Data[i, q] = 1 if subject i chose lottery 1 on question q, else 0.
    Data = zeros(n,n_D);
    for i = 1:n, DPindex = 1:4, prob_index = 1:n_p
        # The same error pair is reused across all probabilities of a problem;
        # the branch only selects which problem's payoff vectors to compare.
        if DPindex == 1
            Data[i,n_p * (DPindex-1) + prob_index] = 
            ifelse( CRRA_EU(ω,DP1_monetary1,p[prob_index]) + ε[(i-1) * 8 + (DPindex-1) * 2 + 1] > CRRA_EU(ω,DP1_monetary2,p[prob_index]) + ε[(i-1) * 8 + (DPindex-1) * 2 + 2], 1, 0)
        elseif DPindex == 2
            Data[i,n_p * (DPindex-1) + prob_index] = 
            ifelse( CRRA_EU(ω,DP2_monetary1,p[prob_index]) + ε[(i-1) * 8 + (DPindex-1) * 2 + 1] > CRRA_EU(ω,DP2_monetary2,p[prob_index]) + ε[(i-1) * 8 + (DPindex-1) * 2 + 2], 1, 0)
        elseif DPindex == 3
            Data[i,n_p * (DPindex-1) + prob_index] = 
            ifelse( CRRA_EU(ω,DP3_monetary1,p[prob_index]) + ε[(i-1) * 8 + (DPindex-1) * 2 + 1] > CRRA_EU(ω,DP3_monetary2,p[prob_index]) + ε[(i-1) * 8 + (DPindex-1) * 2 + 2], 1, 0)
        else
            Data[i,n_p * (DPindex-1) + prob_index] = 
            ifelse( CRRA_EU(ω,DP4_monetary1,p[prob_index]) + ε[(i-1) * 8 + (DPindex-1) * 2 + 1] > CRRA_EU(ω,DP4_monetary2,p[prob_index]) + ε[(i-1) * 8 + (DPindex-1) * 2 + 2], 1, 0)
        end
    end # Data has been generated #
    # Now we start the estimation with each model #

    ### 1. Start with RPM estimation ###
    ## To do the estimation, we have to find the ω(x,y) for each of the 40 problems ##
    # ω⃰[q]: the CRRA parameter at which a subject is indifferent between the
    # two lotteries of question q, found by Newton's method below.
    # Entries for prob_index == n_p (p = 1.0) are left at 0 and never used.
    ω⃰ = zeros(n_D);
    for DPindex = 1:4, prob_index = 1:(n_p-1)
        ω_init = -5.0 # Should start in a sufficiently small values to get the correct values #
        count = 1 # NOTE(review): unused — dead local, candidate for removal
        tol = 1000
        # NOTE(review): `10^(-8)` raises an Int base to a negative literal
        # exponent, which throws a DomainError in Julia — confirm; `1e-8`
        # is the safe spelling. Also the Newton loop has no iteration cap,
        # so it can spin forever if the iteration fails to converge.
        while abs(tol) > 10^(-8)
            # tol: utility gap between the two lotteries at ω_init.
            tol = CRRA_EU(ω_init,DP[ 2 * ( DPindex - 1 ) + 1 : 2 * ( DPindex - 1 ) + 2,1] , p[prob_index]) - CRRA_EU(ω_init , DP[ 2 * (DPindex - 1) + 1 : 2 * (DPindex - 1 ) + 2,2] , p[prob_index])
            # One Newton step on the utility gap; CRRA_EU_Gradient is assumed
            # to be the derivative of CRRA_EU with respect to ω (defined elsewhere).
            ω_renew = ω_init - tol / (CRRA_EU_Gradient(ω_init, DP[ 2 * ( DPindex - 1 ) + 1 : 2 * ( DPindex - 1 ) + 2 , 1 ],p[prob_index]) - 
            CRRA_EU_Gradient(ω_init, DP[ 2 * ( DPindex - 1 ) + 1 : 2 * ( DPindex - 1 ) + 2 , 2],p[prob_index]))
            ω_init = copy(ω_renew)
        end
        ω⃰[(DPindex - 1) * n_p + prob_index] = ω_init
    end

    # Now we have the equalizer ω(x,y)s. Next is to do the MLE #
    # Minimize the negative log-likelihood (Optim minimizes by default).
    opt_RPM_NelderMead = optimize(x->-loglikelihood_RPM_Logit(x[1],x[2],Data,ω⃰), 
    init_RPM)
    parameters_RPM_NelderMead = Optim.minimizer(opt_RPM_NelderMead)
        
    opt_RPM_SimulatedAnnealing = optimize(x->-loglikelihood_RPM_Logit(x[1],x[2],Data,ω⃰), 
    init_RPM,SimulatedAnnealing())
    parameters_RPM_SimulatedAnnealing = Optim.minimizer(opt_RPM_SimulatedAnnealing)

    opt_RPM_LBFGS = optimize(x->-loglikelihood_RPM_Logit(x[1],x[2],Data,ω⃰), 
    init_RPM,LBFGS()) # Found out that autodiff = :forward doesn't work here
    parameters_RPM_LBFGS = Optim.minimizer(opt_RPM_LBFGS) 

    ### 2. Start RUM estimation ###
    ## we have already calculated the expected utility of the 36 problems ##
    opt_RUM_NelderMead = optimize(x->-loglikelihood_RUM_Gumbel(x[1],x[2],Data,DP), 
    init_RUM)
    parameters_RUM_NelderMead = Optim.minimizer(opt_RUM_NelderMead)

    opt_RUM_SimulatedAnnealing = optimize(x->-loglikelihood_RUM_Gumbel(x[1],x[2],Data,DP), 
    init_RUM,SimulatedAnnealing(),Optim.Options(iterations = 1000))
    parameters_RUM_SimulatedAnnealing = Optim.minimizer(opt_RUM_SimulatedAnnealing)
        
    opt_RUM_LBFGS = optimize(x->-loglikelihood_RUM_Gumbel(x[1],x[2],Data,DP), 
    init_RUM,LBFGS())
    parameters_RUM_LBFGS = Optim.minimizer(opt_RUM_LBFGS)

    # NOTE(review): this second LBFGS fit overwrites opt_RUM_LBFGS above, so
    # the finite-difference LBFGS result is discarded — confirm this is intended.
    opt_RUM_LBFGS = optimize(x->-loglikelihood_RUM_Gumbel(x[1],x[2],Data,DP), 
    init_RUM,LBFGS(); autodiff = :forward)
    parameters_RUM_LBFGS = Optim.minimizer(opt_RUM_LBFGS)
    
    ### 3. Start CE estimation ###
    # The CE objective is already a sum of squares, so it is minimized directly
    # (no negation, unlike the log-likelihoods above).
    opt_CE_NelderMead = optimize(x->LeastSquareCE_ver1(x[1],x[2],Data,DP), 
    init_CE)
    parameters_CE_NelderMead = Optim.minimizer(opt_CE_NelderMead)

    opt_CE_SimulatedAnnealing = optimize(x->LeastSquareCE_ver1(x[1],x[2],Data,DP), 
    init_CE,SimulatedAnnealing(),Optim.Options(iterations = 1000)) # Didn't work
    parameters_CE_SimulatedAnnealing = Optim.minimizer(opt_CE_SimulatedAnnealing)

    opt_CE_LBFGS = optimize(x->LeastSquareCE_ver1(x[1],x[2],Data,DP), 
    init_CE,LBFGS(); autodiff = :forward) # Found out that autodiff = :forward doesn't work here
    parameters_CE_LBFGS = Optim.minimizer(opt_CE_LBFGS)    

    # Collect every estimate (ω, λ) and convergence flag into one row per model.
    DF = DataFrame(Model = ["Random Parameter","Random Utility","CE"], NM_ω = [opt_RPM_NelderMead.minimizer[1],opt_RUM_NelderMead.minimizer[1],opt_CE_NelderMead.minimizer[1]],
    NM_λ = [opt_RPM_NelderMead.minimizer[2],opt_RUM_NelderMead.minimizer[2],opt_CE_NelderMead.minimizer[2]],
    NM_cvg = [Optim.converged(opt_RPM_NelderMead),Optim.converged(opt_RUM_NelderMead),Optim.converged(opt_CE_NelderMead)],
    SA_ω =[opt_RPM_SimulatedAnnealing.minimizer[1],opt_RUM_SimulatedAnnealing.minimizer[1],opt_CE_SimulatedAnnealing.minimizer[1]],
    SA_λ =[opt_RPM_SimulatedAnnealing.minimizer[2],opt_RUM_SimulatedAnnealing.minimizer[2],opt_CE_SimulatedAnnealing.minimizer[2]],
    SA_cvg = [Optim.converged(opt_RPM_SimulatedAnnealing),Optim.converged(opt_RUM_SimulatedAnnealing),Optim.converged(opt_CE_SimulatedAnnealing)],
    LBFGS_ω = [opt_RPM_LBFGS.minimizer[1],opt_RUM_LBFGS.minimizer[1],opt_CE_LBFGS.minimizer[1]],
    LBFGS_λ = [opt_RPM_LBFGS.minimizer[2],opt_RUM_LBFGS.minimizer[2],opt_CE_LBFGS.minimizer[2]],
    LBFGS_cvg = [Optim.converged(opt_RPM_LBFGS),Optim.converged(opt_RUM_LBFGS),Optim.converged(opt_CE_LBFGS)])
    # Print the table as LaTeX to stdout, then return it to the caller.
    show(stdout, MIME("text/latex"), DF)
    return(DF)
end

# Fix the global RNG seed so the simulated dataset — and hence the estimates — are reproducible.
Random.seed!(1111)
#n::Integer,ω::Real,μ_ε::Real,λ::Real,init_RPM::Vector,init_RUM::Vector,init_CE::Vector#
# One simulation/estimation run: 10000 subjects, ω = 1.0 (log utility), Gumbel
# errors with location 0 and scale parameter 1, and the given optimizer starts.
Experiment_RUM(10000,1.0,0.0,1.0,[1.2,1.2],[1.2,1.2],[0.5,0.5])