Gaussian Copula Priors in Turing: Auto-differentiation Error with Beta Quantile Function

I’m trying to fit a Bayesian model in Turing that uses Gaussian copula priors for parameters bounded in (0, 1). The baseline model with independent priors works fine, but when I introduce copula-based correlations, I encounter an auto-differentiation error.

The basic model with independent Beta priors runs without issues:

using Turing, Distributions, Copulas, LinearAlgebra, Plots, StatsPlots

# function to simulate data from Zhou's model
function simdata(
    n;
    π = .1,
    se = .9,
    sp = .8,
    ν_00 = .5,
    ν_01 = .7,
    ν_10 = .6,
    ν_11 = .9)
    
    p_s1 = ν_11 * se * π
    p_s2 = ν_01 * (1 - se) * π
    p_r1 = ν_10 * (1 - sp) * (1 - π)
    p_r2 = ν_00 * sp * (1 - π)
    p_u1 = (1 - ν_11) * se * π + (1 - ν_10) * (1 - sp) * (1 - π)
    p_u2 = (1 - ν_01) * (1 - se) * π + (1 - ν_00) * sp * (1 - π)
    
    d = Distributions.Multinomial(n, [p_s1, p_s2, p_r1, p_r2, p_u1, p_u2])
    
    rand(d, 1)
end

# reparametrize the Beta distribution in terms of its moments (mean, sd)
function Beta2(μ, σ)
    σ² = σ^2
    if σ² >= μ*(1 - μ)
        σ² = 0.99*μ*(1 - μ)
    end
    γ = μ*(1 - μ)/σ² - 1
    α = μ*γ
    β = (1 - μ)*γ
    Distributions.Beta(α, β)
end
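
As a quick sanity check, the returned distribution reproduces the requested moments (using only the helper defined above):

d = Beta2(0.85, 0.10)
params(d)        # ≈ (9.99, 1.76)
mean(d), std(d)  # ≈ (0.85, 0.10)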

# Zhou's model
@model function zhou_model(y, priors)
    # Extract data components
    n = sum(y)
    
    # Prior distributions for diagnostic test parameters
    π ~ priors[:π]      # Disease prevalence
    se ~ priors[:se]    # Sensitivity
    sp ~ priors[:sp]    # Specificity
    
    # Prior distributions for verification process parameters
    ν_00 ~ priors[:ν_00]   # P(V=1|T=0,D=0)
    ν_01 ~ priors[:ν_01]   # P(V=1|T=0,D=1)
    ν_10 ~ priors[:ν_10]   # P(V=1|T=1,D=0)
    ν_11 ~ priors[:ν_11]   # P(V=1|T=1,D=1)
    
    # Calculate multinomial cell probabilities
    p_s1 = ν_11 * se * π
    p_s2 = ν_01 * (1 - se) * π
    p_r1 = ν_10 * (1 - sp) * (1 - π)
    p_r2 = ν_00 * sp * (1 - π)
    p_u1 = (1 - ν_11) * se * π + (1 - ν_10) * (1 - sp) * (1 - π)
    p_u2 = (1 - ν_01) * (1 - se) * π + (1 - ν_00) * sp * (1 - π)
    
    # Likelihood: observed data follows multinomial distribution
    y ~ Multinomial(n, [p_s1, p_s2, p_r1, p_r2, p_u1, p_u2])
end

# prior distributions: beta
priors = Dict(
    :π => Beta2(0.1, 0.05),
    :se => Beta2(0.85, 0.10),
    :sp => Beta2(0.85, 0.10),
    :ν_00 => Beta2(0.6, 0.15),
    :ν_01 => Beta2(0.7, 0.15),
    :ν_10 => Beta2(0.7, 0.15),   
    :ν_11 => Beta2(0.9, 0.10)
)

# true values of parameters
true_params = (π = 0.12,
              se = 0.88,
              sp = 0.82,
              ν_00 = 0.55,
              ν_01 = 0.72,
              ν_10 = 0.68,
              ν_11 = 0.92)

# data
y = simdata(1000; true_params...)

# Fit the model with multiple chains (multi-core)
model = zhou_model(y, priors)
chain = sample(model, NUTS(), MCMCThreads(), 10000, 4; num_warmup = 2500, thinning = 5)

# Extract posterior summaries
posterior_summary = describe(chain)
plot(chain)

When I try to introduce parameter correlations via Gaussian copulas, I get an auto-differentiation error:

#### COPULA ####
@model function zhou_model_copula(y, priors)
    # Extract data components
    n = sum(y)
    
    # ============================================
    # π: Independent from all others
    # ============================================
    π ~ priors[:π]
    
    # ============================================
    # se and sp: Negatively correlated via Gaussian copula
    # ============================================
    # Prior on 2x2 correlation matrix: LKJ with η < 1 to favor strong correlations
    R_sesp ~ LKJCholesky(2, 0.5)  # η = 0.5 pushes toward extreme correlations
    
    # Sample from standard normal (copula space)
    z_sesp ~ MvNormal(zeros(2), I)
    
    # Transform to correlated standard normals
    u_sesp = R_sesp.L * z_sesp  # Cholesky factor multiplication
    
    # Transform to uniform via standard normal CDF
    unif_sesp = cdf.(Normal(), u_sesp)
    
    # Transform to Beta via inverse CDF (quantile function)
    se = quantile(priors[:se], unif_sesp[1])
    sp = quantile(priors[:sp], unif_sesp[2])
    
    # ============================================
    # ν parameters: Gaussian copula with non-informative prior
    # ============================================
    # Prior on 4x4 correlation matrix: LKJ with η = 1 (uniform over correlation matrices)
    R_nu ~ LKJCholesky(4, 1.0)  # η = 1 is non-informative
    
    # Sample from standard normal (copula space)
    z_nu ~ MvNormal(zeros(4), I)
    
    # Transform to correlated standard normals
    u_nu = R_nu.L * z_nu
    
    # Transform to uniform via standard normal CDF
    unif_nu = cdf.(Normal(), u_nu)
    
    # Transform to Beta via inverse CDF
    ν_00 = quantile(priors[:ν_00], unif_nu[1])
    ν_01 = quantile(priors[:ν_01], unif_nu[2])
    ν_10 = quantile(priors[:ν_10], unif_nu[3])
    ν_11 = quantile(priors[:ν_11], unif_nu[4])
    
    # ============================================
    # Calculate multinomial cell probabilities
    # ============================================
    p_s1 = ν_11 * se * π
    p_s2 = ν_01 * (1 - se) * π
    p_r1 = ν_10 * (1 - sp) * (1 - π)
    p_r2 = ν_00 * sp * (1 - π)
    p_u1 = (1 - ν_11) * se * π + (1 - ν_10) * (1 - sp) * (1 - π)
    p_u2 = (1 - ν_01) * (1 - se) * π + (1 - ν_00) * sp * (1 - π)
    
    # Likelihood
    y ~ Multinomial(n, [p_s1, p_s2, p_r1, p_r2, p_u1, p_u2])

    # Return transformed parameters
    return (se, sp, ν_00, ν_01, ν_10, ν_11)
end
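
For reference, here is the transform chain in isolation, as a standalone sketch with a fixed, hypothetical correlation matrix (in the model the correlation comes from the LKJ prior):

using Distributions, LinearAlgebra

R = [1.0 -0.6; -0.6 1.0]                   # hypothetical target correlation
L = cholesky(R).L
z = randn(2)                               # independent standard normals
u = L * z                                  # correlated, still N(0, 1) marginally
unif = cdf.(Normal(), u)                   # uniforms with Gaussian dependence
se = quantile(Beta2(0.85, 0.10), unif[1])  # Beta marginals via Beta2 above
sp = quantile(Beta2(0.85, 0.10), unif[2])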

model = zhou_model_copula(y, priors)
chain = sample(model, NUTS(), 2000)
ERROR: MethodError: no method matching _beta_inc_inv(::ForwardDiff.Dual{…}, ::ForwardDiff.Dual{…}, ::ForwardDiff.Dual{…})
The function `_beta_inc_inv` exists, but no method is defined for this combination of argument types.

Closest candidates are:
  _beta_inc_inv(::Float64, ::Float64, ::Float64, ::Float64)
   @ SpecialFunctions C:\Users\ThinkStation\.julia\packages\SpecialFunctions\mf0qH\src\beta_inc.jl:937
  _beta_inc_inv(::Float64, ::Float64, ::Float64)
   @ SpecialFunctions C:\Users\ThinkStation\.julia\packages\SpecialFunctions\mf0qH\src\beta_inc.jl:937
  _beta_inc_inv(::T, ::T, ::T, ::T) where T<:Union{Float16, Float32}
   @ SpecialFunctions C:\Users\ThinkStation\.julia\packages\SpecialFunctions\mf0qH\src\beta_inc.jl:1052
  ...

Stacktrace:
  [1] beta_inc_inv(a::Float64, b::Float64, p::ForwardDiff.Dual{ForwardDiff.Tag{…}, Float64, 7})
    @ SpecialFunctions C:\Users\ThinkStation\.julia\packages\SpecialFunctions\mf0qH\src\beta_inc.jl:931
  [2] betainvcdf(α::Float64, β::Float64, p::ForwardDiff.Dual{ForwardDiff.Tag{DynamicPPL.DynamicPPLTag, Float64}, Float64, 7})
    @ StatsFuns C:\Users\ThinkStation\.julia\packages\StatsFuns\gSNRw\src\distrs\beta.jl:101
  [3] quantile
    @ C:\Users\ThinkStation\.julia\packages\Distributions\YQSrn\src\univariates.jl:652 [inlined]
  [4] zhou_model_copula(__model__::DynamicPPL.Model{…}, __varinfo__::DynamicPPL.ThreadSafeVarInfo{…}, y::Matrix{…}, priors::Dict{…})
    @ Main c:\Users\ThinkStation\Desktop\TINERVIA\personal\baysesp\code.jl:124
  [5] _evaluate!!
    @ C:\Users\ThinkStation\.julia\packages\DynamicPPL\R1CV0\src\model.jl:921 [inlined]
  [6] evaluate_threadsafe!!
    @ C:\Users\ThinkStation\.julia\packages\DynamicPPL\R1CV0\src\model.jl:903 [inlined]
  [7] evaluate!!(model::DynamicPPL.Model{…}, varinfo::DynamicPPL.VarInfo{…})
    @ DynamicPPL C:\Users\ThinkStation\.julia\packages\DynamicPPL\R1CV0\src\model.jl:870
  [8] logdensity_at
    @ C:\Users\ThinkStation\.julia\packages\DynamicPPL\R1CV0\src\logdensityfunction.jl:242 [inlined]
  [9] LogDensityAt
    @ C:\Users\ThinkStation\.julia\packages\DynamicPPL\R1CV0\src\logdensityfunction.jl:262 [inlined]
 [10] chunk_mode_gradient!(result::DiffResults.MutableDiffResult{…}, f::DynamicPPL.LogDensityAt{…}, x::Vector{…}, cfg::ForwardDiff.GradientConfig{…})
    @ ForwardDiff C:\Users\ThinkStation\.julia\packages\ForwardDiff\Or6Qh\src\gradient.jl:132
 [11] gradient!(result::DiffResults.MutableDiffResult{…}, f::DynamicPPL.LogDensityAt{…}, x::Vector{…}, cfg::ForwardDiff.GradientConfig{…}, ::Val{…})
    @ ForwardDiff C:\Users\ThinkStation\.julia\packages\ForwardDiff\Or6Qh\src\gradient.jl:41
 [12] value_and_gradient(::DynamicPPL.LogDensityAt{…}, ::DifferentiationInterfaceForwardDiffExt.ForwardDiffGradientPrep{…}, ::AutoForwardDiff{…}, ::Vector{…})     
    @ DifferentiationInterfaceForwardDiffExt C:\Users\ThinkStation\.julia\packages\DifferentiationInterface\cZ1tI\ext\DifferentiationInterfaceForwardDiffExt\onearg.jl:419
 [13] logdensity_and_gradient(f::LogDensityFunction{…}, x::Vector{…})
    @ DynamicPPL C:\Users\ThinkStation\.julia\packages\DynamicPPL\R1CV0\src\logdensityfunction.jl:289
 [14] Fix1
    @ .\operators.jl:1127 [inlined]
 [15] ∂H∂θ(h::AdvancedHMC.Hamiltonian{…}, θ::Vector{…})
    @ AdvancedHMC C:\Users\ThinkStation\.julia\packages\AdvancedHMC\B1wPY\src\hamiltonian.jl:46
 [16] phasepoint(h::AdvancedHMC.Hamiltonian{…}, θ::Vector{…}, r::Vector{…})
    @ AdvancedHMC C:\Users\ThinkStation\.julia\packages\AdvancedHMC\B1wPY\src\hamiltonian.jl:103
 [17] phasepoint
    @ C:\Users\ThinkStation\.julia\packages\AdvancedHMC\B1wPY\src\hamiltonian.jl:185 [inlined]
 [18] find_initial_params(rng::Random.TaskLocalRNG, model::DynamicPPL.Model{…}, varinfo::DynamicPPL.VarInfo{…}, hamiltonian::AdvancedHMC.Hamiltonian{…}; max_attempts::Int64)
    @ Turing.Inference C:\Users\ThinkStation\.julia\packages\Turing\ArWoY\src\mcmc\hmc.jl:156
 [19] find_initial_params(rng::Random.TaskLocalRNG, model::DynamicPPL.Model{…}, varinfo::DynamicPPL.VarInfo{…}, hamiltonian::AdvancedHMC.Hamiltonian{…})
    @ Turing.Inference C:\Users\ThinkStation\.julia\packages\Turing\ArWoY\src\mcmc\hmc.jl:145
 [20] initialstep(rng::Random.TaskLocalRNG, model::DynamicPPL.Model{…}, spl::DynamicPPL.Sampler{…}, vi_original::DynamicPPL.VarInfo{…}; initial_params::Nothing, nadapts::Int64, verbose::Bool, kwargs::@Kwargs{})
    @ Turing.Inference C:\Users\ThinkStation\.julia\packages\Turing\ArWoY\src\mcmc\hmc.jl:206
 [21] step(rng::Random.TaskLocalRNG, model::DynamicPPL.Model{…}, spl::DynamicPPL.Sampler{…}; initial_params::Nothing, kwargs::@Kwargs{…})
    @ DynamicPPL C:\Users\ThinkStation\.julia\packages\DynamicPPL\R1CV0\src\sampler.jl:133
 [22] step
    @ C:\Users\ThinkStation\.julia\packages\DynamicPPL\R1CV0\src\sampler.jl:116 [inlined]
 [23] macro expansion
    @ C:\Users\ThinkStation\.julia\packages\AbstractMCMC\nQLlh\src\sample.jl:171 [inlined]
 [24] macro expansion
    @ C:\Users\ThinkStation\.julia\packages\AbstractMCMC\nQLlh\src\logging.jl:137 [inlined]
 [25] mcmcsample(rng::Random.TaskLocalRNG, model::DynamicPPL.Model{…}, sampler::DynamicPPL.Sampler{…}, N::Int64; progress::Bool, progressname::String, callback::Nothing, num_warmup::Int64, discard_initial::Int64, thinning::Int64, chain_type::Type, initial_state::Nothing, kwargs::@Kwargs{…})
    @ AbstractMCMC C:\Users\ThinkStation\.julia\packages\AbstractMCMC\nQLlh\src\sample.jl:151
 [26] sample(rng::Random.TaskLocalRNG, model::DynamicPPL.Model{…}, sampler::DynamicPPL.Sampler{…}, N::Int64; chain_type::Type, resume_from::Nothing, initial_state::Nothing, progress::Bool, nadapts::Int64, discard_adapt::Bool, discard_initial::Int64, kwargs::@Kwargs{})
    @ Turing.Inference C:\Users\ThinkStation\.julia\packages\Turing\ArWoY\src\mcmc\hmc.jl:117
 [27] sample
    @ C:\Users\ThinkStation\.julia\packages\Turing\ArWoY\src\mcmc\hmc.jl:86 [inlined]
 [28] #sample#105
    @ C:\Users\ThinkStation\.julia\packages\Turing\ArWoY\src\mcmc\abstractmcmc.jl:31 [inlined]
 [29] sample
    @ C:\Users\ThinkStation\.julia\packages\Turing\ArWoY\src\mcmc\abstractmcmc.jl:22 [inlined]
 [30] #sample#104
    @ C:\Users\ThinkStation\.julia\packages\Turing\ArWoY\src\mcmc\abstractmcmc.jl:19 [inlined]
 [31] sample(model::DynamicPPL.Model{…}, alg::NUTS{…}, N::Int64)
    @ Turing.Inference C:\Users\ThinkStation\.julia\packages\Turing\ArWoY\src\mcmc\abstractmcmc.jl:16
 [32] top-level scope
    @ c:\Users\ThinkStation\Desktop\TINERVIA\personal\baysesp\code.jl:166
Some type information was truncated. Use `show(err)` to see complete types.

I imagine the problem is with

# Transform to Beta via inverse CDF
    ν_00 = quantile(priors[:ν_00], unif_nu[1])
    ν_01 = quantile(priors[:ν_01], unif_nu[2])
    ν_10 = quantile(priors[:ν_10], unif_nu[3])
    ν_11 = quantile(priors[:ν_11], unif_nu[4])

The error occurs when computing quantile(priors[:ν_00], unif_nu[1]) and the similar lines. The Beta quantile function (which calls SpecialFunctions._beta_inc_inv internally) doesn’t support ForwardDiff.Dual numbers, which Turing’s default AD backend (ForwardDiff) uses to compute the gradients NUTS needs.
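
The failure is easy to reproduce outside Turing (minimal example, requiring only ForwardDiff and Distributions):

using ForwardDiff, Distributions

# Differentiating the Beta quantile w.r.t. the probability hits the same
# MethodError: SpecialFunctions._beta_inc_inv is only defined for
# Float64/Float32/Float16 arguments, not ForwardDiff.Dual.
ForwardDiff.derivative(p -> quantile(Beta(2.0, 3.0), p), 0.5)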

Is there a workaround for this auto-differentiation limitation? I’ve tried ReverseDiff, but that didn’t work either.

Try a source-to-source AD like Mooncake.

Enzyme should also work quite well (and fast) on Turing code too!
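
Concretely, that means passing an adtype to NUTS. A sketch, assuming a recent Turing that re-exports the ADTypes constructors (on older ADTypes versions you may need AutoMooncake(; config = nothing)):

import Mooncake

model = zhou_model_copula(y, priors)
chain = sample(model, NUTS(; adtype = AutoMooncake()), 2000)

# Enzyme is analogous: import Enzyme, then NUTS(; adtype = AutoEnzyme())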


Using Mooncake was the solution. Enzyme gave me problems.

Now, the problem is that my implementation is really slow.

My aim is to compare the two models (independent Beta priors vs. Gaussian copula priors) to see whether the Gaussian copula reduces the MSE. It does, but the reduction is very small.

I’m new to Julia and this is the most complicated thing I’ve attempted so far. Do you see any optimization opportunities that I’m missing?

using Turing, Distributions, Copulas, LinearAlgebra, Plots, StatsPlots, DifferentiationInterface, MCMCChains, Base.Threads, ProgressMeter
import Mooncake


# function to simulate data from Zhou's model
function simdata(
    n;
    π = .1,
    se = .9,
    sp = .8,
    ν_00 = .5,
    ν_01 = .7,
    ν_10 = .6,
    ν_11 = .9)
    
    p_s1 = ν_11 * se * π
    p_s2 = ν_01 * (1 - se) * π
    p_r1 = ν_10 * (1 - sp) * (1 - π)
    p_r2 = ν_00 * sp * (1 - π)
    p_u1 = (1 - ν_11) * se * π + (1 - ν_10) * (1 - sp) * (1 - π)
    p_u2 = (1 - ν_01) * (1 - se) * π + (1 - ν_00) * sp * (1 - π)
    
    d = Distributions.Multinomial(n, [p_s1, p_s2, p_r1, p_r2, p_u1, p_u2])
    
    rand(d, 1)
end

# reparametrize the Beta distribution in terms of its moments (mean, sd)
function Beta2(μ, σ)
    σ² = σ^2
    if σ² >= μ*(1 - μ)
        σ² = 0.99*μ*(1 - μ)
    end
    γ = μ*(1 - μ)/σ² - 1
    α = μ*γ
    β = (1 - μ)*γ
    Distributions.Beta(α, β)
end

# Zhou's model
@model function zhou_model(y, priors)
    # Extract data components
    n = sum(y)
    
    # Prior distributions for diagnostic test parameters
    π ~ priors[:π]      # Disease prevalence
    se ~ priors[:se]    # Sensitivity
    sp ~ priors[:sp]    # Specificity
    
    # Prior distributions for verification process parameters
    ν_00 ~ priors[:ν_00]   # P(V=1|T=0,D=0)
    ν_01 ~ priors[:ν_01]   # P(V=1|T=0,D=1)
    ν_10 ~ priors[:ν_10]   # P(V=1|T=1,D=0)
    ν_11 ~ priors[:ν_11]   # P(V=1|T=1,D=1)
    
    # Calculate multinomial cell probabilities
    p_s1 = ν_11 * se * π
    p_s2 = ν_01 * (1 - se) * π
    p_r1 = ν_10 * (1 - sp) * (1 - π)
    p_r2 = ν_00 * sp * (1 - π)
    p_u1 = (1 - ν_11) * se * π + (1 - ν_10) * (1 - sp) * (1 - π)
    p_u2 = (1 - ν_01) * (1 - se) * π + (1 - ν_00) * sp * (1 - π)
    
    # Likelihood: observed data follows multinomial distribution
    y ~ Multinomial(n, [p_s1, p_s2, p_r1, p_r2, p_u1, p_u2])
end


@model function zhou_model_copula(y, priors)
    # Extract data components
    n = sum(y)

    # ============================================
    # π: Independent from all others
    # ============================================
    π ~ priors[:π]

    # ============================================
    # se and sp: Negatively correlated via Gaussian copula
    # ============================================
    # Prior on 2x2 correlation matrix: LKJ with η < 1 to favor strong correlations
    L_sesp ~ LKJCholesky(2, 0.5)  # η = 0.5 pushes toward extreme correlations
    # R_sesp = L_sesp.L * L_sesp.L'

    # Sample from standard normal (copula space)
    z_sesp ~ MvNormal(zeros(2), I)

    # Transform to correlated standard normals
    u_sesp = L_sesp.L * z_sesp  # Cholesky factor multiplication

    # Transform to uniform via standard normal CDF
    unif_sesp = cdf.(Normal(), u_sesp)

    # Transform to Beta via inverse CDF (quantile function)
    se = quantile(priors[:se], unif_sesp[1])
    sp = quantile(priors[:sp], unif_sesp[2])

    # ============================================
    # ν parameters: Gaussian copula with non-informative prior
    # ============================================
    # Prior on 4x4 correlation matrix: LKJ with η = 1 (uniform over correlation matrices)
    L_nu ~ LKJCholesky(4, 1.0)  # η = 1 is non-informative
    # R_nu = L_nu.L * L_nu.L'

    # Sample from standard normal (copula space)
    z_nu ~ MvNormal(zeros(4), I)

    # Transform to correlated standard normals
    u_nu = L_nu.L * z_nu

    # Transform to uniform via standard normal CDF
    unif_nu = cdf.(Normal(), u_nu)

    # Transform to Beta via inverse CDF
    ν_00 = quantile(priors[:ν_00], unif_nu[1])
    ν_01 = quantile(priors[:ν_01], unif_nu[2])
    ν_10 = quantile(priors[:ν_10], unif_nu[3])
    ν_11 = quantile(priors[:ν_11], unif_nu[4])

    # ============================================
    # Calculate multinomial cell probabilities
    # ============================================
    p_s1 = ν_11 * se * π
    p_s2 = ν_01 * (1 - se) * π
    p_r1 = ν_10 * (1 - sp) * (1 - π)
    p_r2 = ν_00 * sp * (1 - π)
    p_u1 = (1 - ν_11) * se * π + (1 - ν_10) * (1 - sp) * (1 - π)
    p_u2 = (1 - ν_01) * (1 - se) * π + (1 - ν_00) * sp * (1 - π)

    # Likelihood
    y ~ Multinomial(n, [p_s1, p_s2, p_r1, p_r2, p_u1, p_u2])

    # Return transformed parameters
    return (se, sp, ν_00, ν_01, ν_10, ν_11)
end

function simulation(
    true_params,
    priors;
    nsim = 1000,
    n = 1000,
    niter = 5000,
    num_warmup = 1000,
    thinning = 4)

    ind = (num_warmup + 1):thinning:niter
    true_params_vector = collect(true_params)  # NamedTuple values, in order

    mse_chain1_matrix = zeros(nsim, 7)
    mse_chain2_matrix = zeros(nsim, 7)

    @showprogress Threads.@threads for i in 1:nsim
        y = simdata(n; true_params...)

        # model 1
        model1 = zhou_model(y, priors)
        chain1 = sample(model1, NUTS(), niter; num_warmup = num_warmup, thinning = thinning)
        chain1 = Array(chain1)[ind,:]
        
        mean_chain1 = vec(mean(chain1, dims = 1))
        var_chain1 = vec(var(chain1, dims = 1))
        bias_chain1 = mean_chain1 - true_params_vector
        mse_chain1_matrix[i, :] = bias_chain1.^2 + var_chain1


        # model2
        model2 = zhou_model_copula(y, priors)
        chain2 = sample(model2, NUTS(adtype = AutoMooncake()), niter; num_warmup = num_warmup, thinning = thinning)
        generated2 = generated_quantities(model2, chain2)[ind, :]
        # Get all parameters in model2
        π_samples = Array(chain2[:π])[ind,:]
        se_samples = [g[1] for g in generated2]
        sp_samples = [g[2] for g in generated2]
        ν_00_samples = [g[3] for g in generated2]
        ν_01_samples = [g[4] for g in generated2]
        ν_10_samples = [g[5] for g in generated2]
        ν_11_samples = [g[6] for g in generated2]

        gen2 = hcat(π_samples, se_samples, sp_samples, ν_00_samples, ν_01_samples, ν_10_samples, ν_11_samples)
        mean_chain2 = vec(mean(gen2, dims = 1))
        var_chain2 = vec(var(gen2, dims = 1))
        bias_chain2 = mean_chain2 - true_params_vector
        mse_chain2_matrix[i, :] = bias_chain2.^2 + var_chain2    
    end

    mean_mse_1 = vec(mean(mse_chain1_matrix, dims = 1))
    mean_mse_2 = vec(mean(mse_chain2_matrix, dims = 1))

    hcat(mean_mse_1, mean_mse_2)

end



# prior distributions: beta
priors = Dict(
    :π => Beta2(0.1, 0.05),
    :se => Beta2(0.85, 0.10),
    :sp => Beta2(0.85, 0.10),
    :ν_00 => Beta2(0.6, 0.15),
    :ν_01 => Beta2(0.7, 0.15),
    :ν_10 => Beta2(0.7, 0.15),   
    :ν_11 => Beta2(0.8, 0.15)
)

# true values of parameters
true_params = (π = 0.12,
              se = 0.88,
              sp = 0.82,
              ν_00 = 0.55,
              ν_01 = 0.72,
              ν_10 = 0.68,
              ν_11 = 0.92)


nsim = 50
n = 1000
niter = 5000
num_warmup = 1000
thinning = 4

simulation(true_params, priors; nsim = nsim, n = n, niter = niter, num_warmup = num_warmup, thinning = thinning)



The first thing you want to do is profile your code to see where it spends its time. You can do that with the @profview macro in the VS Code Julia extension, or with packages like ProfileView.jl. There are more details on performance engineering at https://modernjuliaworkflows.org
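
For example, a minimal sketch (@profview comes from ProfileView.jl, or from the Julia extension if you are in VS Code):

using ProfileView

model = zhou_model_copula(y, priors)
# run once first so compilation does not dominate the profile
sample(model, NUTS(adtype = AutoMooncake()), 10)
@profview sample(model, NUTS(adtype = AutoMooncake()), 500)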