I wrote a Metropolis-Hastings sampler by hand for a problem where I sample arrays from a custom distribution, which can be found here: https://github.com/marouanehanhasse/QComm/blob/main/distributions.jl

Before, I was using the Klara package with older versions of Julia; since it was discontinued I've been struggling to find a similar package. I tried AdvancedMH, but I can't get any output because I can't compute the gradient of the target function (it goes through JuMP). Then I found AdaptiveMCMC (https://juliapackages.com/p/adaptivemcmc), which looks like exactly what I want, but I didn't quite understand the example they give when it comes to a custom distribution. I want to know how I can use this package when I have a custom distribution from which I can draw the proposed array, plus a target function.
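From the README, my understanding (and I may well be wrong about the details) is that the adaptive random-walk samplers in AdaptiveMCMC build a Gaussian proposal internally, so you only hand it a log-target, roughly like this:

using AdaptiveMCMC

# toy log-target, just to check that I can run the sampler at all
log_p(x) = -0.5 * sum(x .^ 2)

# start at the origin and run 10_000 iterations with the default adaptive random-walk settings
out = adaptive_rwm(zeros(2), log_p, 10_000)

# out.X should then hold the samples, one column per iteration

What I don't see is where my own proposal (qrs.psetproposal below, which proposes a whole new array given the current one) would fit in this setup.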
This is the manual MCMC code I wrote:
using Plots
using Random
using SymPy
using CSV
using DataFrames
using Main.QuantumRelay
using Distributions
#calling the github code
function quantumrelay(alpha, delta, name::String, n) # n is the number of sources; for 1 relay we have 2 sources
chi = fill(sqrt(0.06), n) # the parameter chi
phi = im*tanh.(chi)
omega = -log(prod(cosh.(chi)))
syms, op = qrelay_op(n, phi, alpha, delta) #operators.jl
op_a, op_ab, mat, coef = op_mat(op) #operators.jl
op_q2 = [syms.apH[1], syms.apV[1], syms.bpH[end], syms.bpV[end]] # array in operators.jl
op_q1 = [syms.apH[2:end]..., syms.apV[2:end]..., syms.bpH[1:end-1]..., syms.bpV[1:end-1]...] #array in operators.jl
mask_q1 = [op in op_q1 for op in op_a]; #array in operators.jl
mask_q2 = [op in op_q2 for op in op_a]; #array in operators.jl
qq = [x in syms.apH || x in syms.bpV ? 1 : 0 for x in op_a]; #array in operators.jl
pdet0 = pdet_maker(0.04, 3e-5) #function utility.jl
    qrs = QRelaySampler(mat, coef, omega, pdet0) #QuantumRelay.jl
    targetcache = Dict{Vector{Int}, Float64}() # cache for target values (not used below)
    target(x::Vector) = log(qrs.prob(qq, x, mask_q1)) + log(qrs.prob(x)) #the log target function of the MCMC; prob is defined in QuantumRelay.jl
Iteration=2^17
burnin=2^10
samples=Iteration+burnin
step=5
save_iter=burnin:samples
    dist = qrs.psetproposal #the proposal distribution from distributions.jl
selected=Array[]
Q = Float64[]
current_x = zeros(8)
    @time for i in 2:samples #from this line the MCMC algorithm starts
        proposed_x = rand(dist(current_x))                  # draw a candidate from the proposal
        prop_proposed = logpdf(dist(current_x), proposed_x) # log q(y|x), the move x -> y
        prop_current = logpdf(dist(proposed_x), current_x)  # log q(x|y), the move y -> x
        A = min(0, target(proposed_x) + prop_current - target(current_x) - prop_proposed) # log acceptance probability
        if log(rand()) < A
            #if i in save_iter
            push!(selected, proposed_x)
            push!(Q, qrs.prob(qq, proposed_x, mask_q2))
            #end
            current_x = proposed_x
        end
    end
return selected, Q
end
prob = Vector{Float64}[] # one vector of probabilities per value of beta
accepted = Int[]         # number of accepted samples per value of beta
for i = 0:14
    beta = i*pi/14
    name = string(i)
    selected, Q = quantumrelay(pi/4, beta, name, 2)
    push!(accepted, length(selected))
    println("beta: ", beta)
    push!(prob, Q)
    df = DataFrame(selected=selected) # built but never written out; CSV.write(name*".csv", df) would save it
end
The reason I want to switch is that I was getting better results with Klara.jl before; with this hand-written version the outcome is not as good as it was with Klara.jl.
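Maybe part of the problem is on my side rather than in the package choice: in the loop above I only push accepted proposals into selected, whereas standard Metropolis-Hastings records the current state at every iteration, repeating it whenever a proposal is rejected. If I rewrote the loop to record the chain that way, reusing the save_iter and step variables that I define but never actually use, I think it would look something like this:

    @time for i in 2:samples
        proposed_x = rand(dist(current_x))
        prop_proposed = logpdf(dist(current_x), proposed_x) # log q(y|x)
        prop_current = logpdf(dist(proposed_x), current_x)  # log q(x|y)
        A = min(0, target(proposed_x) + prop_current - target(current_x) - prop_proposed)
        if log(rand()) < A
            current_x = proposed_x # accept: move to the proposal
        end                        # reject: keep the current state
        if i in save_iter && i % step == 0
            push!(selected, current_x) # record the current state, accepted or not
            push!(Q, qrs.prob(qq, current_x, mask_q2))
        end
    end

Is this the kind of bookkeeping that Klara was doing for me, and does AdaptiveMCMC handle it as well?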