Thanks for the kind words and for using ArviZ.jl! It now supports all of ArviZ’s API and as of yesterday has some usage docs.
@daniel is right that log_likelihood is computed as a generated quantity in Stan. Turing doesn't have generated quantities yet, though from what I understand, the Turing devs are working on them.
We had a discussion on the Julia Slack workspace last week, and Jonas Mackerodt (@Jonas ?) shared the following trick for getting the log likelihood and posterior predictive values. I haven't been able to try it in detail yet, but perhaps it will be useful:
using Turing
using StatsPlots  # needed to render the mixeddensity plot recipes below
@model school8(J, y, sigma, gen=false) = begin
    mu ~ Normal(0, 5)
    tau ~ Truncated(Cauchy(0, 5), 0, Inf)
    eta = tzeros(J)
    eta ~ [Normal(0.0, 1.0)]  # vectorized syntax: J iid standard normals
    theta = mu .+ tau .* eta
    # Observe the data unconditionally, so that sampling targets the
    # posterior even when gen=false.
    for j = 1:J
        y[j] ~ Normal(theta[j], sigma[j])
    end
    if gen
        # Generated quantities: posterior predictive draws and pointwise log likelihood.
        y_hat = Vector{Real}(undef, J)
        log_lik = Vector{Real}(undef, J)
        for j = 1:J
            dist = Normal(theta[j], sigma[j])
            y_hat[j] = rand(dist)
            log_lik[j] = logpdf(dist, y[j])
        end
        return (mu=mu, tau=tau, eta=eta, theta=theta, y_hat=y_hat, log_lik=log_lik)
    end
end
@model school8_mv(J, y, sigma, gen=false) = begin
    mu ~ Normal(0, 5)
    tau ~ Truncated(Cauchy(0, 5), 0, Inf)
    # Same model, but eta is drawn in one shot; here MvNormal takes a mean
    # vector and a vector of standard deviations.
    eta ~ MvNormal(fill(0.0, J), fill(1.0, J))
    theta = mu .+ tau .* eta
    # Observe the data unconditionally, as above.
    for j = 1:J
        y[j] ~ Normal(theta[j], sigma[j])
    end
    if gen
        y_hat = Vector{Real}(undef, J)
        log_lik = Vector{Real}(undef, J)
        for j = 1:J
            dist = Normal(theta[j], sigma[j])
            y_hat[j] = rand(dist)
            log_lik[j] = logpdf(dist, y[j])
        end
        return (mu=mu, tau=tau, eta=eta, theta=theta, y_hat=y_hat, log_lik=log_lik)
    end
end
# Eight schools data
J = 8;
y = [28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0];
sigma = [15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0];
model_fun = school8(J, y, sigma);
model_fun_mv = school8_mv(J, y, sigma);
# school8(J, y, sigma, true)() correctly draws a NamedTuple of generated quantities
chns = sample(model_fun, NUTS(2000,0.8), 4000) # lacks theta, y_hat, and log_lik
chns_mv = sample(model_fun_mv, NUTS(2000,0.8), 4000) # lacks theta, y_hat, and log_lik
# Compare the two parameterizations (mixeddensity is an MCMCChains plot recipe)
mixeddensity(chns, [:mu, :tau])
mixeddensity!(chns_mv, [:mu, :tau])
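# Untested sanity check: names(chns) (from MCMCChains) should list only mu,
# tau, and the eta components, confirming that theta, y_hat, and log_lik are
# not stored in the chains.
names(chns)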
# Replay a single parameter vector `sm` through `model`. Because the model
# returns the NamedTuple of generated quantities, this recovers theta, y_hat,
# and log_lik for that draw. (The name is a leftover from Turing's
# optimization example, which uses the same pattern to return a log density.)
function get_nlogp(model, sm)
    # Build a VarInfo for the model, populated from the prior.
    vi = Turing.VarInfo(model)
    spl = Turing.SampleFromPrior()
    # Overwrite the parameter values with the supplied draw, then run the model.
    new_vi = Turing.VarInfo(vi, spl, sm)
    return model(new_vi, spl)
end
# Instantiate the models with gen=true so the generated quantities are returned.
model_fun_ppc = school8(J, y, sigma, true)
model_fun_mv_ppc = school8_mv(J, y, sigma, true)
# Pull the posterior draws out of the chains as plain arrays.
mu_post = Array(chns[:mu])
tau_post = Array(chns[:tau])
eta_post = Array(chns[:eta])
mu_post_mv = Array(chns_mv[:mu])
tau_post_mv = Array(chns_mv[:tau])
eta_post_mv = Array(chns_mv[:eta])
# Replay the first posterior draw; the vector must follow the model's
# parameter order (mu, tau, eta).
get_nlogp(model_fun_ppc, vcat(mu_post[1], tau_post[1], eta_post[1, :]))
get_nlogp(model_fun_mv_ppc, vcat(mu_post_mv[1], tau_post_mv[1], eta_post_mv[1, :]))
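To get everything ArviZ needs, you would replay every posterior draw, not just the first. Here is a minimal, untested sketch of that loop, assuming the models above and a single chain; the resulting matrices could then be reshaped to ArviZ.jl's (chain, draw, ...) convention and passed to a converter such as from_namedtuple:

# Replay all posterior draws through the gen=true model and collect the
# pointwise log likelihood and posterior predictive draws.
ndraws = size(eta_post, 1)
log_lik = Matrix{Float64}(undef, ndraws, J)
y_hat = Matrix{Float64}(undef, ndraws, J)
for i in 1:ndraws
    gq = get_nlogp(model_fun_ppc, vcat(mu_post[i], tau_post[i], eta_post[i, :]))
    log_lik[i, :] = gq.log_lik
    y_hat[i, :] = gq.y_hat
end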