Thanks for your answer, Chris. I'm not entirely sure what you mean by the observation functionality; I may need some pointers to the right docs.
Code
Components
# assumed preamble (not shown above): package imports and the independent variable t
using ModelingToolkit, DifferentialEquations
using ModelingToolkitStandardLibrary.Electrical
using ModelingToolkitStandardLibrary.Blocks: TimeVaryingFunction
using ModelingToolkit: t_nounits as t # on older MTK versions: @variables t
@named Ci = Capacitor(C=5) # thermal capacitance (electrical analogy)
@named Ria = Resistor(R=1) # thermal resistance
@named Qh = Current() # heat-flow source
@named Ta = Voltage() # ambient-temperature source
@named gnd = Ground()
Control signals
# t_end (simulation horizon) is assumed to be defined earlier
value_vector = 300 * randn(Int(t_end)) # one random heat-flow level per time unit
Φ_h(t) = max(t >= t_end ? value_vector[end] : value_vector[Int(floor(t)) + 1], 0) # zero-order hold, clipped at 0
@register_symbolic Φ_h(t)
@named f_h = TimeVaryingFunction(Φ_h)
T_zeroC = 273.15 # [K], 0 °C
T_bias = T_zeroC + 20 # [K]
T_a(t) = T_bias # + 20*sin(ω*t)
@register_symbolic T_a(t)
@named f_a = TimeVaryingFunction(T_a)
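(For clarity, Φ_h is a zero-order hold over value_vector, clipped at zero; a quick check of the behaviour:)
Φ_h(0.25) == Φ_h(0.75) # true: both fall in the first one-unit hold interval
Φ_h(t_end + 1) == max(value_vector[end], 0) # true: held at the last value beyond t_end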
Connections
eqs = [
    # component connections
    ModelingToolkit.connect(Ci.p, Ria.p),
    ModelingToolkit.connect(Qh.n, Ci.p),
    ModelingToolkit.connect(Ta.p, Ria.n),
    ModelingToolkit.connect(gnd.g, Ci.n),
    ModelingToolkit.connect(gnd.g, Qh.p),
    ModelingToolkit.connect(gnd.g, Ta.n),
    # input to source connections
    ModelingToolkit.connect(f_h.output, Qh.I),
    ModelingToolkit.connect(f_a.output, Ta.V),
];
mtk_components = [Ci, Ria, Qh, Ta, f_h, f_a, gnd];
Problem setup
@named model = ODESystem(eqs, t, systems = mtk_components)
sys = structural_simplify(model)
# initial condition for the single unknown left after simplification (the capacitor state, i.e. T_i)
u0 = [T_bias]
prob = ODEProblem(sys, u0, (0.0, t_end))
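As an aside, this is how I have been checking which parameters survive structural_simplify and what the problem stores (the exact container behind prob.p differs between MTK versions, so treat this as a sketch):
parameters(sys) # the simplified system's parameters, e.g. Ria₊R and Ci₊C; order is not guaranteed
prob.p # the corresponding numeric values as stored in the problem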
Generate test data
function generate_data(nr_samples::Int, t_end::Real, noise::Real, f_input::Function, prob::ODEProblem)
    # sample points
    t_int = t_end/nr_samples
    t_sample = 0:t_int:t_end
    # sample input signal
    Φ_h_sample = f_input.(t_sample)
    # sample output signal (first state = T_i) and add measurement noise
    sol = solve(prob, Tsit5(); saveat=t_int)
    d_sol = sol[1, :]
    d_noise = noise * randn(size(d_sol))
    T_i_sample = d_sol + d_noise
    return t_sample, Φ_h_sample, T_i_sample, T_a.(t_sample)
end
n_samples = Int(floor((100/40) * t_end)) # 2.5 samples per time unit
t_int = t_end/n_samples
# test data samples
t_sample, Φ_h_sample, T_i_sample, T_a_sample = generate_data(n_samples, t_end, 2.0, Φ_h, prob)
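To sanity-check the generated data visually (a quick sketch, assuming Plots.jl is available):
using Plots
plot(t_sample, T_i_sample; label = "T_i (noisy samples)", xlabel = "t")
plot!(t_sample, T_a_sample; label = "T_a")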
Turing.jl model
using Turing
using DifferentialEquations
using LinearAlgebra # for I in MvNormal
@model function LGDS(x::AbstractVector, prob)
    # Prior distributions.
    σ ~ InverseGamma(2, 3)
    C ~ Gamma(1, 4)
    R ~ Gamma(1, 1)
    # ODE solution
    # NB: assumes the problem's flattened parameter vector is ordered [R, C]; see the note at the end of the post
    p = [R, C]
    T_ode = solve(prob, Tsit5(); p=p, saveat=t_int, save_idxs=1)
    if length(T_ode) != length(x)
        Turing.@addlogprob! -Inf # reject proposals where the solve exits early
        return nothing
    end
    # observations: iid Gaussian measurement noise around the ODE trajectory
    x ~ MvNormal(T_ode.u, σ^2 * I)
    return nothing
end
model = LGDS(T_i_sample, prob)
# sample 3 independent chains
n_chains = 3
chain = sample(model, Turing.NUTS(), MCMCThreads(), 1000, n_chains; progress=true)
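For reference, this is how I am inspecting the chains (via the MCMCChains plot recipes in StatsPlots):
using StatsPlots
plot(chain) # per-chain traces and densities
summarystats(chain) # rhat well above 1 would confirm the chains are not mixing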
If you have any other suggestions, I'm all ears, of course.
The main problem with the Turing.jl model right now is that the chains converge to wildly different values: if I run 10 chains, I get 10 non-overlapping posterior distributions.
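One thing I now suspect (unverified): p = [R, C] assumes the problem's flattened parameter vector is ordered [Ria.R, Ci.C], but as far as I can tell structural_simplify gives no ordering guarantee. That alone probably would not explain the chain-to-chain disagreement, but it could mean the priors land on the wrong parameters. Below is a sketch that binds the parameters by symbol via remake instead of by position; it should work on a recent ModelingToolkit (older versions may need ModelingToolkit.varmap_to_vars), and I am not sure whether this is the observation functionality you meant:
@model function LGDS_remake(x::AbstractVector, prob)
    σ ~ InverseGamma(2, 3)
    C ~ Gamma(1, 4)
    R ~ Gamma(1, 1)
    # bind parameters by symbol instead of by position in prob.p
    prob_new = remake(prob; p = [Ria.R => R, Ci.C => C])
    T_ode = solve(prob_new, Tsit5(); saveat=t_int, save_idxs=1)
    if length(T_ode) != length(x)
        Turing.@addlogprob! -Inf # reject proposals where the solve exits early
        return nothing
    end
    x ~ MvNormal(T_ode.u, σ^2 * I)
    return nothing
end
model = LGDS_remake(T_i_sample, prob) # then sample as above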