Hi,
I am relatively new to ForneyLab and am trying it again after a year-long break, using Julia 1.7 and ForneyLab 0.11.4. While most of the provided demos run fine, the simplest demo of all, Bayes-rule_discrete.ipynb,
generates an error when run. More specifically, when executing
using ForneyLab
# Build the generative model
g = FactorGraph()
b = [0.7, 0.3] # Prior probability vector
A = [0.2 0.9; 0.8 0.1] # Left-stochastic matrix for conditional probability
@RV m ~ Categorical(b) # Prior
@RV w ~ Transition(m, A) # Observation model
placeholder(w, :w, dims=(2,)); # Placeholder for observation
algo = messagePassingAlgorithm(m) # Build the algorithm code
source_code = algorithmSourceCode(algo)
eval(Meta.parse(source_code)) # Parse and load the algorithm in scope
println(source_code)
the following error is generated on `eval(Meta.parse(source_code))`:
error in method definition: function ForneyLab.step! must be explicitly imported to be extended
Stacktrace:
[1] top-level scope
@ none:0
[2] top-level scope
@ none:22
[3] eval
@ ./boot.jl:373 [inlined]
[4] eval(x::Expr)
@ Base.MainInclude ./client.jl:453
[5] top-level scope
@ In[35]:3
[6] eval
@ ./boot.jl:373 [inlined]
[7] include_string(mapexpr::typeof(REPL.softscope), mod::Module, code::String, filename::String)
@ Base ./loading.jl:1196
I searched the internet and the issues reported on the ForneyLab GitHub, but did not find anything. Here is the source code generated via `println(source_code)`:
begin
# You have created an algorithm that requires updates for (a) clamped parameter(s).
# This algorithm requires the definition of a custom `optimize!` function that updates the parameter value(s)
# by altering the `data` dictionary in-place. The custom `optimize!` function may be based on the mockup below:
# function optimize!(data::Dict, marginals::Dict=Dict(), messages::Vector{Message}=init())
# ...
# return data
# end
function init()
messages = Array{Message}(undef, 3)
messages[2] = Message(vague(Categorical, (2,)))
return messages
end
function step!(data::Dict, marginals::Dict=Dict(), messages::Vector{Message}=Array{Message}(undef, 3))
messages[1] = ruleSPCategoricalOutNP(nothing, Message(Multivariate, PointMass, m=[0.7, 0.3]))
messages[2] = ruleSPTransitionOutNCP(nothing, messages[1], Message(MatrixVariate, PointMass, m=[0.2 0.9; 0.8 0.1]))
messages[3] = ruleSPTransitionIn1PNP(Message(Multivariate, PointMass, m=data[:w]), nothing, Message(MatrixVariate, PointMass, m=[0.2 0.9; 0.8 0.1]))
marginals[:m] = messages[1].dist * messages[3].dist
return marginals
end
end # block
I assume I am doing something incorrectly. Note, however, that most of the other demos use continuous distributions, whereas this one is discrete. Any insight would be greatly appreciated!
Gordon