I appreciate if someone can comment on the following behavior. Please consider the code below for an estimator type:

```
type SimpleKriging{T<:Real} <: AbstractEstimator
# input fields
X::AbstractMatrix{T}
z::AbstractVector{T}
cov::CovarianceModel
μ::T
# state fields
LLᵀ::Cholesky
function SimpleKriging(X, z, cov, μ)
@assert size(X, 2) == length(z) "incorrect data configuration"
SK = new(X, z, cov, μ)
fit!(SK, X)
SK
end
end
SimpleKriging(X, z, cov, μ) = SimpleKriging{eltype(z)}(X, z, cov, μ)
function fit!{T<:Real}(estimator::SimpleKriging{T}, X::AbstractMatrix{T})
C = pairwise(estimator.cov, X)
estimator.LLᵀ = cholfact(C)
end
function estimate{T<:Real}(estimator::SimpleKriging{T}, xₒ::AbstractVector{T})
X = estimator.X; z = estimator.z
cov = estimator.cov; μ = estimator.μ
LLᵀ = estimator.LLᵀ
nobs = length(z)
# evaluate covariance at location
c = Float64[cov(norm(X[:,j]-xₒ)) for j=1:nobs]
# solve linear system
y = z - μ
λ = LLᵀ \ c
# return estimate and variance
μ + y⋅λ, cov(0) - c⋅λ
end
```

The code is working with Float64, but when I try lower precision as in the script below, I get **UndefRefError: access to undefined reference** and a line number that points to the field `LLᵀ::Cholesky`

. The exact line of code that triggers the error is the line `μ2, σ² = estimate(simkrig2, xₒ)`

:

```
# create some data
dim, nobs = 3, 100
X1 = rand(dim, nobs); z = rand(nobs)
X2 = convert(Matrix{Float32},X1)
# define a covariance type...
cov = some_function_of_x_and_y
# construct estimators
simkrig1 = SimpleKriging(X1, z, cov, mean(z))
simkrig2 = SimpleKriging(X2, z, cov, mean(z))
# estimate
μ1, σ² = estimate(simkrig1, xₒ)
μ2, σ² = estimate(simkrig2, xₒ)
```

I wonder if this is the correct idiom in Julia for initializing state fields in a class. These are fields that are expensive to compute and I would like to have them computed once upon instantiation, or later if the user needs to retrain with the `fit!`

method. Pretty much like the Python’s sklearn API.