Fixing a parameter with Optimization.jl

Sure, here’s a version of your example using ParameterHandling:

using ParameterHandling
using Optimization
using OptimizationOptimJL
using Random
using Distributions

β₀, β₁, β₂, β₃, n, σ = -1.0, 1.0, 0.5, 3.0, 300, 0.1
Random.seed!(12301)
x₁ = rand(Uniform(-1, 1), n)
x₂ = rand(Normal(1.0, 0.5), n)
ε = rand(Normal(0.0, σ), n)
y = @. β₀ + β₁ * x₁ + β₂ * x₂ + β₃ * x₁ * x₂ + ε

params = (σ² = bounded(1.0, 0.0, 1e9), β = ones(4)) # β doesn't have bounds, just an initial value
vals, unflatten = value_flatten(params)
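# value_flatten returns a vector of unconstrained values together with an
# unflatten function that maps such a vector back to the constrained NamedTuple;
# here unflatten(vals) recovers (σ² ≈ 1.0, β = [1.0, 1.0, 1.0, 1.0])
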
# Gaussian log-likelihood of the linear model with interaction term
function loglik(θ, data)
    (; σ², β) = θ
    y, x₁, x₂ = data
    n = length(y)  # use the data length rather than the global n
    ℓℓ = -n / 2 * log(2π * σ²)
    for i in eachindex(y)
        ℓℓ -= 0.5 / σ² * (y[i] - β[1] - β[2] * x₁[i] - β[3] * x₂[i] - β[4] * x₁[i] * x₂[i])^2
    end
    return ℓℓ
end
func = OptimizationFunction((θ, data) -> -loglik(unflatten(θ), data), Optimization.AutoForwardDiff())
prob = OptimizationProblem(func, vals, (y, x₁, x₂))
θhat = solve(prob, NelderMead())
soln = unflatten(θhat.u)
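
The solver only ever sees the flat vector, so the estimates come back through unflatten: soln has the same layout as params, and the fitted values can be read off directly (names as above):

soln.σ²   # estimated error variance
soln.β    # estimated coefficients, in the order [β₀, β₁, β₂, β₃]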

# Fix σ²:
params = (σ² = fixed(1.0), β = ones(4))
vals, unflatten = value_flatten(params)
func = OptimizationFunction((θ, data) -> -loglik(unflatten(θ), data), Optimization.AutoForwardDiff())
prob = OptimizationProblem(func, vals, (y, x₁, x₂))
θhat = solve(prob, NelderMead())
soln = unflatten(θhat.u)
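
With fixed, σ² is dropped from the flat parameter vector entirely: the optimizer now only searches over the four β entries, while unflatten keeps supplying the fixed value 1.0 to loglik. A quick sanity check with the names above:

length(vals)        # 4 — only the β entries are free
unflatten(vals).σ²  # 1.0, the fixed value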

Note that I prefer to call the second argument of the objective function data, and reserve the word parameters for quantities like θ that are estimated in the statistical sense.
