A basic binary logit works fine:
using Optimization, OptimizationOptimJL, OptimizationBBO, OptimizationMOI, ForwardDiff, ModelingToolkit, Random, Statistics
function SimulateBinaryLogit(x, Beta)
    N = size(x, 1)
    J = size(Beta, 1)
    # Type-I extreme value (Gumbel) shocks, one per alternative
    epsilon = -log.(-log.(rand(N, J)))
    # Normalize the utility of alternative 2 to zero
    Beta_augmented = [Beta zeros(J)]
    utility = x * Beta_augmented + epsilon
    # Pick the alternative with the highest utility; y = 1 if alternative 1 wins
    (maxvalue, maxindex) = findmax(utility, dims=2)
    choice = getindex.(maxindex, 2)
    y = choice .== 1
end
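(Since the difference of two independent type-I extreme value shocks is logistic, this max-utility simulator generates choices with the standard logit probability

$$
P(y_i = 1 \mid x_i) = \Lambda(x_i'\beta) = \frac{\exp(x_i'\beta)}{1 + \exp(x_i'\beta)},
$$

so the simulated data should line up with the likelihood below.)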
function BinaryLogitLL(u, p)
    x = p[:, 1:2]
    y = p[:, 3]
    # Logistic choice probability Λ(xᵢ'u)
    Lambda_xb = exp.(x * u) ./ (1 .+ exp.(x * u))
    ll_i = y .* log.(Lambda_xb) .+ (1 .- y) .* log.(1 .- Lambda_xb)
    # Negative log-likelihood (Optimization.jl minimizes)
    LL = -sum(ll_i)
end
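For reference, this is just the negative of the usual logit log-likelihood

$$
\ell(\beta) = \sum_{i=1}^{N} \Bigl[\, y_i \log \Lambda(x_i'\beta) + (1 - y_i)\log\bigl(1 - \Lambda(x_i'\beta)\bigr) \Bigr],
$$

which the optimizer then minimizes.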
Random.seed!(1)
N = 1000
Beta = [0.5; 0.5]
income = randn(N, 1)
x = [ones(N, 1) income]
y = SimulateBinaryLogit(x, Beta)
p = [x y]
u0 = [0.0; 0.0]
optf = OptimizationFunction(BinaryLogitLL, Optimization.AutoForwardDiff())
prob = OptimizationProblem(optf, u0, p, lb = [-10.0, -10.0], ub = [10.0, 10.0])
sol = solve(prob, BFGS())
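As a sanity check (exact numbers depend on the seed, but with N = 1000 the estimates should sit near the true values):

sol.u   # estimated coefficients; close to the true Beta = [0.5, 0.5] up to sampling error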
Everything is fine so far. However, when I try to extend this to a simulated maximum likelihood (Monte Carlo) version, I run into problems:
# Container for the data and the number of simulation draws
struct input
    y
    x
    R
end
function BinaryLogitSimulatedLL(u, p)
    y = p.y
    x = p.x
    R = p.R
    Random.seed!(1)
    N = size(y, 1)
    # Simulate the choice R times at the candidate parameter vector u
    Simulated_y = Array{Float64, 2}(undef, N, R)
    for i in 1:R
        Simulated_y[:, i] = SimulateBinaryLogit(x, u)
    end
    # Frequency simulator: the share of draws in which alternative 1 is chosen
    SimProb = mean(Simulated_y, dims=2)
    ll_i = y .* log.(SimProb) .+ (1 .- y) .* log.(1 .- SimProb)
    LL = -sum(ll_i)
end
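The idea is to replace the analytic choice probability with a frequency simulator, i.e.

$$
\hat{P}_i(\beta) = \frac{1}{R}\sum_{r=1}^{R}\mathbf{1}\{\text{alternative 1 chosen in draw } r\},\qquad
\ell^{\text{sim}}(\beta) = \sum_{i=1}^{N}\Bigl[\, y_i\log\hat{P}_i(\beta) + (1-y_i)\log\bigl(1-\hat{P}_i(\beta)\bigr)\Bigr],
$$

and then maximize the simulated log-likelihood over the parameters.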
R = 1000
p = input(y,x,R)
u = [0.0; 0.0]
optf = OptimizationFunction(BinaryLogitSimulatedLL, Optimization.AutoForwardDiff())
prob = OptimizationProblem(optf, u, p, lb = -1.0*ones(2), ub = ones(2))
sol = solve(prob, BFGS())
It seems that whatever initial values I give, the routine reports them back as the optimum, even though I set the true parameters in advance to [0.5; 0.5].
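A minimal check, using the same p and functions defined above, is to evaluate the simulated objective and the gradient that ForwardDiff produces at two different candidate points, to see whether the gradient is essentially zero everywhere:

# Probe the simulated objective and its ForwardDiff gradient at two candidate points
for u_test in ([0.0, 0.0], [0.3, 0.3])
    val = BinaryLogitSimulatedLL(u_test, p)
    grad = ForwardDiff.gradient(v -> BinaryLogitSimulatedLL(v, p), u_test)
    @show u_test val grad
end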