It’s a scope thing; try this (I only changed the name of the variable that overlapped with the name of the objective value).
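For the record, the likely failure mode is Julia's closure capture: when a nested function assigns to a name that already exists as a local of the enclosing function, it rebinds that same captured variable instead of creating a new one. A minimal illustration (the names here are made up, not the thread's code):

function outer()
    ℓ = 0.0                      # outer "objective value" variable
    f(x) = (ℓ = sum(abs2, x))    # rebinds the captured ℓ, not a fresh local
    f([1.0, 2.0])
    return ℓ                     # 5.0: the inner call clobbered the outer value
end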
using Optim, LineSearches, LinearAlgebra

@views @inline function asclogit(bstart::Vector, Y::Array, X::Array, Z::Array, J::Int64, W::Array=ones(length(Y)))
    ## error checking
    @assert ((!isempty(X) || !isempty(Z)) && !isempty(Y)) "You must supply data to the model"
    @assert (ndims(Y)==1 && size(Y,2)==1) "Y must be a 1-D Array"
    @assert (minimum(Y)==1 && maximum(Y)==J) "Y should contain integers numbered consecutively from 1 through J"
    if !isempty(X)
        @assert ndims(X)==2 "X must be a 2-dimensional matrix"
        @assert size(X,1)==size(Y,1) "The 1st dimension of X should equal the number of observations in Y"
    end
    if !isempty(Z)
        @assert ndims(Z)==3 "Z must be a 3-dimensional tensor"
        @assert size(Z,1)==size(Y,1) "The 1st dimension of Z should equal the number of observations in Y"
    end
    K1 = size(X,2)
    K2 = size(Z,2)
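    # Layout of b (inferred from the indexing below): the first K1*(J-1) entries
    # stack the alternative-specific coefficients on X for alternatives 1..J-1
    # (alternative J is the normalized base), and the last K2 entries are the
    # common coefficients b2 on Z, which enters differenced against alternative J.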
    function f(b)
        T = promote_type(promote_type(promote_type(eltype(X), eltype(b)), eltype(Z)), eltype(W))
        num  = zeros(T, size(Y))
        dem  = zeros(T, size(Y))
        temp = zeros(T, size(Y))
        b2 = b[K1*(J-1)+1:K1*(J-1)+K2]
        for j = 1:(J-1)
            # linear index of alternative j, with Z differenced against base alternative J
            temp .= X*b[(j-1)*K1+1:j*K1] .+ (Z[:,:,j] .- Z[:,:,J])*b2
            num  .+= (Y.==j).*temp          # chosen alternative's index enters the numerator
            dem  .+= exp.(temp)
        end
        dem .+= 1                           # base alternative contributes exp(0) = 1
        return -W'*(num .- log.(dem))       # negative log-likelihood (Optim minimizes)
    end
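    # g! below fills G with the analytic score of the negative log-likelihood:
    # for each non-base alternative j < J,
    #     ∂(-ℓ)/∂b_j = -X' * (W .* ((Y .== j) .- P[:,j]))
    # and for the shared coefficients on Z,
    #     ∂(-ℓ)/∂b2  = -Σⱼ (Z[:,:,j] .- Z[:,:,J])' * (W .* ((Y .== j) .- P[:,j]))
    # where P[:,j] = exp.(vⱼ) ./ (1 .+ Σₖ exp.(vₖ)) are the choice probabilities.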
    function g!(G, b)
        T = promote_type(promote_type(promote_type(eltype(X), eltype(b)), eltype(Z)), eltype(W))
        numer = zeros(T, size(Y,1), J)
        P     = zeros(T, size(Y,1), J)
        numg  = zeros(T, K2)
        demg  = zeros(T, K2)
        b2 = b[K1*(J-1)+1:K1*(J-1)+K2]
        G .= zero(T)
        for j = 1:(J-1)
            numer[:,j] .= exp.( X*b[(j-1)*K1+1:j*K1] .+ (Z[:,:,j] .- Z[:,:,J])*b2 )
        end
        # choice probabilities; column J (the base alternative) stays zero in numer
        P .= numer./(1 .+ sum(numer; dims=2))
        for j = 1:(J-1)
            G[(j-1)*K1+1:j*K1] .= -X'*(W.*((Y.==j) .- P[:,j]))
        end
        for j = 1:(J-1)
            numg .+= -(Z[:,:,j] .- Z[:,:,J])'*(W.*(Y.==j))
            demg .+= -(Z[:,:,j] .- Z[:,:,J])'*(W.*P[:,j])
        end
        G[K1*(J-1)+1:K1*(J-1)+K2] .= numg .- demg
        return G
    end
    td = TwiceDifferentiable(f, g!, bstart, autodiff = :forwarddiff)
    rs = optimize(td, bstart, LBFGS(; linesearch = LineSearches.BackTracking()), Optim.Options(iterations=100_000, g_tol=1e-8, f_tol=1e-8))
    β  = Optim.minimizer(rs)
    ℓℓ = -Optim.minimum(rs)              # flip the sign back to the log-likelihood
    H  = Optim.hessian!(td, β)
    se = sqrt.(diag(inv(H)))             # standard errors from the inverse Hessian
    # use separate buffers here: g! returns the array it was passed, so reusing a
    # single buffer would leave grad and gradzero aliasing the same overwritten vector
    grad     = g!(similar(β), β)
    gradzero = g!(similar(β), zeros(size(β)))
    return β, se, ℓℓ, grad, gradzero
end
β,se_β,like,grad,grad0 = asclogit(zeros(size(θ)),Y,X,Zmatlab,3)
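If you want to sanity-check the function without the data from the thread, a quick smoke test on simulated inputs works (all names and sizes below are made up for illustration; Y must contain every integer from 1 through J for the assertions to pass):

using Random
Random.seed!(1234)
N, Jsim, K1sim, K2sim = 500, 3, 2, 1
Xsim = randn(N, K1sim)
Zsim = randn(N, K2sim, Jsim)                 # N × K2 × J tensor
Ysim = rand(1:Jsim, N)                       # integers 1:J
b0   = zeros(K1sim*(Jsim-1) + K2sim)         # matches the layout of b above
βhat, se, ll, g, g0 = asclogit(b0, Ysim, Xsim, Zsim, Jsim)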