I am solving a system of equations using 2 methods: solving it “directly” with `NLsolve`, and solving it by minimizing the norm of the residuals of the system. Here’s an example:

```
using NLsolve
using Optim
using LinearAlgebra
using Random
# Seed the global RNG so the fixture data below is reproducible.
Random.seed!(9876)
# Problem dimensions: szA age groups, szE types, szG = 2 sides (1 = m, 2 = f).
const szA = 9
const szE = 2
const szG = 2
# Model parameters — presumably a discount factor β and a separation/decay
# rate δ; confirm against the model write-up.
const β = 0.92
const δ = 0.10
# Random fixture data, scaled to [0, 1000). Declared `const` because they are
# read as keyword-argument defaults inside the solver functions: non-const
# globals are type-unstable in Julia. Sizes use szA/szE instead of hard-coded
# 9s and 2s so the dimensions have a single source of truth.
const Π = 1000 .* rand(szA, szE, szA, szE)
const Sm = 1000 .* rand(szA, szE)
const Sf = 1000 .* rand(szA, szE)
"""
    maxT(am, af; Z=9)

Number of periods remaining when the older of the two indices `am`, `af`
starts at the last admissible index `Z`: `Z - max(am, af) + 1`.
"""
function maxT(am, af; Z=9)
    oldest = max(am, af)
    return Z - oldest + 1
end
"""
    solvnl!(res, μ0; Sm=Sm, Sf=Sf, Π=Π)

Write the residuals of the system into `res` (in place), for use with
`NLsolve.nlsolve`. `res` and `μ0` are length `szA*szE*szG` (or arrays of that
size); both are viewed as `(szA, szE, szG)` with the third index presumably
distinguishing the two sides m/f — confirm against the model.

Returns `res`.
"""
function solvnl!(res, μ0; Sm=Sm, Sf=Sf, Π=Π)
    res2 = reshape(res, (szA, szE, szG))
    μ0′ = reshape(μ0, (szA, szE, szG))
    # `similar(μ0, ...)` keeps the element type of μ0, so ForwardDiff dual
    # numbers propagate through when autodiff=:forward is used.
    prodsummand = similar(μ0, (szA, szE, szA, szE))
    for idx in CartesianIndices(prodsummand)
        am, em, af, ef = Tuple(idx)
        # Z=szA instead of the hard-coded literal 9, so the horizon tracks the
        # declared age dimension.
        prodsummand[idx] = Π[am, em, af, ef] * sqrt(Sm[am, em] * Sf[af, ef]) *
            prod(abs((μ0′[am + k, em, 1] * μ0′[af + k, ef, 2]) /
                     (Sm[am + k, em] * Sf[af + k, ef]))^(0.5 * (β * (1 - δ))^k)
                 for k in 0:(maxT(am, af; Z=szA) - 1))
    end
    # Side 1 residuals. Iterate each (am, em) pair exactly once; the original
    # looped over all of the 3-D `res2` and dropped the third index, computing
    # every entry (and its expensive sum) szG times.
    for em in 1:szE, am in 1:szA
        res2[am, em, 1] = Sm[am, em] - μ0′[am, em, 1] -
            sum(prodsummand[am, em, af, ef] for af in 1:szA, ef in 1:szE)
    end
    # Side 2 residuals, summing over the first two axes of prodsummand.
    for ef in 1:szE, af in 1:szA
        res2[af, ef, 2] = Sf[af, ef] - μ0′[af, ef, 2] -
            sum(prodsummand[am, em, af, ef] for am in 1:szA, em in 1:szE)
    end
    return res
end
"""
    solvopt!(μ0; Sm=Sm, Sf=Sf, Π=Π)

Return `norm` of the same residuals that [`solvnl!`](@ref) computes, for use
as a scalar objective with `Optim.optimize`.

NOTE(review): despite the `!` in the name (kept for call-site compatibility),
this function does NOT mutate `μ0`; it allocates its own residual buffer.
"""
function solvopt!(μ0; Sm=Sm, Sf=Sf, Π=Π)
    res2 = similar(μ0, (szA, szE, szG))
    μ0′ = reshape(μ0, (szA, szE, szG))
    # `similar(μ0, ...)` preserves μ0's element type so ForwardDiff duals work.
    prodsummand = similar(μ0, (szA, szE, szA, szE))
    for idx in CartesianIndices(prodsummand)
        am, em, af, ef = Tuple(idx)
        # Z=szA replaces the hard-coded literal 9 (same value, single source
        # of truth for the age dimension).
        prodsummand[idx] = Π[am, em, af, ef] * sqrt(Sm[am, em] * Sf[af, ef]) *
            prod(abs((μ0′[am + k, em, 1] * μ0′[af + k, ef, 2]) /
                     (Sm[am + k, em] * Sf[af + k, ef]))^(0.5 * (β * (1 - δ))^k)
                 for k in 0:(maxT(am, af; Z=szA) - 1))
    end
    # Fill each residual entry exactly once; the original iterated over the
    # full 3-D index set of res2 and discarded the third component, so every
    # entry (including its sum) was recomputed szG times.
    for em in 1:szE, am in 1:szA
        res2[am, em, 1] = Sm[am, em] - μ0′[am, em, 1] -
            sum(prodsummand[am, em, af, ef] for af in 1:szA, ef in 1:szE)
    end
    for ef in 1:szE, af in 1:szA
        res2[af, ef, 2] = Sf[af, ef] - μ0′[af, ef, 2] -
            sum(prodsummand[am, em, af, ef] for am in 1:szA, em in 1:szE)
    end
    return norm(res2)
end
# Initial guess, shaped (szA, szE, szG); entries drawn uniformly from [0, 1).
init = rand(9,2,2)
# Root-finding route: Newton's method with a ForwardDiff Jacobian.
nlsol = nlsolve(solvnl!, init; iterations=10_000, autodiff=:forward, method=:newton)
# Optimization route: minimize the residual norm with BFGS and a ForwardDiff
# gradient. NOTE(review): minimizing ‖res‖ can stall at non-zero stationary
# points where the root-finder succeeds — compare optsol.minimum to 0.
optsol = optimize(solvopt!, init, BFGS(), Optim.Options(iterations=10_000); autodiff=:forward)
```

The problem is that, while `nlsolve` finds a zero, `optimize` does not. In fact, it is strange that `nlsol.zero` does attain a zero of `solvopt!` that `Optim` can’t find. Even when starting very close to that zero, the solution by optimization goes somewhere else:

```
julia> optsol = optimize(solvopt!, nlsol.zero .+ (10 .* rand((szA, szE, szG))), ConjugateGradient(), Optim.Options(iterations=10_000, f_tol=1e-10, x_tol=1e-10, g_tol=1e-10); autodiff=:forward)
* Status: success
* Candidate solution
Final objective value: 3.816617e+06
* Found with
Algorithm: Conjugate Gradient
* Convergence measures
|x - x'| = 0.00e+00 ≤ 1.0e-10
|x - x'|/|x'| = 0.00e+00 ≤ 0.0e+00
|f(x) - f(x')| = 0.00e+00 ≤ 0.0e+00
|f(x) - f(x')|/|f(x')| = 0.00e+00 ≤ 1.0e-10
|g(x)| = 2.30e+09 ≰ 1.0e-10
* Work counters
Seconds run: 0 (vs limit Inf)
Iterations: 15
f(x) calls: 67
∇f(x) calls: 59
```

What could be happening?