Hi, I am using NLopt with the MMA algorithm (:LD_MMA) for an optimization problem. The optimizer stops early and reports that xtol_rel was reached, even though the gradients are still large and I would expect it to keep going. Could you please help me understand what the issue is and what changes I should make? Thank you.
function gf_p_optimize(p_init; r, β, η, TOL=1e-4, MAX_ITER, fem_params, global_iter_start)
    ##################### Optimize #################
    opt = Opt(:LD_MMA, fem_params.np)
    opt.lower_bounds = 0.001
    opt.upper_bounds = 1
    opt.xtol_rel = TOL
    opt.maxeval = MAX_ITER
    global_iter = global_iter_start
    iteration_solutions = Any[]
    function objective_fn(p0, grad)
        global_iter += 1
        push!(iteration_solutions, copy(p0))  # copy: NLopt may reuse this buffer between calls
        pf_vec = pf_p0(p0; r, fem_params)
        pfh = FEFunction(fem_params.Pf, pf_vec)
        pth = (pf -> Threshold(pf; β, η)) ∘ pfh
        # write the current design fields for inspection
        writevtk(Ω, "ldtwrl$global_iter", cellfields=["p_opt" => p0, "pfh" => pfh, "pth" => pth])
        println("called from iteration $global_iter")
        return gf_p(p0, grad; iteration_counter=global_iter, r, β, η, fem_params)
    end
    opt.min_objective = (p0, grad) -> objective_fn(p0, grad)
    (g_opt, p_opt, ret) = optimize(opt, p_init)
    @show numevals = opt.numevals # the number of function evaluations
    println("got $g_opt at $p_opt after $numevals iterations (returned $ret)")
    open("gopt.txt", "a") do io
        write(io, "$g_opt \n")
    end
    return g_opt, p_opt, global_iter
end
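For context, here is a minimal, self-contained sketch of the kind of change I am wondering about, stripped of my FEM problem: checking the symbolic return code and tightening `xtol_rel` (or stopping on the objective via `ftol_rel` instead). The field names `xtol_rel`, `ftol_rel`, `maxeval` and return codes like `:XTOL_REACHED` are from the standard NLopt.jl API; the toy quadratic objective and the specific tolerance values are just placeholders, not my actual setup. Whether this is the right direction is exactly what I am asking.

```julia
using NLopt

# Same stopping-criteria setup as above, on a 2-variable toy problem.
opt = Opt(:LD_MMA, 2)
opt.lower_bounds = 0.001
opt.upper_bounds = 1.0
opt.xtol_rel = 1e-8      # much tighter than my current TOL = 1e-4
opt.ftol_rel = 1e-6      # also stop on relative change of the objective
opt.maxeval  = 100

# Toy quadratic objective with an analytic gradient, only to make the sketch runnable.
opt.min_objective = (x, grad) -> begin
    if length(grad) > 0
        grad .= 2 .* (x .- 0.5)
    end
    return sum((x .- 0.5) .^ 2)
end

g_opt, x_opt, ret = optimize(opt, [0.9, 0.9])
println("returned $ret")  # e.g. :XTOL_REACHED, :FTOL_REACHED or :MAXEVAL_REACHED
```

In my real run `ret` comes back as `:XTOL_REACHED` after only a few iterations, which is what puzzles me given the large gradients.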