Hi,
Thank you for your response😊
The difference in values appeared when I changed the value of `lambda`. For most `lambda` values the outputs are the same, but for some, such as 0.8, 0.9, and 1.2, Julia produces a zero vector while R does not. Here is the code that reproduces it:
```julia
# Setup: Nonconvex.jl with the NLopt backend; ForwardDiff supplies the derivatives
using Nonconvex
Nonconvex.@load NLopt
import AbstractDifferentiation
using ForwardDiff

start_vec = zeros(11);
# Problem parameters
gamma_vec = range(start=-7, stop=7, length=40);
rho = 0.8;
t_vec = range(0.0, 5.0);   # 0.0:1.0:5.0 (step defaults to 1 on Julia >= 1.8)
alpha = 0.05;
d = 6.0;
no_nodes_GL = 70;
lambda = 0.8;              # one of the values for which Julia returns the zero vector
num_ints = 20;
# Closures over the fixed parameters; OBJ_ciuupi and constr_func_ciuupi_nonconvex
# are my own functions (definitions omitted here)
obj_fun_enclosed = x -> OBJ_ciuupi(x, lambda, t_vec, d, alpha, no_nodes_GL);
constraint_enclosed = x -> constr_func_ciuupi_nonconvex(x, t_vec, d, alpha, rho, gamma_vec, no_nodes_GL, num_ints);
# Build the model: unbounded variables, one inequality constraint,
# and ForwardDiff-based derivatives via AbstractDifferentiation
model = Model(obj_fun_enclosed);
addvar!(model, fill(-Inf, length(start_vec)), fill(Inf, length(start_vec)));
add_ineq_constraint!(model, constraint_enclosed);
ad_model = abstractdiffy(model, AbstractDifferentiation.ForwardDiffBackend());
# SLSQP via NLopt, with all tolerances switched off so only maxeval should stop the run
alg = NLoptAlg(:LD_SLSQP);
options = NLoptOptions(xtol_rel = 0, maxeval = 1000, ftol_rel = 0.0, ftol_abs = 0.0, xtol_abs = 0.0, stopval = -Inf);
sl = Nonconvex.optimize(ad_model, alg, start_vec, options = options);
show(sl.minimizer);
# Output: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
show(sl.status);
# Output: :ROUNDOFF_LIMITED
```
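For anyone who wants to reproduce the `lambda`-dependence, here is a minimal sketch that sweeps a grid of `lambda` values covering 0.8, 0.9 and 1.2, reusing the setup above (`run_for_lambda` is just an illustrative wrapper, not part of my actual code):

```julia
# Re-solve for each lambda on a grid and flag the runs that return the zero vector.
# The constraint does not depend on lambda, so constraint_enclosed is reused as-is.
function run_for_lambda(lam)
    obj = x -> OBJ_ciuupi(x, lam, t_vec, d, alpha, no_nodes_GL)
    m = Model(obj)
    addvar!(m, fill(-Inf, length(start_vec)), fill(Inf, length(start_vec)))
    add_ineq_constraint!(m, constraint_enclosed)
    ad_m = abstractdiffy(m, AbstractDifferentiation.ForwardDiffBackend())
    return Nonconvex.optimize(ad_m, alg, start_vec, options = options)
end

for lam in 0.5:0.1:1.5
    res = run_for_lambda(lam)
    println("lambda = $lam: status = $(res.status), zero minimizer = $(all(iszero, res.minimizer))")
end
```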
To get the derivatives printed in R, I have to change the options to `list = Dict("xtol_rel" => 1e-6, "check_derivatives" => true)`. For comparison, the R version returns:
```julia
# Run the R implementation with the same inputs and extract the solution vector
r_opt = obj_ciuupi_optim(start_vec, lambda, t_vec, d, alpha, rho, gamma_vec, no_nodes_GL, num_ints);
opt_vals_dict = Dict{Symbol, Any}(names(r_opt) .=> values(r_opt));
show(map(Float64, opt_vals_dict[:par]));
# Output: [0.004672807142645505, 0.03097881117209773, 0.05474224840151757, 0.040294321056390345, 0.011066120260875884, -0.09080941990352809, -0.05818486407224111, 0.014911118555921398, 0.05491697364768549, 0.040140427197995626, 0.011163794813018022]
```
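To rule out the possibility that the Julia point is simply a different, equally good solution, the two candidates can be compared with the closures defined above. This is just a sketch, and it assumes `constraint_enclosed` returns a vector with the convention g(x) <= 0 for feasibility:

```julia
# Evaluate both candidate solutions with the same Julia objective and constraint.
# If the R point is feasible and has a lower objective, SLSQP stopped prematurely.
julia_x = sl.minimizer;
r_x = map(Float64, opt_vals_dict[:par]);
@show obj_fun_enclosed(julia_x) obj_fun_enclosed(r_x);
@show maximum(constraint_enclosed(r_x));   # feasible if <= 0
```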
I’ve checked the gradients computed in Julia and R, and they appear to be the same.
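For completeness, here is a sketch of the kind of gradient comparison I mean, done entirely on the Julia side; FiniteDifferences.jl is used here purely for illustration, as a stand-in for R's `check_derivatives`:

```julia
# Compare ForwardDiff's gradient of the objective at the start point against
# a 5-point central finite-difference approximation.
using FiniteDifferences

g_ad = ForwardDiff.gradient(obj_fun_enclosed, start_vec);
g_fd = grad(central_fdm(5, 1), obj_fun_enclosed, start_vec)[1];
@show maximum(abs.(g_ad .- g_fd));   # should be ~0 if the gradients agree
```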