@odow, thank you for your response. I later printed the time spent in Ipopt, which was still significantly different between the two cases. On my computer, when I use `learned_model1`, the Ipopt execution time is above 4 seconds; without it, the execution time is less than 0.1 seconds. I include my code below and look forward to your insights. Thank you.

```julia
# load packages
using LinearAlgebra
using JuMP, Ipopt
using MosekTools
using Convex
using Plots
using LaTeXStrings
using Printf
## system definition (planar quadrotor: state = [x, y, theta, dx, dy, dtheta])
gravity = 9.81;   # gravitational acceleration [m/s^2]
m = 0.486;        # vehicle mass [kg]
J = 0.00383;      # moment of inertia [kg*m^2]
l = 0.25;         # arm length [m] -- not used below, kept for completeness
n = 6;            # state dimension
nu = 2;           # input dimension
II = 1.0 * Matrix(I, n, n);  # dense n-by-n identity
# Continuous-time dynamics xdot = fx(x) + gx(x)*u.
# BUGFIX: the original closures referenced the global `X[i]` instead of the
# argument `x` (e.g. `sin(X[i][3])`), so they would throw UndefVarError when
# called before the JuMP loop, or silently read stale globals inside it.
fx = x -> [x[4:6]; -gravity * sin(x[3]); gravity * (cos(x[3]) - 1); 0];
gx = x -> [zeros(3, 2); -sin(x[3])/m 0; -cos(x[3])/m 0; 0 1/J];
N = 20;           # prediction horizon
R = [1 1]';       # input weights
dR = R * 10;      # input-rate weights
Q = ones(6, 1);   # terminal-state weights: x, y, theta, dx, dy, dtheta
x0 = vec([5.0 5.0 0 0 0 0]);  # initial state
h = 0.005;        # discretization step [s]
## formulate and solve the nonlinear programming problem using JuMP
MPC = Model(optimizer_with_attributes(
    Ipopt.Optimizer,
    "max_iter" => 500,   # iteration cap
    "print_level" => 3,  # moderate Ipopt verbosity
))
X = [@variable(MPC, [1:n]) for i in 1:N];   # state trajectory: N vectors of length n
U = [@variable(MPC, [1:nu]) for i in 1:N];  # input trajectory: N vectors of length nu
@constraint(MPC, X[1] .== x0); # initial value constraint
# Placeholder for a learned residual model; returns 0.0 so the dynamics below
# reduce to the nominal model.
learned_model1(x...) = 0.0;
# BUGFIX: the original call passed a gradient `Δlearned_model1` that is never
# defined in this script, so `register` throws UndefVarError. Let JuMP obtain
# the gradient via ForwardDiff instead (identical result for this function).
register(MPC, :learned_model1, n, learned_model1; autodiff = true);
# dynamics constraint (nonlinear): forward-Euler discretization with step h.
for i in 1:N-1
# Kinematics: position/angle advance with the current velocities (linear, so
# plain @constraint suffices).
@constraint(MPC,X[i][1]+h*X[i][4] == X[i+1][1]);
@constraint(MPC,X[i][2]+h*X[i][5] == X[i+1][2]);
@constraint(MPC,X[i][3]+h*X[i][6] == X[i+1][3]);
# aux_xi duplicates X[i]; it exists only so the state can be splatted into the
# registered `learned_model1` in the commented constraints below.
aux_xi = @variable(MPC, [1:n]);
@constraint(MPC, aux_xi.== X[i]);
############################ Execution time under the following two cases are significantly different###############
# Case 1: call the learned model through the registered user-defined function.
# NOTE(review): each registered multivariate function forces JuMP/Ipopt through
# the user-function derivative path (no analytic expression graph, and for
# multivariate registered functions no Hessian information), which plausibly
# explains the ~4s vs ~0.1s gap reported above -- worth confirming with the
# JuMP docs on user-defined operators.
# @NLconstraint(MPC,X[i][4]+h*(-gravity*sin(X[i][3])-sin(X[i][3])/m*(U[i][1]+learned_model1(aux_xi...))) == X[i+1][4]);
# @NLconstraint(MPC,X[i][5]+h*(gravity*(cos(X[i][3])-1)-cos(X[i][3])/m*(U[i][1]+learned_model1(aux_xi...))) == X[i+1][5]);
# Case 2: inline the learned model (here identically 0), keeping the whole
# constraint as a native nonlinear expression.
@NLconstraint(MPC,X[i][4]+h*(-gravity*sin(X[i][3])-sin(X[i][3])/m*(U[i][1]+0)) == X[i+1][4]);
@NLconstraint(MPC,X[i][5]+h*(gravity*(cos(X[i][3])-1)-cos(X[i][3])/m*(U[i][1]+0)) == X[i+1][5]);
###################################################################################################################
# Angular acceleration from the second input (linear in U[i][2]).
@constraint(MPC,X[i][6]+h*(1/J*(U[i][2])) == X[i+1][6]);
end
# Quadratic cost: input magnitude over all stages, plus input-rate and state
# penalties from stage 2 onward; minimized through the epigraph variable aux2.
input_cost = sum((R'*(U[k].^2))[1] for k in 1:N);
rate_state_cost = sum((dR'*((U[k]-U[k-1]).^2) + Q'*(X[k].^2))[1] for k in 2:N);
@variable(MPC, aux2);
@constraint(MPC, aux2 == input_cost + rate_state_cost);
@objective(MPC, Min, aux2);
@time optimize!(MPC);
```