How to get the gradient of a constraint of an optimization problem at a fixed point

It’s a little bit convoluted, but see Computing Hessians · JuMP.

Here’s how I would write your model and compute the gradient of the constraint.

Note that:

  • I have swapped from the legacy @NL syntax to the new JuMP nonlinear syntax (see ANN: JuMP v1.15 is released); a minimal before/after contrast is sketched just below this list.
  • I used MOI.eval_constraint_gradient instead of MOI.eval_constraint_jacobian because you care only about one constraint. You could use the Jacobian variant if you wanted; there is a sketch of that at the end of this post.
  • The gradient of the function in constraint Temp is 0 with respect to u[1] because u[1] does not appear in it. Is that what you expected?
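To illustrate the syntax change, here is a minimal, self-contained contrast on a toy constraint (not one from your model):

using JuMP, Ipopt

# Legacy nonlinear syntax (pre-JuMP v1.15):
legacy = Model(Ipopt.Optimizer)
@variable(legacy, x >= 0)
@NLconstraint(legacy, exp(x) + x^3 <= 10)

# New unified syntax (JuMP v1.15 and later): nonlinear expressions go
# directly into the regular @constraint macro.
new_model = Model(Ipopt.Optimizer)
@variable(new_model, y >= 0)
@constraint(new_model, exp(y) + y^3 <= 10)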
julia> using JuMP

julia> import Ipopt

julia> import SparseArrays

julia> model = Model(Ipopt.Optimizer)
A JuMP Model
├ solver: Ipopt
├ objective_sense: FEASIBILITY_SENSE
├ num_variables: 0
├ num_constraints: 0
└ Names registered in the model: none

julia> @variables(model, begin
           Tc_out[1:2], (start = (41 + 105)*0.5)
           T, (start = 0.25 * (2*41 + 105 + 206))
           Th_out[1:2], (start = (41 + 105)*0.5)
           0 <= u[1:2] <= 1, (start = 0.5)
           Tc_in in Parameter(41.0)
           wc in Parameter(97.0)
           Th_in[1:2] in Parameter(105.0)
           wh[1:2] in Parameter(118.0)
           UA[1:2] in Parameter(240.0)
       end);

julia> set_parameter_value(Th_in[2], 206.0)

julia> set_parameter_value(wh[2], 94.0)

julia> set_parameter_value(UA[2], 502.0)

julia> @constraints(model, begin
           Tc_outL[i in 1:2], Tc_out[i] >= Tc_in
           Tc_outH[i in 1:2], Tc_out[i] <= Th_in[i]
           Th_outL[i in 1:2], Th_out[i] >= Tc_in
           Th_outH[i in 1:2], Th_out[i] <= Th_in[i]
           HTrans[i in 1:2],
               Tc_in - Tc_out[i] + UA[i] * (
                   (Th_in[i] - Tc_out[i]) * (Th_out[i] - Tc_in) * 0.5 * (Th_in[i] - Tc_out[i] + Th_out[i] - Tc_in)
               )^(1 / 3) / (u[i] * wc * 4.2) == 0
           ECon_cold, -T + Tc_out[1] * u[1] + Tc_out[2] * u[2] == 0
           ECon_HX[i in 1:2],
               (Th_in[i] - Th_out[i]) * wh[i] - (Tc_out[i] - Tc_in) * wc * u[i] == 0
           MCon, u[1] + u[2] == 1
           Temp,
               (Tc_out[2] - Tc_in)^2 / (Th_in[2] - Tc_in) -  (Tc_out[1] - Tc_in)^2 / (Th_in[1] - Tc_in) == 0
       end);

julia> function df_dx(model::Model, ci::ConstraintRef, x::VariableRef)
           function my_value(x::VariableRef)
               if is_parameter(x)
                   return parameter_value(x)
               else
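                   # Note: this evaluates at the variable start values. If you have
                   # already called `optimize!`, returning `value(x)` here would give
                   # the gradient at the solution instead.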
                   return start_value(x)
               end
           end
           nlp = MOI.Nonlinear.Model()
           # Add ci as a constraint
           object = constraint_object(ci)
           MOI.Nonlinear.add_constraint(nlp, object.func, object.set)
           # Build the evaluator
           vars = all_variables(model)
           backend = MOI.Nonlinear.SparseReverseMode()
           evaluator = MOI.Nonlinear.Evaluator(nlp, backend, index.(vars))
           # Initialize
           MOI.initialize(evaluator, [:Jac])
           # Initialize the output structure
           sparsity = MOI.constraint_gradient_structure(evaluator, 1)
           ∇g = zeros(length(sparsity))
           # Evaluate the gradient at the point given by `my_value.(vars)`
           MOI.eval_constraint_gradient(evaluator, ∇g, my_value.(vars), 1)
           # Convert to sparsevec
           J = SparseArrays.sparsevec(sparsity, ∇g, length(vars))
           # Return x element
           i = findfirst(==(x), vars)
           return J[i]
       end
df_dx (generic function with 1 method)

julia> df_dx(model, Temp, u[1])
0.0
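
If you did want the Jacobian route instead, here is a rough sketch of how I would adapt df_dx using MOI.jacobian_structure and MOI.eval_constraint_jacobian; constraint_jacobian is a hypothetical helper written for this post, not something JuMP provides:

function constraint_jacobian(model::Model, cis::Vector{<:ConstraintRef}, x::Vector{Float64})
    nlp = MOI.Nonlinear.Model()
    for ci in cis
        object = constraint_object(ci)
        MOI.Nonlinear.add_constraint(nlp, object.func, object.set)
    end
    vars = all_variables(model)
    evaluator = MOI.Nonlinear.Evaluator(nlp, MOI.Nonlinear.SparseReverseMode(), index.(vars))
    MOI.initialize(evaluator, [:Jac])
    # (row, column) indices of the structural nonzeros
    sparsity = MOI.jacobian_structure(evaluator)
    V = zeros(length(sparsity))
    MOI.eval_constraint_jacobian(evaluator, V, x)
    rows, cols = first.(sparsity), last.(sparsity)
    # Duplicate entries in `sparsity` are summed by `sparse`, which is what we want
    return SparseArrays.sparse(rows, cols, V, length(cis), length(vars))
end

# Evaluate at the same point `df_dx` used (start values for variables,
# current values for parameters):
xk = [is_parameter(v) ? parameter_value(v) : start_value(v) for v in all_variables(model)]
constraint_jacobian(model, [HTrans; Temp], xk)

The last row of the result corresponds to Temp and should match the single-constraint gradient returned by df_dx.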