How do I get a variable's value inside a JuMP model to insert it into a function?

Hi everyone!
In order to avoid unnecessary variables, I need to get a JuMP variable's value from the model at each iteration, insert it into a function, and use it in a @NLexpression.

The function is

using JuMP, Ipopt

function fy1(x1, x2, x3, x4)
    ## VLE (vapour-liquid equilibrium) parameters
    m = Model(solver=IpoptSolver(print_level=0))
    P = 14.6959    # atmospheric pressure [psia]
    # Antoine vapor-pressure coefficients
    A = [7.51333 6468.10100 396.26520;
         7.43437 6162.36000 359.38260;
         6.68394 5414.96100 303.98640;
         6.30319 5225.32400 274.42910]
    Pc = [923.7, 929.3, 966.4, 1013.2]    # critical pressures [psia]
    @variable(m, 77.0 ≤ TFC ≤ 500.0)
    @NLobjective(m, Max, 1.0)
    # Saturation pressures from the Antoine equation
    @NLexpression(m, PsC1, exp(A[1,1] - A[1,2]/(TFC + A[1,3]))*Pc[1])
    @NLexpression(m, PsC2, exp(A[2,1] - A[2,2]/(TFC + A[2,3]))*Pc[2])
    @NLexpression(m, PsC3, exp(A[3,1] - A[3,2]/(TFC + A[3,3]))*Pc[3])
    @NLexpression(m, PsC4, exp(A[4,1] - A[4,2]/(TFC + A[4,3]))*Pc[4])
    # Equilibrium ratios
    @NLexpression(m, keq1, PsC1/P)
    @NLexpression(m, keq2, PsC2/P)
    @NLexpression(m, keq3, PsC3/P)
    @NLexpression(m, keq4, PsC4/P)
    # Vapour-phase mole fractions
    @NLexpression(m, y1, keq1*x1)
    @NLexpression(m, y2, keq2*x2)
    @NLexpression(m, y3, keq3*x3)
    @NLexpression(m, y4, keq4*x4)
    # VLE constraint: mole fractions sum to one
    @NLconstraint(m, 1.0 - y1 - y2 - y3 - y4 == 0.0)
    setvalue(TFC, 140.0)
    solve(m)
    return getvalue(y1)
end

And I insert it into a simple model, e.g.

solver = IpoptSolver()
m = Model(solver=solver)
@variables(m, begin
    0.0 ≤ RR ≤ 1.0
    0.0 ≤ x1 ≤ 1.0
    0.0 ≤ x2 ≤ 1.0
    0.0 ≤ x3 ≤ 1.0
    0.0 ≤ x4 ≤ 1.0
end)
@NLobjective(m, Max, 20*x1)
JuMP.register(m, :fy1, 4, fy1, autodiff=true)
@NLexpression(m, yC1, fy1(x1, x2, x3, x4))
@NLconstraint(m, x1*x2*yC1*yC1*RR == 0.0)
@NLconstraint(m, x3*x4*x1*yC1*RR == 0.0)
solve(m)

I could put the function's expressions directly into my model as constraints, but that gives me an infeasible solution (the model has 3000 variables).

The error that I get is

Solving...
MethodError: Cannot `convert` an object of type ForwardDiff.Dual{ForwardDiff.Tag{JuMP.##166#168{#fy2},Float64},Float64,4} to an object of type Float64
This may have arisen from a call to the constructor Float64(...),
since type constructors fall back to convert methods.

Stacktrace:
 [1] fy2(::ForwardDiff.Dual{ForwardDiff.Tag{JuMP.##166#168{#fy2},Float64},Float64,4}, ::ForwardDiff.Dual{ForwardDiff.Tag{JuMP.##166#168{#fy2},Float64},Float64,4}, ::ForwardDiff.Dual{ForwardDiff.Tag{JuMP.##166#168{#fy2},Float64},Float64,4}, ::ForwardDiff.Dual{ForwardDiff.Tag{JuMP.##166#168{#fy2},Float64},Float64,4}) at ./In[5]:50
 [2] (::JuMP.##166#168{#fy2})(::Array{ForwardDiff.Dual{ForwardDiff.Tag{JuMP.##166#168{#fy2},Float64},Float64,4},1}) at /Users/christianrodriguez/.julia/v0.6/JuMP/src/nlp.jl:1363
 [3] vector_mode_gradient!(::SubArray{Float64,1,Array{Float64,1},Tuple{UnitRange{Int64}},true}, ::Function, ::SubArray{Float64,1,Array{Float64,1},Tuple{UnitRange{Int64}},true}, ::ForwardDiff.GradientConfig{ForwardDiff.Tag{JuMP.##166#168{#fy2},Float64},Float64,4,Array{ForwardDiff.Dual{ForwardDiff.Tag{JuMP.##166#168{#fy2},Float64},Float64,4},1}}) at /Users/christianrodriguez/.julia/v0.6/ForwardDiff/src/gradient.jl:102
 [4] gradient!(::SubArray{Float64,1,Array{Float64,1},Tuple{UnitRange{Int64}},true}, ::Function, ::SubArray{Float64,1,Array{Float64,1},Tuple{UnitRange{Int64}},true}, ::ForwardDiff.GradientConfig{ForwardDiff.Tag{JuMP.##166#168{#fy2},Float64},Float64,4,Array{ForwardDiff.Dual{ForwardDiff.Tag{JuMP.##166#168{#fy2},Float64},Float64,4},1}}, ::Val{true}) at /Users/christianrodriguez/.julia/v0.6/ForwardDiff/src/gradient.jl:35
 [5] (::JuMP.##167#169{JuMP.##166#168{#fy2},ForwardDiff.GradientConfig{ForwardDiff.Tag{JuMP.##166#168{#fy2},Float64},Float64,4,Array{ForwardDiff.Dual{ForwardDiff.Tag{JuMP.##166#168{#fy2},Float64},Float64,4},1}}})(::SubArray{Float64,1,Array{Float64,1},Tuple{UnitRange{Int64}},true}, ::SubArray{Float64,1,Array{Float64,1},Tuple{UnitRange{Int64}},true}) at /Users/christianrodriguez/.julia/v0.6/JuMP/src/nlp.jl:1365
 [6] eval_grad_f(::JuMP.UserFunctionEvaluator, ::SubArray{Float64,1,Array{Float64,1},Tuple{UnitRange{Int64}},true}, ::SubArray{Float64,1,Array{Float64,1},Tuple{UnitRange{Int64}},true}) at /Users/christianrodriguez/.julia/v0.6/JuMP/src/nlp.jl:1358
 [7] #forward_eval#7(::ReverseDiffSparse.UserOperatorRegistry, ::Function, ::Array{Float64,1}, ::Array{Float64,1}, ::Array{ReverseDiffSparse.NodeData,1}, ::SparseMatrixCSC{Bool,Int64}, ::Array{Float64,1}, ::Array{Float64,1}, ::Array{Float64,1}, ::Array{Float64,1}, ::Array{Float64,1}, ::Array{Float64,1}) at /Users/christianrodriguez/.julia/v0.6/ReverseDiffSparse/src/forward.jl:129
 [8] (::ReverseDiffSparse.#kw##forward_eval)(::Array{Any,1}, ::ReverseDiffSparse.#forward_eval, ::Array{Float64,1}, ::Array{Float64,1}, ::Array{ReverseDiffSparse.NodeData,1}, ::SparseMatrixCSC{Bool,Int64}, ::Array{Float64,1}, ::Array{Float64,1}, ::Array{Float64,1}, ::Array{Float64,1}, ::Array{Float64,1}, ::Array{Float64,1}) at ./<missing>:0
 [9] forward_eval_all(::JuMP.NLPEvaluator, ::Array{Float64,1}) at /Users/christianrodriguez/.julia/v0.6/JuMP/src/nlp.jl:443
 [10] eval_grad_f(::JuMP.NLPEvaluator, ::Array{Float64,1}, ::Array{Float64,1}) at /Users/christianrodriguez/.julia/v0.6/JuMP/src/nlp.jl:500
 [11] initialize(::JuMP.NLPEvaluator, ::Array{Symbol,1}) at /Users/christianrodriguez/.julia/v0.6/JuMP/src/nlp.jl:405
 [12] loadproblem!(::Ipopt.IpoptMathProgModel, ::Int64, ::Int64, ::Array{Float64,1}, ::Array{Float64,1}, ::Array{Float64,1}, ::Array{Float64,1}, ::Symbol, ::JuMP.NLPEvaluator) at /Users/christianrodriguez/.julia/v0.6/Ipopt/src/IpoptSolverInterface.jl:44
 [13] _buildInternalModel_nlp(::JuMP.Model, ::JuMP.ProblemTraits) at /Users/christianrodriguez/.julia/v0.6/JuMP/src/nlp.jl:1248
 [14] #build#119(::Bool, ::Bool, ::JuMP.ProblemTraits, ::Function, ::JuMP.Model) at /Users/christianrodriguez/.julia/v0.6/JuMP/src/solvers.jl:300
 [15] (::JuMP.#kw##build)(::Array{Any,1}, ::JuMP.#build, ::JuMP.Model) at ./<missing>:0
 [16] #solve#116(::Bool, ::Bool, ::Bool, ::Array{Any,1}, ::Function, ::JuMP.Model) at /Users/christianrodriguez/.julia/v0.6/JuMP/src/solvers.jl:168
 [17] solve(::JuMP.Model) at /Users/christianrodriguez/.julia/v0.6/JuMP/src/solvers.jl:150

I will be grateful for any help you can provide

You may want to have a look at the JuMP docs regarding nonlinear modelling:
http://www.juliaopt.org/JuMP.jl/0.18/nlp.html#syntax-notes

The user-defined functions you pass in must be simple scalar operations. JuMP (via ForwardDiff) can’t differentiate the output of an NLP.
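For instance, a registered function built from plain scalar arithmetic works fine with autodiff=true. A minimal sketch in the JuMP 0.18 API, using a single Antoine-style expression with coefficients borrowed from your A matrix (antoine_k is a made-up name):

using JuMP, Ipopt

# Pure scalar algebra: ForwardDiff can push dual numbers through
# this, because it only applies arithmetic to its argument.
antoine_k(t) = exp(7.51333 - 6468.101/(t + 396.2652)) / 14.6959

m = Model(solver=IpoptSolver(print_level=0))
@variable(m, 77.0 ≤ TFC ≤ 500.0)
JuMP.register(m, :antoine_k, 1, antoine_k, autodiff=true)
@NLobjective(m, Min, antoine_k(TFC))
solve(m)

Your fy1, by contrast, builds and solves a whole inner model, so there is nothing algebraic for ForwardDiff to differentiate through.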

Why do you have to have two separate models? Can’t you combine the two?
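A combined version might look roughly like this (a sketch, assuming the P, A, and Pc data from fy1 are in scope; the 20*x1 objective is just the placeholder from your second snippet):

# One model instead of two: TFC becomes an ordinary decision
# variable and the VLE relations become expressions/constraints.
m = Model(solver=IpoptSolver())
@variables(m, begin
    77.0 ≤ TFC ≤ 500.0
    0.0 ≤ RR ≤ 1.0
    0.0 ≤ x[1:4] ≤ 1.0
end)
# y[i] = keq_i * x_i, with keq_i from the Antoine equation
@NLexpression(m, y[i = 1:4],
    exp(A[i,1] - A[i,2]/(TFC + A[i,3])) * Pc[i] / P * x[i])
@NLconstraint(m, sum(y[i] for i in 1:4) == 1.0)
@NLobjective(m, Max, 20 * x[1])
solve(m)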

Hi,
@shoshievass helped me.
JuMP can only autodiff algebraic expressions, so I need to supply the gradient of the output with respect to the input variables.
As for the two models: in one model I calculate parameters to use in the main model. When I put the two models together, I obtained an infeasible solution.
I'm working on the gradient function.
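For reference, the JuMP 0.18 docs linked above show the pattern for registering a function together with a hand-written gradient. Adapted to a toy function (f and ∇f here are illustrative, not my actual model):

using JuMP, Ipopt

# Toy scalar function with a known closed-form gradient.
f(a, b) = (a - 1)^2 + (b - 2)^2

# The gradient callback mutates its first argument in place:
# g[i] must hold ∂f/∂(i-th argument) at the given point.
function ∇f(g, a, b)
    g[1] = 2 * (a - 1)
    g[2] = 2 * (b - 2)
end

m = Model(solver=IpoptSolver())
@variable(m, x)
@variable(m, y)
# The last argument is the gradient callback, replacing autodiff=true.
JuMP.register(m, :f, 2, f, ∇f)
@NLobjective(m, Min, f(x, y))
solve(m)

The registration looks the same for my four-argument fy1; the hard part is computing what goes into g.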
