Solving an MCP with PATHSolver with indexing

Hello,

I am trying to solve an MCP that requires performing operations across some indices.

Here is an MWE:

using JuMP
using Random
using LinearAlgebra
using BenchmarkTools

import PATHSolver
PATHSolver.c_api_License_SetString("2830898829&Courtesy&&&USR&45321&5_1_2021&1000&PATH&GEN&31_12_2025&0_0_0&6000&0_0")


struct MyArrays
    denom_i::Vector{Float64}
    v::Vector{Float64}
    u::Vector{Float64}
end

function make_arrays(S, J)
    MyArrays(zeros(S), zeros(S), zeros(J))
end

function example_one_go(J::Int, S::Int, A::Matrix, Guess_y::Vector{Float64}, z::Vector{Float64}, C::Matrix, Guess::Matrix, myarrays::MyArrays)
    model = Model(PATHSolver.Optimizer)
    set_silent(model)

    @variable(model, x[i = 1:J, j = 1:J] >= 0, start = Guess[i, j])
    y_fixed_1 = 1.0
    @variable(model, y[i = 2:J] >= 0, start = Guess_y[i])

    last_x = Vector{Float64}()

    function f(x_i, x_j, x...)
        i = round(Int, x_i)
        j = round(Int, x_j)
        new_x_vec = collect(x)

        if last_x === nothing || new_x_vec != last_x
            X = reshape(new_x_vec, J, J)
            denom_i = myarrays.denom_i
            @views mul!(denom_i, A, X[i, :])
            last_x = x
        end

        return sum(A[s, j] / denom_i[s] for s in 1:S) * (1 / S)
    end

    function ∇f(g, x_i, x_j, x...)
        local i = round(Int, x_i)
        local j = round(Int, x_j)
        new_x_vec = collect(x)

        if last_x === nothing || new_x_vec != last_x
            X = reshape(new_x_vec, J, J)
            fill!(g, 0.0)
            denom_i = myarrays.denom_i
            @views mul!(denom_i, A, X[i, :])
            v = myarrays.v
            u = myarrays.u

            @inbounds @simd for s in 1:S
                v[s] = A[s, j] / (denom_i[s]^2)
            end

            @views mul!(u, transpose(A), v)
            for q in 1:J
                local col = 2 + (q - 1) * J + i
                g[col] = -(1 / S) * u[q]
            end

            last_x = x
        end
        return
    end

    get_y(i) = (i == 1) ? y_fixed_1 : y[i]

    @operator(model, op_f, 2 + J * J, f, ∇f)
    @constraint(model, comp1[i in 1:J, j in 1:J], -get_y(i) * op_f(i, j, x...) + get_y(j) * C[i, j] / z[j] ⟂ x[i, j])
    @constraint(model, comp2[i in 2:J], -sum(x[j, i] * C[i, j] for j in 1:J) + z[i] ⟂ y[i])

    JuMP.optimize!(model)

    Y = vcat(1.0, collect(value.(y)))
    return value.(x), Y, ((Y ./ z) .* C) .* value.(x)
end

# Example inputs to run the function
J = 10
S = 100
Guess_y = ones(J)
z = normalize(rand(J), 1) .+ 0.1  # Normalize z to have unit sum
C = ones(J, J)
for i in 1:J
    for j in 1:J
        if i != j
            C[i, j] = 1.25
        end
    end
end
Guess = ones(J, J)
A = rand(S,J) .+ 0.1  # Normalize and shift values to avoid zeros


myarrays = make_arrays(S, J)

# Running the function
result_x, result_Y, transformed_matrix =  example_one_go(J, S, A, Guess_y, z, C, Guess, myarrays)

I have a few questions:

  1. Are there any performance gains to be achieved in this code? In my true application, both J and S are in the thousands.

  2. Is this code amenable to other MCP solvers such as GAMS or KNITRO? Or does the syntax/JuMP structure not translate well?

  3. Is there a performance-useful way in which, instead of looping over i,j, I do everything once by filling up a large (possibly sparse) matrix, and then use that matrix to evaluate the constraints?

I’d write your model as:

using JuMP
import LinearAlgebra
import PATHSolver

function example_one_go(
    J::Int,
    S::Int,
    A::Matrix,
    Guess_y::Vector{Float64},
    z::Vector{Float64},
    C::Matrix,
    Guess::Matrix,
)
    model = Model(PATHSolver.Optimizer)
    @variables(model, begin
        x[i = 1:J, j = 1:J] >= 0, (start = Guess[i, j])
        y[i = 1:J] >= 0, (start = Guess_y[i])
        denom[i in 1:J, s in 1:S] >= 1e-4, (start = A[s, :]' * Guess[i, :])
    end)
    fix(y[1], 1.0; force = true)
    @constraints(model, begin
        [i in 1:J, s in 1:S], A[s, :]' * x[i, :] - denom[i, s] ⟂ denom[i, s]
        [i in 1:J, j in 1:J],
            -y[i] * sum(A[s, j] / denom[i, s] for s in 1:S) / S + y[j] * C[i, j] / z[j] ⟂ x[i, j]
        [i in 2:J], -C[i, :]' * x[:, i] + z[i] ⟂ y[i]
    end)
    optimize!(model)
    @assert is_solved_and_feasible(model)
    X, Y = value.(x), value.(y)
    return X, Y, Y ./ z .* C .* X
end

# Example inputs to run the function
J, S = 10, 100
A = rand(S, J) .+ 0.1
Guess_y = ones(J)
z = LinearAlgebra.normalize(rand(J), 1) .+ 0.1
C = [i == j ? 1.0 : 1.25 for i in 1:J, j in 1:J]
Guess = ones(J, J)
@time ret = example_one_go(J, S, A, Guess_y, z, C, Guess)
  1. Unless you have other things going on, using a user-defined operator is more hassle than it’s worth. I assume you went that way because JuMP’s default AD system doesn’t exploit common subexpressions in the denominator, so I’ve used the trick of adding a variable to represent the expression. This also lets you add bounds on the denominator, which fixes a bunch of iterations like this in PATH:

    2     1    15     3 nan  1.0e+00 SD nan 5.6e-16 (comp1[1,2)

  2. I don’t have a GAMS license so I can’t comment. In theory KNITRO supports this, but it really supports only 0 <= x ⟂ y >= 0 where x and y are variables, so JuMP will add extra variables and nonlinear equality constraints. This might defeat the benefit of using KNITRO (see the sketch after this list).

  3. No. Looping over indices is fine.
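To illustrate the kind of reformulation I mean (a conceptual sketch only, not literally what JuMP generates), take a scalar constraint F(x) ⟂ x with F(x) = x^2 - 1. A solver that accepts only variable-variable pairs needs an extra variable w, a nonlinear equality w == F(x), and the pair w ⟂ x:

using JuMP

model = Model()
@variable(model, x >= 0)
@variable(model, w)
# What we actually want:           x^2 - 1 ⟂ x
# Reformulation for a solver that only supports variable ⟂ variable:
@constraint(model, w == x^2 - 1)   # extra nonlinear equality constraint
@constraint(model, w ⟂ x)          # variable-variable complementarity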


Hi @odow, thanks for this suggestion.

I am implementing your suggestion with the function example_one_go_v2, in the following MWE:

using JuMP
using Random
using LinearAlgebra
using BenchmarkTools

import PATHSolver
PATHSolver.c_api_License_SetString("2830898829&Courtesy&&&USR&45321&5_1_2021&1000&PATH&GEN&31_12_2025&0_0_0&6000&0_0")


struct MyArrays
    denom_i::Vector{Float64}
    v::Vector{Float64}
    u::Vector{Float64}
end

function make_arrays(S, J)
    MyArrays(zeros(S), zeros(S), zeros(J))
end

function example_one_go(J::Int, S::Int, A::Matrix, Guess_y::Vector{Float64}, z::Vector{Float64}, C::Matrix, Guess::Matrix, myarrays::MyArrays)
    model = Model(PATHSolver.Optimizer)
    set_silent(model)

    @variable(model, x[i = 1:J, j = 1:J] >= 0, start = Guess[i, j])
    y_fixed_1 = 1.0
    @variable(model, y[i = 2:J] >= 0, start = Guess_y[i])

    last_x = Vector{Float64}()

    function f(x_i, x_j, x...)
        i = round(Int, x_i)
        j = round(Int, x_j)
        new_x_vec = collect(x)

        if last_x === nothing || new_x_vec != last_x
            X = reshape(new_x_vec, J, J)
            denom_i = myarrays.denom_i
            @views mul!(denom_i, A, X[i, :])
            last_x = x
        end

        return sum(A[s, j] / denom_i[s] for s in 1:S) * (1 / S)
    end

    function ∇f(g, x_i, x_j, x...)
        local i = round(Int, x_i)
        local j = round(Int, x_j)
        new_x_vec = collect(x)

        if last_x === nothing || new_x_vec != last_x
            X = reshape(new_x_vec, J, J)
            fill!(g, 0.0)
            denom_i = myarrays.denom_i
            @views mul!(denom_i, A, X[i, :])
            v = myarrays.v
            u = myarrays.u

            @inbounds @simd for s in 1:S
                v[s] = A[s, j] / (denom_i[s]^2)
            end

            @views mul!(u, transpose(A), v)
            for q in 1:J
                local col = 2 + (q - 1) * J + i
                g[col] = -(1 / S) * u[q]
            end

            last_x = x
        end
        return
    end

    get_y(i) = (i == 1) ? y_fixed_1 : y[i]

    @operator(model, op_f, 2 + J * J, f, ∇f)
    @constraint(model, comp1[i in 1:J, j in 1:J], -get_y(i) * op_f(i, j, x...) + get_y(j) * C[i, j] / z[j] ⟂ x[i, j])
    @constraint(model, comp2[i in 2:J], -sum(x[j, i] * C[i, j] for j in 1:J) + z[i] ⟂ y[i])

    JuMP.optimize!(model)

    Y = vcat(1.0, collect(value.(y)))
    return value.(x), Y, ((Y ./ z) .* C) .* value.(x)
end

function example_one_go_v2(
    J::Int,
    S::Int,
    A::Matrix,
    Guess_y::Vector{Float64},
    z::Vector{Float64},
    C::Matrix,
    Guess::Matrix,
)
    model = Model(PATHSolver.Optimizer)
    set_silent(model)
    @variables(model, begin
        x[i = 1:J, j = 1:J] >= 0, (start = Guess[i, j])
        y[i = 1:J] >= 0, (start = Guess_y[i])
        denom[i in 1:J, s in 1:S] >= 1e-8, (start = A[s, :]' * Guess[i, :])
    end)
    fix(y[1], 1.0; force = true)
    @constraints(model, begin
        [i in 1:J, s in 1:S], A[s, :]' * x[i, :] - denom[i, s] ⟂ denom[i, s]
        [i in 1:J, j in 1:J],
            -y[i] * sum(A[s, j] / denom[i, s] for s in 1:S) / S + y[j] * C[i, j] / z[j] ⟂ x[i, j]
        [i in 2:J], -C[i, :]' * x[:, i] + z[i] ⟂ y[i]
    end)
    optimize!(model)
    #@assert is_solved_and_feasible(model)
    X, Y = value.(x), value.(y)
    return X, Y, Y ./ z .* C .* X
end

# Example inputs to run the function
J = 20
S = 1000
Guess_y = ones(J)
z = normalize(rand(J), 1) .+ 0.1  # Normalize z to have unit sum

x_coord = [x for x in 1:J]
y_coord = ones(J)

function distance1(elas, x, y)
    dimJ = size(x)[1]
    DistMat = zeros(dimJ, dimJ)
    for i = 1:dimJ, j = 1:dimJ
        DistMat[i, j] = (1 + sqrt((x[i] - x[j])^2 + (y[i] - y[j])^2))^elas
    end
    return DistMat
end

C = distance1(0.1,x_coord,y_coord)

Guess = ones(J, J)
A = rand(S,J) .+ 0.1  # Normalize and shift values to avoid zeros
myarrays = make_arrays(S, J)

# Running the function
ret_v1 = @btime   example_one_go(J, S, A, Guess_y, z, C, Guess, myarrays)

ret_v2 = @btime  example_one_go_v2(J, S, A, Guess_y, z, C, Guess)

isapprox(ret_v1[2],ret_v2[2])

isapprox(ret_v1[1],ret_v2[1])

and, at least for these values, the original function is much faster and also more “robust”, which really surprised me, because I agree that iterations in which the denominator is 0 are troublesome.

In particular, if I run with J = 10,

# Running the function
ret_v1 = @btime   example_one_go($J, $S, $A, $Guess_y, $z, $C, $Guess, $myarrays)

ret_v2 = @btime  example_one_go_v2($J, $S, $A, $Guess_y, $z, $C, $Guess)

isapprox(ret_v1[2],ret_v2[2])

isapprox(ret_v1[1],ret_v2[1])

I get:

764.341 ms (45807012 allocations: 883.98 MiB)
3.741 s (5831544 allocations: 414.33 MiB)
true
true

with J = 15,

 1.769 s (98376055 allocations: 2.01 GiB)
14.397 s (12009241 allocations: 896.22 MiB)
true
false

but this false is just numerical noise (a difference of about 1e-8).

But when I do J = 20, for example, the second code crashes.

 2.891 s (155912056 allocations: 3.63 GiB)
 55.848 s (20321527 allocations: 1.43 GiB)

and when I uncomment the @assert, the function fails.

I do like that the function you propose makes far fewer allocations and uses much less memory. This might be very important for scaling up. But it seems this comes at the expense of speed and reliability, which is very odd to me, because your proposed code is very transparent.

Am I missing something, or did I mess up the function you proposed in a way I cannot find?

Not sure of the reason. Have you looked at the log from PATH? Is the time being spent in JuMP? Or is PATH taking many more iterations to converge? You might consider making the lower bound of the denominator larger.
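For example, inside example_one_go_v2 you could try something like the following; 1e-2 is only an illustrative value, so pick a bound that makes sense for your data:

@variables(model, begin
    x[i = 1:J, j = 1:J] >= 0, (start = Guess[i, j])
    y[i = 1:J] >= 0, (start = Guess_y[i])
    # a larger lower bound than 1e-8, so PATH never divides by a near-zero denom
    denom[i in 1:J, s in 1:S] >= 1e-2, (start = A[s, :]' * Guess[i, :])
end)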

If you’re going to manually code the gradient, you may as well use PATHSolver.solve_mcp directly.

Thanks. The logs, for J = 10, S = 1000, are as follows.

For example_one_go:

Path 5.0.03 (Fri Jun 26 10:05:33 2020)
Written by Todd Munson, Steven Dirkse, Youngdae Kim, and Michael Ferris


Crash Log
major  func  diff  size  residual    step       prox   (label)
    0     0             1.2615e+01             0.0e+00 (comp2[2)
    1     1    17   109 6.2502e+00  1.0e+00    0.0e+00 (comp2[2)
pn_search terminated: no progress.

Major Iteration Log
major minor  func  grad  residual    step  type prox    inorm  (label)
    0     0    14     2 6.2502e+00           I 0.0e+00 5.3e+00 (comp2[2)
    1    13    15     3 4.7591e-02  1.0e+00 SM 0.0e+00 2.7e-02 (comp1[1,6)
    2   119    16     4 -nan(ind)  1.0e+00 SD 0.0e+00 -nan(ind) (comp1[1,1)
    3    46    17     5 -nan(ind)  1.0e+00 SD -nan(ind) -nan(ind) (comp1[1,1)
    4    11    18     6 -nan(ind)  1.0e+00 SD -nan(ind) -nan(ind) (comp1[1,1)
    5     3    19     7 -nan(ind)  1.0e+00 SD -nan(ind) -nan(ind) (comp1[1,1)
    6     1    20     8 -nan(ind)  1.0e+00 SD -nan(ind) -nan(ind) (comp1[1,1)
    7     1    21     9 -nan(ind)  1.0e+00 SD -nan(ind) -nan(ind) (comp1[1,1)
    8     1    22    10 -nan(ind)  1.0e+00 SD -nan(ind) -nan(ind) (comp1[1,1)
    9     1    23    11 -nan(ind)  1.0e+00 SD -nan(ind) -nan(ind) (comp1[1,1)
   10     1    24    12 -nan(ind)  1.0e+00 SD -nan(ind) -nan(ind) (comp1[1,1)
   11     1    26    13 8.1305e-01  8.0e-01 SW 0.0e+00 2.9e-01 (comp1[6,2)
   12    66    27    14 2.1668e-01  1.0e+00 SO 0.0e+00 7.2e-02 (comp1[6,2)
   13    26    28    15 9.5901e-02  1.0e+00 SO 0.0e+00 4.0e-02 (comp1[9,7)
   14     6    29    16 2.0485e-02  1.0e+00 SO 0.0e+00 8.0e-03 (comp1[9,7)
   15     7    30    17 1.4439e-02  1.0e+00 SO 0.0e+00 1.0e-02 (comp1[1,4)
   16     2    31    18 8.4694e-04  1.0e+00 SO 0.0e+00 4.8e-04 (comp1[1,4)
   17     1    32    19 5.1479e-06  1.0e+00 SO 0.0e+00 3.6e-06 (comp1[1,3)
   18     1    33    20 3.0525e-10  1.0e+00 SO 0.0e+00 2.2e-10 (comp1[1,3)

Major Iterations. . . . 18
Minor Iterations. . . . 307
Restarts. . . . . . . . 0
Crash Iterations. . . . 1
Gradient Steps. . . . . 0
Function Evaluations. . 33
Gradient Evaluations. . 20
Basis Time. . . . . . . 0.000000
Total Time. . . . . . . 0.375000
Residual. . . . . . . . 3.052528e-10
  0.389121 seconds (24.48 M allocations: 475.030 MiB, 8.65% gc time)

and for example_one_go_v2:

Path 5.0.03 (Fri Jun 26 10:05:33 2020)
Written by Todd Munson, Steven Dirkse, Youngdae Kim, and Michael Ferris

Preprocessed size   : 10109

Crash Log
major  func  diff  size  residual    step       prox   (label)
    0     0             1.2615e+01             0.0e+00 ()
    1    12     0 10109 1.2614e+01  5.4e-05    0.0e+00 ()
pn_search terminated: no basis change.

Major Iteration Log
major minor  func  grad  residual    step  type prox    inorm  (label)
    0     0    13     2 1.2614e+01           I 0.0e+00 1.2e+01 ()
    1   114    14     3 1.5193e-01  1.0e+00 SM 0.0e+00 5.2e-02 ()
    2    17    15     4 7.5712e-02  1.0e+00 SO 0.0e+00 2.6e-02 ()
    3   121    17     5 6.4499e+00  8.0e-01 CB 0.0e+00 6.4e+00 ()
    4   335    19     6 1.3900e+00  8.0e-01 CB 0.0e+00 1.4e+00 ()
    5   161    20     7 1.4274e+00  1.0e+00 CO 0.0e+00 1.4e+00 ()

Minor Iteration Log
minor      t          z     w     v   art ckpts  enter    leave
  500  1.8984e-03 10019    50     0    39     9 w[   52] z[  101]
    6    16    21     8 1.0775e+02  1.0e+00 SD 1.4e-01 9.8e+00 ()
    7   205    22     9 1.0768e+02  1.0e+00 CD 5.7e-02 9.8e+00 ()
    8   264    23    10 1.0768e+02  1.0e+00 CD 2.3e-02 9.8e+00 ()
    9   272    24    11 1.0768e+02  1.0e+00 CD 9.1e-03 9.8e+00 ()
   10   114    25    12 1.0768e+02  1.0e+00 CD 3.7e-03 9.8e+00 ()
   11   308    26    13 1.0768e+02  1.0e+00 CD 1.5e-03 9.8e+00 ()
   12    54    28    14 7.5303e-02  1.0e+00 RG 0.0e+00 2.6e-02 ()
   13    14    29    15 3.7618e-02  1.0e+00 SO 0.0e+00 1.3e-02 ()
   14   403    31    16 7.4297e+00  8.0e-01 CB 0.0e+00 7.3e+00 ()
   15   171    33    17 4.8264e+00  8.0e-01 CB 0.0e+00 4.6e+00 ()
   16   318    34    18 5.6495e+01  1.0e+00 CD 0.0e+00 6.1e+00 ()
   17    85    35    19 2.3467e+02  1.0e+00 CD 0.0e+00 1.1e+02 ()
   18   114    36    20 2.3405e+02  1.0e+00 CD 0.0e+00 1.1e+02 ()
   19    23    38    21 3.7416e-02  1.0e+00 RG 0.0e+00 1.3e-02 ()

Minor Iteration Log
minor      t          z     w     v   art ckpts  enter    leave
  500 -1.7129e-04 10055    53     0     0    12 w[   46] z[   56]

Major Iteration Log
major minor  func  grad  residual    step  type prox    inorm  (label)
   20   503    45    22 6.2490e+00  1.3e-01 CB 0.0e+00 6.2e+00 ()

Minor Iteration Log
minor      t          z     w     v   art ckpts  enter    leave
  500 -1.0592e-03 10053    55     0     0    13 w[   36] z[   26]

Major Iteration Log
major minor  func  grad  residual    step  type prox    inorm  (label)
   21   944    46    23 4.8430e+00  1.0e+00 CO 0.0e+00 4.8e+00 ()
   22    23    47    24 4.8430e+00  1.0e+00 CO 0.0e+00 4.8e+00 ()
   23    33    48    25 4.8430e+00  1.0e+00 CO 0.0e+00 4.8e+00 ()
   24    38    63    26 4.8430e+00  3.5e-08 RB 0.0e+00 4.8e+00 ()
   25    24    75    27 3.7404e-02  3.2e-04 CG 0.0e+00 1.3e-02 ()

Minor Iteration Log
minor      t          z     w     v   art ckpts  enter    leave
  500 -2.0783e-03 10046    62     0     0    12 w[   62] z[   82]

Major Iteration Log
major minor  func  grad  residual    step  type prox    inorm  (label)
   26   610    76    28 2.9577e-02  1.0e+00 CO 0.0e+00 1.0e-02 ()
   27   275    84    29 2.8827e+00  5.5e-02 CB 0.0e+00 2.9e+00 ()
   28    94   107    30 2.9575e-02  2.8e-02 CG 0.0e+00 1.0e-02 ()
   29    38   133    31 2.9573e-02  3.2e-04 RG 0.0e+00 1.0e-02 ()

Minor Iteration Log
minor      t          z     w     v   art ckpts  enter    leave
  500 -2.9705e-02 10050    58     0     0    35 z[   61] w[   81]
 1000 -3.6487e-02 10038    70     0     0    71 w[   41] w[   12]

Restart Log
proximal_perturbation 0
crash_method none
crash_perturb yes
nms_initial_reference_factor 2
lemke_start_type slack
proximal_perturbation 1.0000e-01

Major Iteration Log
major minor  func  grad  residual    step  type prox    inorm  (label)
   29     0   160    32 1.2615e+01           R 1.0e-01 1.2e+01 ()
   30    13   161    33 9.4936e+01  1.0e+00 SD 4.0e-02 1.7e+00 ()
   31    13   162    34 3.6983e+00  1.0e+00 SO 1.6e-02 1.5e-01 ()
   32    11   163    35 2.4337e+00  1.0e+00 SO 6.4e-03 6.2e-02 ()
   33     7   164    36 4.0659e-01  1.0e+00 SO 2.6e-03 1.4e-02 ()
   34     6   165    37 1.9530e-01  1.0e+00 SO 1.0e-03 8.2e-03 ()
   35    30   166    38 1.6286e-01  1.0e+00 SO 4.1e-04 2.5e-02 ()
   36     7   167    39 2.9250e-02  1.0e+00 SO 1.6e-04 2.9e-03 ()
   37     2   168    40 3.4643e-03  1.0e+00 SO 6.6e-05 2.5e-04 ()
   38     1   169    41 2.3248e-04  1.0e+00 SO 2.6e-05 1.2e-05 ()
   39     1   170    42 6.7830e-06  1.0e+00 SO 1.0e-05 3.5e-07 ()
   40     1   171    43 1.3597e-08  1.0e+00 SO 6.8e-07 7.0e-10 ()

Major Iterations. . . . 40
Minor Iterations. . . . 7289
Restarts. . . . . . . . 1
Crash Iterations. . . . 1
Gradient Steps. . . . . 6
Function Evaluations. . 171
Gradient Evaluations. . 43
Basis Time. . . . . . . 18.594000
Total Time. . . . . . . 20.593000
Residual. . . . . . . . 1.359654e-08
Postsolved residual: 1.3597e-08
 21.027442 seconds (5.83 M allocations: 414.477 MiB, 0.80% gc time)

As for using solve_mcp directly, I am a little bit lost. Do you know where I can find an MWE, or how you would adjust this example?

See PATHSolver.jl/README.md at master · chkwon/PATHSolver.jl · GitHub
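The linear example there looks roughly like this (check the README for the exact types, and for the function and sparse-Jacobian callback signatures you’d need in the nonlinear case):

import PATHSolver
import SparseArrays

# Solves the box-constrained MCP: lb <= z <= ub complements M * z + q
M = convert(
    SparseArrays.SparseMatrixCSC{Cdouble,Cint},
    SparseArrays.sparse([
        0.0  0.0 -1.0 -1.0
        0.0  0.0  1.0 -2.0
        1.0 -1.0  2.0 -2.0
        1.0  2.0 -2.0  4.0
    ]),
)
q = [2.0, 2.0, -2.0, -6.0]
status, z, info = PATHSolver.solve_mcp(
    M,                      # linear part
    q,                      # constant part
    fill(0.0, 4),           # lower bounds
    fill(10.0, 4),          # upper bounds
    [0.0, 0.0, 0.0, 0.0],   # starting point
)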

But now that I’ve looked at your example a little more deeply, I’m not sure that it is correct, because it doesn’t account for the different i and j at the same x.

How about something like:

function example_one_go(
    J::Int,
    S::Int,
    A::Matrix,
    Guess_y::Vector{Float64},
    z::Vector{Float64},
    C::Matrix,
    Guess::Matrix,
)
    denom_i, v, last = zeros(S), zeros(S), nothing
    function update_cache(i, j, x)
        if (i, j, x) != last
            denom_i .= A * collect(x[i:J:end])
            for s in 1:S
                v[s] = A[s, j] / denom_i[s]^2
            end
            last = (i, j, x)
        end
        return
    end
    function f(x_i, x_j, x...)
        i, j = round(Int, x_i), round(Int, x_j)
        update_cache(i, j, x)
        return sum(A[s, j] / denom_i[s] for s in 1:S) / S
    end
    function ∇f(g, x_i, x_j, x...)
        i, j = round(Int, x_i), round(Int, x_j)
        update_cache(i, j, x)
        fill!(g, 0.0)
        for q in 1:J
            g[2+J*(q-1)+i] = -sum(A[s, q] * v[s] for s in 1:S) / S
        end
        return
    end
    model = Model(PATHSolver.Optimizer)
    @operator(model, op_f, 2 + J * J, f, ∇f)
    @variable(model, x[i = 1:J, j = 1:J] >= 0, start = Guess[i, j])
    @variable(model, y[i = 1:J] >= 0, start = Guess_y[i])
    fix(y[1], 1.0; force = true)
    @constraints(model, begin
        [i in 1:J, j in 1:J], -y[i] * op_f(i, j, x...) + y[j] * C[i, j] / z[j] ⟂ x[i, j]
        # 0.0 ⟂ y[1]  # Added by default
        [i in 2:J], -sum(x[j, i] * C[i, j] for j in 1:J) + z[i] ⟂ y[i]
    end)
    optimize!(model)
    @assert is_solved_and_feasible(model)
    X, Y = value.(x), value.(y)
    return X, Y, Y ./ z .* C .* X
end

Can you clarify what you mean by “it doesn’t account for the different i and j at the same x”?

Your code checks only if new_x_vec != last_x, but it might be called with the same value of x and a different i, in which case denom_i isn’t going to be correct.

You don’t hit the bug, because last_x === nothing || new_x_vec != last_x is always true (check it): you have last_x = x (a tuple) instead of last_x = new_x_vec (a vector), so your clever caching trick isn’t actually doing anything.
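A standalone way to see why that comparison is always true:

x_tuple = (1.0, 2.0)      # the splatted x... inside f and ∇f is a Tuple
x_vec = collect(x_tuple)  # collect(x) produces a Vector
x_vec != x_tuple          # true: a Vector is never == a Tuple, so the
                          # "recompute" branch runs on every call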

Ah! Great catch. Thanks for this! If I may abuse your time, can I ask four questions?

  1. In the line if (i, j, x) != last, why are you including the j? I understand why you do it in your code, because of the v. But could it be faster to update only denom_i in update_cache, triggered only by a different i or a different x?
  2. Does it make sense to clamp denom_i? Or does that usually hurt performance?
  3. Do you think that using solve_mcp would provide large performance improvements?
  4. Are there any limits on how large J or S can be with PATH? Or are the limits set by my machine’s specs?

Again, thanks!

  1. Sure, you could be cleverer with the caching; see the sketch below this list.
  2. I don’t understand. Clamp to what values?
  3. Not really.
  4. It’s problem dependent. You won’t know until you try.
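On 1., here is an untested sketch of what I mean. It drops into the closure from my earlier post (which defines A, J, S, denom_i, and v): recompute denom_i only when (i, x) changes, and v only when (i, j, x) changes.

last_denom, last_v = nothing, nothing
function update_cache(i, j, x)
    if (i, x) != last_denom
        denom_i .= A * collect(x[i:J:end])
        last_denom = (i, x)
        last_v = nothing  # denom_i changed, so v must be rebuilt
    end
    if (i, j, x) != last_v
        for s in 1:S
            v[s] = A[s, j] / denom_i[s]^2
        end
        last_v = (i, j, x)
    end
    return
end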