Using sparse matrix in NLoptimization

Hi,
I’m new to Julia. These days I’m trying to use JuMP to do nonlinear optimization, for which I used to use fmincon() in MATLAB. In my problem I have a 1×144 matrix as input:

# Build the JuMP model: Ipopt solver, one variable per nonzero entry,
# a registered user objective, and linear (in)equality constraints.
model=Model(with_optimizer(Ipopt.Optimizer))
    @variable(model,x[i=1:total_nonzero],start=x0[i])   # warm-start from x0
    register(model, :myfun_travel, 144, myfun_travel; autodiff = true)  # 144 scalar arguments (= n^2)
    @NLobjective(model,Min,myfun_travel(x...))  # splat x into the registered scalar-arg function
    @constraint(model,A_ineq*x.<=b_ineq)        # element-wise inequality constraints
    @constraint(model,A_eq*x.==b_eq)            # element-wise equality constraints

However, my objective function myfun_travel involves many intermediate matrices, e.g. P, which is defined as

n=12
# Transition matrix P: positions that correspond to decision variables hold
# JuMP VariableRefs; all other entries stay 0.0 — hence the Any element type.
P=Array{Any,2}(undef,n,n)
P[:,:]=zeros(n,n)

# v_place[i+1] holds the linear (column-major) index of the i-th nonzero
# entry; translate it back to (row, col) and place the variable x[i] there.
# (The first entry of v_place is a dummy from its spzeros(1,1) initialization.)
for i=1:n*n
    if v_place[i+1]<=n*n
        if rem(v_place[i+1],n)!=0
            P[convert(Int64,rem(v_place[i+1],n)),convert(Int64,floor(v_place[i+1]/n))+1]=x[i]
        else
            # rem(...) == 0 means the last row, i.e. row n.
            # Was hard-coded as `12`, which silently breaks if n changes.
            P[n,convert(Int64,floor(v_place[i+1]/n))]=x[i]
        end
    end
end

In this problem I use Any because some entries of P are VariableRef while the others are constants.
Further I have

# F stacks (tau + w_max_int) flow blocks of size n×n; Any again because
# entries later mix constants with JuMP expressions.
F=Array{Any,2}(undef,n*(tau+w_max_int),n)
F[:,:]=zeros(n*(tau+w_max_int),n)
R = S .* F[n+1:9*n,:]#S is a constant matrix

and many other matrix operations like the one above. The code actually runs fine so far. However, to improve the efficiency of my work, I tried to use sparse().
If I use F=sparse(F), an error is thrown:
ERROR: MethodError: no method matching zero(::Type{Any})
It seems that I cannot use a sparse matrix with element type Any to do .* or +.
A similar minimal example is shown below:

# Minimal reproduction: SparseArrays needs zero(eltype) to define the
# implicit stored value, and zero(Any) has no method — so arithmetic on a
# sparse matrix with eltype Any fails with a MethodError.
matrix=Any[1 1;1 2]
matrix=sparse(matrix)
matrix.*matrix##doesn't work
matrix-matrix##doesn't work

So I don’t know how to solve this problem. My program works well with all matrices in dense form, but it takes three hours to finish the optimization — versus thirty seconds in MATLAB — because of the many slow dense matrix multiplications in my code. In MATLAB I can easily use sparse matrices for this.

The full code is shown below:

#main.jl
using Base,SparseArrays,LinearAlgebra,JuMP,KNITRO,Ipopt
include("op_myfun_travel.jl")
# W: 12×12 integer weight matrix; W[i,j] is used below as a travel-time
# offset (compared against k - w_max_int in op_myfun_travel).
# NOTE(review): appears symmetric with unit diagonal — confirm intended.
W=[1 3 3 6 4 6 3 4 8 4 6 6;
    3 1 5 4 3 4 5 5 6 3 5 5;
    3 5 1 7 6 8 3 3 9 4 8 7;
    6 4 7 1 5 6 5 7 5 6 6 7;
    4 3 6 5 1 3 6 6 6 3 4 4;
    6 4 8 6 3 1 7 7 4 6 2 3;
    3 5 3 5 6 7 1 4 7 5 7 7;
    4 5 3 7 6 7 4 1 9 3 7 5;
    8 6 9 5 6 4 7 9 1 8 5 6;
    4 3 4 6 3 6 5 3 8 1 5 3;
    6 5 8 6 4 2 7 7 5 5 1 3;
    6 5 7 7 4 3 7 5 6 3 3 1]


n=12
A = ones(n,n)                        # adjacency mask: here every pair is allowed
w_max=maximum((maximum(W,dims=1)),dims=2)   # largest entry of W, as a 1×1 matrix
w_max_int=w_max[1,1]

# NOTE(review): `count` shadows Base.count; a different name would be safer.
count = 1
location_nonzero = zeros(n,n)
v_place=spzeros(1,1)                 # grown by hcat below; entry 1 is a dummy
S = zeros(n^2,n*w_max_int)
D = zeros(1,n^2)
K = zeros(n,n^2)
for i = 1:n
    for j = 1:n
        if A[i,j] == 1
            global count
            location_nonzero[i,j] = count
            count = count + 1
            global v_place
            # record the column-major linear index of this nonzero entry
            v_place=hcat(v_place,j+(i-1)*n)
        end
        # selector matrix: maps entry (i,j) to a column offset by its W-delay
        S[j+(i-1)*n,j+(w_max_int-convert(Int64,W[i,j]))*n] = 1
    end
    D[1+(i-1)*n:i*n] = (i-1)*n+1:n^2+1:n^3   # linear indices used to zero entries of R later
    K[i,1+(i-1)*n:i*n]=ones(1,n)             # block-row indicator
end
#######################if use sparse matrix, error will throw out###########
#S = sparse(S)
#K=sparse(K)
#################################################################
row_nonzero = sum(A,dims=2)          # number of nonzeros per row of A

total_nonzero=count-1                # total number of decision variables

# Equality constraints A_eq*x == b_eq: the variables of each row sum to 1.
tmp = zeros(1,total_nonzero)
tmp[1,1:convert(Int64,row_nonzero[1])] = ones(1,convert(Int64,row_nonzero[1]))
A_eq = tmp
for i = 2:n
    tmp = zeros(1,total_nonzero)
    tmp[1,convert(Int64,sum(row_nonzero[1:i-1],dims=1)[1]+1):convert(Int64,sum(row_nonzero[1:i-1],dims=1)[1]+row_nonzero[i])] = ones(1,convert(Int64,row_nonzero[i]))
    global A_eq
    A_eq = [A_eq;tmp]
end
b_eq = ones(n,1)

# Inequality constraints: -I*x <= 0, i.e. x >= 0.
A_ineq = -Matrix{Float64}(I, total_nonzero, total_nonzero)
b_ineq = zeros(total_nonzero,1)
tau = 30

x0 = 1/n*ones(n^2)                   # uniform starting point

# Mimic MATLAB's fmincon: minimize the registered nonlinear objective
# subject to A_ineq*x <= b_ineq and A_eq*x == b_eq, warm-started at x0.
# Returns (x_value, obj_value). Relies on the global `total_nonzero`.
function fmincon(x0,A_ineq,b_ineq,A_eq,b_eq)
    verbose=true
    model=Model(with_optimizer(Ipopt.Optimizer))

    global x,P,P_sparse
    @variable(model,x[i=1:total_nonzero],start=x0[i])
    # Register the objective with one scalar argument per decision variable.
    # BUGFIX: the included file defines `op_myfun_travel`, but the original
    # registered the undefined name `myfun_travel` (UndefVarError as posted);
    # also use `total_nonzero` instead of the hard-coded arity 144.
    register(model, :myfun_travel, total_nonzero, op_myfun_travel; autodiff = true)
    @NLobjective(model,Min,myfun_travel(x...))
    @constraint(model,A_ineq*x.<=b_ineq)
    @constraint(model,A_eq*x.==b_eq)

    if verbose
        print(model)
    end
    JuMP.optimize!(model)
    obj_value=JuMP.objective_value(model)
    x_value=JuMP.value.(x)

    if verbose
        println("Objective value: ", obj_value)
        println("x = ", x_value)
    end
    # Return the solution, matching MATLAB's fmincon convention
    # (previously the function returned nothing useful).
    return x_value, obj_value
end
fmincon(x0,A_ineq,b_ineq,A_eq,b_eq)

and

op_myfun_travel.jl
# Objective: build the transition matrix P from the flat variable tuple x,
# propagate flow blocks F over `tau` steps, and return the negated minimum
# of the cumulative flow matrix (so minimizing this maximizes the smallest
# cumulative entry).
# NOTE(review): relies on many non-const globals (n, W, tau, v_place, ...),
# which forces Any-typed code and is the main performance problem in Julia.
function op_myfun_travel(x...)
    W_tmp = Array{Any,2}(undef, n, n)
    global W, tau, n, v_place, w_max_int, S, D, K, cnt, P, F, P_sparse, R, W_tmp, F_cumul

    # P: VariableRefs at variable positions, 0.0 elsewhere (hence Any).
    P = Array{Any,2}(undef, n, n)
    P[:,:] = zeros(n, n)

    # v_place[i+1] is the column-major linear index of the i-th variable
    # (entry 1 of v_place is a dummy from its initialization).
    for i = 1:n*n
        if v_place[i+1] <= n*n
            if rem(v_place[i+1], n) != 0
                P[convert(Int64, rem(v_place[i+1], n)), convert(Int64, floor(v_place[i+1]/n))+1] = x[i]
            else
                # BUGFIX: last row was hard-coded as 12; use n so the code
                # survives a change of problem size.
                P[n, convert(Int64, floor(v_place[i+1]/n))] = x[i]
            end
        end
    end

    # F stacks one n×n flow block per time step.
    F = Array{Any,2}(undef, n*(tau+w_max_int), n)
    F[:,:] = zeros(n*(tau+w_max_int), n)
    # (removed a dead Any-array allocation of F_cumul that was immediately
    # overwritten by this zeros matrix)
    F_cumul = zeros(n, n)

    # (removed a dead undef pre-allocation of P_sparse — it is fully
    # reassigned here)
    P_sparse = K .* repeat(P, 1, n)
    #######################if use sparse matrix, error will throw out in line 30 and 55
    #P_sparse=sparse(P_sparse)
    #F=sparse(F)
    ####################################################################
    for k = w_max_int+1:tau+w_max_int
        # Flow arriving at step k departed w_max_int steps earlier.
        R = S * F[(k-1-w_max_int)*n+1:(k-1)*n, :]

        # Zero out the entries of R addressed by the linear indices in D.
        for i = 1:n*n
            if D[i] <= (n*n)*n
                if rem(D[i], n*n) == 0
                    R[n*n, convert(Int64, floor(D[i]/(n*n)))] = 0
                else
                    R[convert(Int64, rem(D[i], (n*n))), convert(Int64, floor(D[i]/(n*n)))+1] = 0
                end
            end
        end

        F[(k-1)*n+1:k*n, :] = P_sparse * R

        # During the first w_max_int steps, inject the direct trips whose
        # W-entry equals the current offset k - w_max_int.
        if k <= 2*w_max_int
            for i = 1:n
                for j = 1:n
                    W_tmp[i,j] = W[i,j]
                    if W_tmp[i,j] != k - w_max_int
                        W_tmp[i,j] = 0
                    else
                        W_tmp[i,j] = P[i,j]
                    end
                end
            end
            W_tmp = sparse(W_tmp)
            F[(k-1)*n+1:k*n, :] = F[(k-1)*n+1:k*n, :] + W_tmp
        end
        F_cumul = F_cumul + F[(k-1)*n+1:k*n, :]
    end

    # Negate so that minimizing the objective maximizes the smallest entry.
    f = -minimum((minimum(F_cumul, dims=1)), dims=2)

    return f[1,1]
end

Thank you!

Welcome to JuMP!

Please provide a link if you post the same (or similar) question in multiple places: Function zero() with type"Any".

You should start by reading the performance tips in the Julia manual: https://docs.julialang.org/en/v1/manual/performance-tips/index.html.

The use of Any and global is likely the cause of the poor performance. As one work-around, instead of creating a sparse matrix to then multiply, you could try writing out the multiplication as a scalar expression.

If things are still slow after you’ve fixed the global variable issue, please take a read of PSA: make it easier to help you and simplify your example as much as possible. There are a few other things you could try to speed things up, but first things first.