WARNING: isnan(x::AbstractArray{T}) where T <: Number is deprecated, use isnan.(x) instead

Hi guys
I’m trying to run a script file again and suddenly I get this warning message. I couldn’t figure out where the warning comes from and I don’t know what to do. Can somebody help me fix it? I’m working with Julia 0.6.4 in Atom 1.29. My code is shown below:

script file: sample.jl


# MDP problem description

# Get transition matrices
Tensor = Dict()
f = open(joinpath(@__DIR__, "Tensor1.txt"), "r+")
Tensor["1"] = readdlm(f)
g = open(joinpath(@__DIR__, "Tensor2.txt"), "r+")
Tensor["2"] = readdlm(g)

# Get reward function
Rewards = Dict()
h = open(joinpath(@__DIR__, "Rewards1.txt"), "r+")
Rewards["1"]   = readdlm(h)
i = open(joinpath(@__DIR__, "Rewards2.txt"), "r+")
Rewards["2"]   = readdlm(i)

# State space description
States = Dict()
States["#states"]  = size(Tensor["1"],1)
States["FeatSize"] = [2,162]

Actions = Dict()
Actions["#actions"] = 2

Constraints = Dict()
Constraints["#constraints"] = 1
Constraints["States"] = [52]

# Initial condition
init = 1
λ = 0.7

VXpolicy = CMDP(States,Actions,
                Tensor,Rewards,
                λ,init,zeros(3),Constraints)

(Values,Policy) = SolveCMDP(VXpolicy);

TotalStates = States["#states"]

MC = zeros(TotalStates,TotalStates)

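# Row i of MC is the transition row (from Tensor) of the action the policy chooses in state i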
for i = 1:TotalStates
   ind = Policy[i]
   MC[i,:] =  Tensor["$(ind)"][i,:]
end

script file: MDP_LP.jl

using Convex
using SCS

type CMDP

    States::Dict{Any,Any}        # Dictionary describing states
    Actions::Dict{Any,Any}       # Dictionary describing actions

    Tensor::Dict{Any,Any}        # Dictionary for Tensor
    Rewards::Dict{Any,Any}       # Dictionary for Rewards

    λ::Float64                   # Discount factor

    InitState::Int               # Initial state
    InitStateArray::Array{Int,1} # Initial features

    Constraints::Dict{Any,Any}   # Array of state constraints (index of state)

end

function SolveCMDP(problem::CMDP)

    Ns = problem.States["#states"]
    Na = problem.Actions["#actions"]

    StateFeat = problem.States["FeatSize"]

    Tensor    = problem.Tensor
    Rewards   = problem.Rewards

    init      = problem.InitState
    Findex    = problem.InitStateArray

    NCon      = problem.Constraints["#constraints"]
    StateCon  = problem.Constraints["States"]

    λ = problem.λ

    if(init == 0)
        # Feature subscripts (reversed to column-major order) -> linear state index
        init = sub2ind((StateFeat[end:-1:1]...), Findex[end:-1:1]...)
    end

    # Note that the variables are the occupational measures
    X = Variable(Ns*Na)

    # Objective
    C = zeros(Ns*Na,1)
    for i=1:Na
        C[(i-1)*Ns+1:i*Ns,:] = Rewards["$(i)"]
    end

    # Equality constraint
    I = zeros(Ns,Ns*Na)
    T = zeros(Ns,Ns*Na)
    for i=1:Na
        I[:,(i-1)*Ns+1:i*Ns] = eye(Ns)
        T[:,(i-1)*Ns+1:i*Ns] = transpose(Tensor["$(i)"])
    end

    Aeq       = I - T
    Beq       = zeros(Ns,1)
    Beq[init] = 1

    if NCon > 0
        for i=1:NCon
            AeqC = zeros(Na,Ns*Na)
            BeqC = zeros(Na,1)

            for j=1:Na
                # State constraint index
                state_con_id         = (j-1)*Ns + StateCon[i]
                AeqC[j,state_con_id] = 1
            end

            # Append the rows for this constrained state once, after they are filled in
            Aeq = vcat(Aeq,AeqC)
            Beq = vcat(Beq,BeqC)
        end
    end

    Aineq     = eye(Ns*Na)
    Bineq     = zeros(Ns*Na,1)

    Constraint = [Aineq*X >= Bineq;Aeq*X == Beq]
    p = maximize(C'*X,Constraint)

    solve!(p, SCSSolver(verbose=0,max_iters=50000))

    Values = -Constraint[2].dual

    OccMeas = zeros(Ns,Na)
    ActDist = zeros(Ns,Na)
    OptAct  = zeros(Int,Ns)

    sol     = X.value

    for i=1:Na
        OccMeas[:,i] = sol[(i-1)*Ns+1:i*Ns]
    end

    for i=1:Ns
        normalizer = sum(OccMeas[i,:])
        for j=1:Na
            ActDist[i,j] = OccMeas[i,j]/normalizer
        end
        (maxval,index) = findmax(ActDist[i,:])
        OptAct[i] = index
    end

    return Values,OptAct
end

WARNING: isnan(x::AbstractArray{T}) where T <: Number is deprecated, use isnan.(x) instead.
Stacktrace:
[1] depwarn(::String, ::Symbol) at ./deprecated.jl:70
[2] isnan(::Array{Float64,1}) at ./deprecated.jl:57
[3] populate_solution!(::SCS.SCSMathProgModel, ::Convex.Problem, ::Dict{UInt64,Tuple{Int64,Int64}}, ::Array{Convex.ConicConstr,1}) at /Users/marcelord12/.julia/v0.6/Convex/src/solution.jl:118
[4] #solve!#25(::Bool, ::Bool, ::Bool, ::Function, ::Convex.Problem) at /Users/marcelord12/.julia/v0.6/Convex/src/solution.jl:38
[5] #solve!#24(::Array{Any,1}, ::Function, ::Convex.Problem, ::SCS.SCSSolver) at /Users/marcelord12/.julia/v0.6/Convex/src/solution.jl:13
[6] SolveCMDP(::CMDP) at /Users/marcelord12/Documents/phd2018/mppo/thesis/MDP/LP/MDP_LP.jl:87
[7] include_string(::String, ::String) at ./loading.jl:522
[8] include_string(::String, ::String, ::Int64) at /Users/marcelord12/.julia/v0.6/CodeTools/src/eval.jl:30
[9] include_string(::Module, ::String, ::String, ::Int64, ::Vararg{Int64,N} where N) at /Users/marcelord12/.julia/v0.6/CodeTools/src/eval.jl:34
[10] (::Atom.##102#107{String,Int64,String})() at /Users/marcelord12/.julia/v0.6/Atom/src/eval.jl:82
[11] withpath(::Atom.##102#107{String,Int64,String}, ::String) at /Users/marcelord12/.julia/v0.6/CodeTools/src/utils.jl:30
[12] withpath(::Function, ::String) at /Users/marcelord12/.julia/v0.6/Atom/src/eval.jl:38
[13] hideprompt(::Atom.##101#106{String,Int64,String}) at /Users/marcelord12/.julia/v0.6/Atom/src/repl.jl:67
[14] macro expansion at /Users/marcelord12/.julia/v0.6/Atom/src/eval.jl:80 [inlined]
[15] (::Atom.##100#105{Dict{String,Any}})() at ./task.jl:80
while loading /Users/marcelord12/Documents/phd2018/mppo/thesis/MDP/LP/Sample.jl, in expression starting on line 39
WARNING: Problem status UnknownError; solution may be inaccurate.
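
If I read the message correctly, the deprecation is about calling isnan on a whole array instead of broadcasting it element-wise. A minimal sketch of what I think the warning refers to (just to illustrate the message, this snippet is not from my code):

x = [1.0, NaN, 3.0]

isnan(x)     # deprecated for array arguments in Julia 0.6, prints the same warning
isnan.(x)    # broadcast form the warning suggests; element-wise, returns a BitArray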

The deprecation warning comes from inside Convex.jl, so it seems that package needs to be updated.
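
The warning itself is harmless; it just reports that Convex.jl still calls isnan on an array (the stack trace points at populate_solution! in Convex/src/solution.jl). Assuming you installed Convex through the package manager in the usual way, something like this in the REPL should show the installed version and pull in a newer release:

Pkg.installed("Convex")   # currently installed version of Convex.jl
Pkg.update()              # update installed packages to the latest compatible releases

The second message, "Problem status UnknownError; solution may be inaccurate", comes from the solver rather than from the deprecation, so after updating it may be worth re-running and inspecting p.status (and p.optval) on the Problem object after solve! to check whether the problem actually solved.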