How does broadcast! work? Tips for memory efficiency while using 'kron'

I was trying to use broadcast! in place of broadcast in the following code:

# Kronecker (tensor) product of a collection of matrices, left to right
function _tensor_product_(st)
    ten = st[1]
    for s in st[2:end]
        ten = kron(ten, s)
    end
    return ten
end


# Accumulates, for each elem in arr[ind1:ind2], the tensor product over
# no_of_e sites of 2x2 identities with [0. 1.; 1. 0.] placed at the sites
# listed in elem.
@noinline function indi(arr, ind1, ind2, no_of_e)
    linit = [speye(2) for e = 1:no_of_e]
    op = spzeros(2^no_of_e, 2^no_of_e)
    op1 = spzeros(2^no_of_e, 2^no_of_e)
    for elem in arr[ind1:ind2]
        println(" elem ", elem)
        for e in elem
            linit[e] = SparseMatrixCSC([0. 1.; 1. 0.])
        end
        tp = _tensor_product_(linit)
        op += tp
        # op1 = broadcast(+, op1, tp)
        broadcast!(+, op1, op1, tp)
        # resetting the configuration for the next iteration
        for i = 1:length(linit)
            linit[i] = speye(2)
        end
        println(op1 == op)
        gc()
    end
    return op
end

r = [[1, 3, 6, 4], [2, 4, 7, 5], [6, 8, 1, 9], [7, 9, 2, 10]]
indi(r, 1, 4, 14)

The behavior of broadcast and broadcast! is not the same: the former works as I expect, but the latter does not replicate it (I was trying to do the addition in place).
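To narrow it down, here is a minimal, self-contained check of whether passing op1 as both the destination and one of the arguments is what triggers the discrepancy. I am not sure the aliasing is actually the cause; this is Julia 0.6 syntax like the code above, and A and B are placeholder matrices, not the operators from my routine.

A = sprand(8, 8, 0.3)
B = sprand(8, 8, 0.3)

# out-of-place reference result
ref = broadcast(+, A, B)

# in place, with the destination aliasing the first argument (as in indi)
C = copy(A)
broadcast!(+, C, C, B)
println(C == ref)

# in place, with a destination distinct from both arguments
D = spzeros(8, 8)
broadcast!(+, D, A, B)
println(D == ref)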

Also, with the input below the memory usage shoots up. One way to tackle this could be to use mmap and store the result to disk as the tensor product is being built. How can mmap be used in conjunction with sparse matrices, and are there any general techniques for making the routine above more memory efficient? (A sketch of what I had in mind follows the example below.)

# a = [[1, 3, 6, 4], [2, 4, 7, 5], [6, 8, 11, 9], [7, 9, 12, 10], [11, 13, 16, 14], [12, 14, 17, 15], [16, 18, 1, 19], [17, 19, 2, 20]]
# indi(a, 1, 4, 28)
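Is something along these lines what using Mmap with a sparse matrix would look like? This is only an untested sketch of what I had in mind (Julia 0.6, where Mmap lives in Base so no import is needed): copy each of the three internal arrays of a SparseMatrixCSC into its own memory-mapped file and rebuild the matrix on top of the mapped vectors. The helper name mmap_sparse and the file prefix are made up, and any later arithmetic on the result would still allocate ordinary in-memory sparse matrices.

function mmap_sparse(prefix::AbstractString, S::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti}
    # one file per internal array; "w+" creates the files, Mmap.mmap grows them
    colptr = open(io -> Mmap.mmap(io, Vector{Ti}, (length(S.colptr),)), prefix * "_colptr.bin", "w+")
    rowval = open(io -> Mmap.mmap(io, Vector{Ti}, (length(S.rowval),)), prefix * "_rowval.bin", "w+")
    nzval  = open(io -> Mmap.mmap(io, Vector{Tv}, (length(S.nzval),)),  prefix * "_nzval.bin",  "w+")
    copy!(colptr, S.colptr)
    copy!(rowval, S.rowval)
    copy!(nzval,  S.nzval)
    # wrap the memory-mapped vectors back into a SparseMatrixCSC
    return SparseMatrixCSC(S.m, S.n, colptr, rowval, nzval)
end

# e.g. op_on_disk = mmap_sparse("op", indi(a, 1, 4, 28))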

Thanks!

Probably https://github.com/JuliaLang/julia/issues/21693. Should be fixed on master by https://github.com/JuliaLang/julia/pull/25890.
