Hi all,
I would like to use the LinearAlgebra package with Enzyme.
Since Julia's sparse-matrix package, SparseArrays, did not seem to be supported by Enzyme, I wrote my own sparse-matrix library.
When I looked at the Enzyme test suite, I found a BLAS test.
So, using that library, I wrote a program for the ICCG (incomplete Cholesky conjugate gradient) method to solve the system of equations.
I used BLAS.dot(), but in some cases it crashes the program.
In the program, resl0 = sqrt(BLAS.dot(res, res)) is not a problem, but sk = BLAS.dot(res, zk2) and pkapk = BLAS.dot(pk, zk) crash it.
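For concreteness, the shape of the reverse-mode call I have in mind is roughly the following. This is only a minimal sketch: the wrapper g, the vector length, and the shadow buffers are made up for illustration and are not taken from my actual driver code.

using LinearAlgebra
using Enzyme

# Hypothetical scalar-valued wrapper around BLAS.dot (illustration only).
g(u, v) = BLAS.dot(u, v)

u = rand(5);  du = zeros(5)   # primal vector and its shadow (gradient) buffer
v = rand(5);  dv = zeros(5)

# Reverse mode: Active marks the scalar return value,
# Duplicated carries the shadow vectors that receive the gradients.
Enzyme.autodiff(Reverse, g, Active, Duplicated(u, du), Duplicated(v, dv))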
I would like to know how much of LinearAlgebra Enzyme currently supports.
I am not even able to use basic functions such as det(), although I do not use them in this ICCG program.
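For example, for det() I mean a call of roughly this shape; again only a sketch, with a small dense matrix made up for illustration:

using LinearAlgebra
using Enzyme

A  = rand(3, 3)
dA = zeros(3, 3)   # shadow matrix that would receive d det(A) / dA

# det returns a scalar, so Active is the return activity.
Enzyme.autodiff(Reverse, det, Active, Duplicated(A, dA))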
If you know more about this, please let me know.
Part of the program is shown below:
using LinearAlgebra          # provides BLAS.dot

#------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------
function ICCG(A::MySparseMatrixCRS, b::Vector{Float64},
              x0::Vector{Float64}=zeros(Float64, length(b)),
              itr_max::Int64=100000, tol_abs::Float64=1.0e-13, tol_rel::Float64=1.0e-13)
    #
    small = 1.0e-15
    # Initialize working values and arrays
    n  = A.nrows
    pk = zeros(Float64, n)
    x  = copy(x0)                       # start from the initial guess
    # Calculate initial residual vector and its norm
    res = b - A * x0
    # L2-norm of the initial residual
    #resl0 = sqrt(my_dot_product(res, res))
    resl0 = sqrt(BLAS.dot(res, res))
    tol = tol_abs + tol_rel * resl0
    # Initially converged solution
    if resl0 < tol
        println("Iteration: 0, Final residual = $(resl0)")
        return x
    end
    # Locate the diagonal entry of each row in the CRS structure
    diag = Vector{Int64}(undef, n)
    Threads.@threads for i in 1 : n
        for k in A.row_ptr[i] : A.row_ptr[i+1] - 1
            if A.col_ind[k] == i
                diag[i] = k
            end
        end
    end
    # Calculate elements of the diagonal preconditioning matrix (incomplete Cholesky)
    d = Vector{Float64}(undef, n)
    for i in 1 : n
        d[i] = A.values[diag[i]]
        for k in A.row_ptr[i] : diag[i] - 1
            d[i] -= A.values[k]^2 * d[A.col_ind[k]]
        end
        d[i] = 1.0 / (d[i] + small)
    end
    # Start iterations
    s0 = 1.0e+20                        # large value so that bet is ~0 on the first pass
    itr_used = 0
    for l in 1 : itr_max
        # Forward substitution
        zk1 = forward_substitution(A, d, diag, res)
        # Backward substitution
        zk2 = backward_substitution(A, d, diag, zk1)
        # Inner product (res, zk2)
        sk = my_dot_product(res, zk2)
        #sk = BLAS.dot(res, zk2)        # crashes under Enzyme
        # Calculate beta
        bet = sk / s0
        # Calculate new search vector pk
        pk .= zk2 .+ bet .* pk
        # Matrix-vector product zk = A * pk
        zk = A * pk
        # Inner product (pk, A*pk)
        pkapk = my_dot_product(pk, zk)
        #pkapk = BLAS.dot(pk, zk)       # crashes under Enzyme
        # Calculate alpha
        alf = sk / pkapk
        # Update solution vector
        x .+= alf .* pk
        # Update residual vector
        res .-= alf .* zk
        # Squared L2-norm of the residual
        #resl = my_dot_product(res, res)
        resl = BLAS.dot(res, res)
        # Update
        s0 = sk
        itr_used += 1
        # Check convergence
        rsm = sqrt(resl)
        if rsm < tol
            #println("Convergence achieved after $(itr_used) iterations, residual = $(rsm).")
            return x
        end
    end
    #
    println("Max iterations reached. Not converged.")
    return x
end
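For reference, my_dot_product is my own plain-Julia replacement for BLAS.dot; a minimal sketch of an equivalent implementation (not necessarily my exact code) is:

# Hand-written dot product used above in place of BLAS.dot.
function my_dot_product(x::Vector{Float64}, y::Vector{Float64})
    s = 0.0
    @inbounds for i in eachindex(x, y)
        s += x[i] * y[i]
    end
    return s
end

Swapping BLAS.dot for this kind of hand-written loop is how I currently work around the crashes, but I would prefer to use the BLAS routines directly.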