If you want something scalable, try MadNLP. The code below implements the problem (minimize xᵀQx subject to ‖x‖² ≥ 1) directly against MadNLP's NLPModels-style callback interface, with dense Jacobian and Hessian callbacks:

```julia
using MadNLP, LinearAlgebra

struct MyProblem{T, VT, MT} <: MadNLP.AbstractNLPModel{T, VT}
    Q::MT
    counters::MadNLP.NLPModels.Counters
    meta::MadNLP.NLPModelMeta{T, VT}
end

MyProblem(Q) = MyProblem(
    Q,
    MadNLP.NLPModels.Counters(),
    MadNLP.NLPModelMeta(
        size(Q, 1);                              # number of variables
        x0 = fill!(similar(Q, size(Q, 1)), 10),  # starting point
        ncon = 1,
        ucon = fill!(similar(Q, 1), 0),          # c(x) ≤ 0 (lcon defaults to -Inf)
    ),
)
# Objective, gradient, constraint, and dense Jacobian/Hessian callbacks.
MadNLP.obj(m::MyProblem, x::AbstractVector) = dot(x, m.Q, x)
MadNLP.grad!(m::MyProblem, x::AbstractVector, y::AbstractVector) = (mul!(y, m.Q, x); y .*= 2)  # ∇f = 2Qx (Q symmetric)
MadNLP.cons!(m::MyProblem, x::AbstractVector, y::AbstractVector) = fill!(y, 1 - dot(x, x))
MadNLP.jac_dense!(m::MyProblem, x::AbstractVector, j::AbstractMatrix) = (j .= x'; j .*= -2)    # ∇c = -2xᵀ
function MadNLP.hess_dense!(m::MyProblem, x::AbstractVector, y::AbstractVector, h::AbstractMatrix; obj_weight = one(eltype(x)))
    n = length(x)                      # don't rely on the global n
    copyto!(h, m.Q)
    h .*= 2 * obj_weight               # ∇²f = 2Q, matching the factor of 2 in grad!
    view(h, 1:n+1:n^2) .-= 2 * y[1]    # plus y₁∇²c on the diagonal, with ∇²c = -2I
    return h
end
```
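Those factors of 2 are easy to get wrong, so before trusting the solve I'd check the hand-written derivatives against AD on a tiny instance. A minimal sketch, assuming ForwardDiff is installed (`m_test`, `x`, `g`, `h` are throwaway names of mine, not part of the model):

```julia
using ForwardDiff

m_test = MyProblem([2.0 1.0; 1.0 4.0])   # small symmetric test matrix
x = [0.3, -0.7]

g = zeros(2); MadNLP.grad!(m_test, x, g)
@assert g ≈ ForwardDiff.gradient(z -> dot(z, m_test.Q, z), x)

h = zeros(2, 2); MadNLP.hess_dense!(m_test, x, [0.0], h)  # zero multiplier isolates ∇²f
@assert h ≈ ForwardDiff.hessian(z -> dot(z, m_test.Q, z), x)
```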
```julia
n = 100
A = 10 * rand(Float64, (n, n))
P = A' * A   # symmetric positive definite, so the objective is convex
m = MyProblem(P)
sol = madnlp(m; lapack_algorithm = MadNLP.CHOLESKY, max_iter = 10000)
```
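This problem has a closed-form answer to check against: with P positive definite, the minimum of xᵀPx over ‖x‖ ≥ 1 is the smallest eigenvalue of P, attained at the corresponding unit eigenvector. Assuming `madnlp` returns its usual stats object (the `status`, `objective`, and `solution` fields below are what I'd expect; adjust if the API differs in your version):

```julia
sol.status                                             # expect SOLVE_SUCCEEDED
@assert isapprox(sol.objective, eigmin(P); rtol = 1e-6)
@assert isapprox(norm(sol.solution), 1; rtol = 1e-6)   # the norm constraint is active
```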
Got an NVIDIA GPU? The same model runs on the device; you only need GPU-specific methods for the callbacks that would otherwise fall back to scalar indexing:

```julia
using MadNLPGPU, CUDA

# The generic 3-arg dot(x, Q, x) would hit scalar indexing on CuArrays,
# so compute the objective with an explicit matrix-vector product instead.
MadNLP.obj(m::MyProblem, x::CuVector) = dot(x, m.Q * x)

function MadNLP.hess_dense!(m::MyProblem, x::CuVector, y::CuVector, h::CuMatrix; obj_weight = one(eltype(x)))
    n = length(x)
    copyto!(h, m.Q)
    h .*= 2 * obj_weight
    CUDA.@allowscalar view(h, 1:n+1:n^2) .-= 2 * y[1]  # y[1] is a scalar read from device memory
    return h
end
m = MyProblem(CuArray(P))
sol_gpu = madnlp(m; lapack_algorithm = MadNLP.CHOLESKY, max_iter = 10000)
```
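As a last check, the device solve should agree with the CPU one (`sol` and `sol_gpu` are the stats objects captured above; the minimizer is only unique up to sign, so compare objectives rather than solution vectors):

```julia
@assert isapprox(sol_gpu.objective, sol.objective; rtol = 1e-6)
norm(Array(sol_gpu.solution))   # copy back to host; should be ≈ 1
```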