I’ve now implemented support for dynamic-sized vectors on the dev branch of NLLSsolver. So you can now solve your problem using that solver as follows:
using NLLSsolver, Random, Static
function synthetic_data(N, n) # simulate a linear-regression data set
    # Draw the raw numerical features (N samples, n features)
    features = randn(N, n)
    # Min/max-normalize each column into [0, 1]
    col_lo = minimum(features, dims=1)
    col_hi = maximum(features, dims=1)
    features = (features .- col_lo) ./ (col_hi .- col_lo)
    # Labels: linear model, plus Gaussian noise scaled by 5, plus an intercept of 1.
    # NOTE: randn(n) is drawn before randn(N), matching the original RNG draw order.
    labels = features * randn(n) .+ randn(N) .* 5 .+ 1.0
    return features, labels
end
# One scalar least-squares residual per data point: predicts y from the
# feature vector X via a dot product with the (dynamic-length) weight variable.
struct LinearResidual{T} <: NLLSsolver.AbstractResidual
y::T # Target value for this data point
X::NLLSsolver.DynamicVector{T} # Feature vector (length matches the weight variable)
end
NLLSsolver.ndeps(::LinearResidual) = static(1) # Residual depends on 1 variable (the weight vector)
NLLSsolver.nres(::LinearResidual) = static(1) # Residual is a scalar (length 1)
NLLSsolver.varindices(::LinearResidual) = 1 # All residuals reference variable 1 (the single weight vector)
NLLSsolver.getvars(::LinearResidual{T}, vars::Vector) where T = (vars[1]::NLLSsolver.DynamicVector{T},) # Fetch the weight vector from the problem's variables
NLLSsolver.computeresidual(res::LinearResidual, w) = res.X' * w - res.y # Residual: prediction X'w minus target y
NLLSsolver.computeresjac(varflags, res::LinearResidual, w) = res.X' * w - res.y, res.X' # Return (residual, Jacobian w.r.t. w); the Jacobian of X'w is X'
const huberrobustifier = NLLSsolver.Huber2oKernel(6.0) # Huber robust kernel, width 6, to downweight outliers
NLLSsolver.robustkernel(::LinearResidual) = huberrobustifier
Base.eltype(::LinearResidual{T}) where T = T
# Generate some test data
Random.seed!(1); # fixed seed for reproducibility
N, n = 1000, 10; #size: 1000 data points, 10 features
X_raw, y_raw = synthetic_data(N, n); #simulate data
# Create the problem; variables are dynamic-length Float64 vectors
problem = NLLSsolver.NLLSProblem{NLLSsolver.DynamicVector{Float64}}()
NLLSsolver.addvariable!(problem, zeros(n)) # single variable: the weight vector, initialized to zero
for i in 1:N
NLLSsolver.addresidual!(problem, LinearResidual(y_raw[i], vec(X_raw[i,:]))) # one scalar residual per data point
end
# Optimize with Gauss-Newton, recording the per-iteration costs
result = NLLSsolver.optimize!(problem, NLLSsolver.NLLSOptions(iterator=NLLSsolver.gaussnewton, storecosts=true))
show(result)
I get a solution cost of about 24033, but perhaps there is some difference in our data.