First alternative:

```
x = A \ b # Uses the QR method
```

Second alternative:

```
Q, R = qr(A) # Compute the QR factorization to use the reduce form
x = inv(factorize(R)) * Q.'b # Notice that R is upper triangular
```

Those are the two ways you can use `Base.LinAlg.QRCompactWY`.

A more efficient method in general is the Cholesky decomposition. A more precise method usually is the singular value decomposition. Here are a few options which compute the information matrix as well (a critical component for variance–covariance estimates).

```
# Solve the least squares problem min ‖Aβ − y‖₂ via the thin QR factorization.
# Returns the coefficient estimate β and the information matrix (A'A)⁻¹,
# needed for variance–covariance estimates.
function solve(linearpredictor::AbstractMatrix, response::AbstractVector,
               method::Type{Base.LinAlg.QRCompactWY})
    Q, R = qr(linearpredictor)
    # R is upper triangular, so a triangular solve (\) is faster and more
    # accurate than forming inv(R) explicitly.
    β = factorize(R) \ (Q.'response)
    # (A'A)⁻¹ = R⁻¹ R⁻ᵀ, using the triangular structure of R.
    IM = inv(factorize(R)) * inv(factorize(R.'))
    β, IM
end
# SVD-based least squares; similar to using x = pinv(A) * b.
# Usually the most numerically robust option for ill-conditioned predictors.
# Returns the coefficient estimate β and the information matrix (A'A)⁻¹.
function solve(linearpredictor::AbstractMatrix, response::AbstractVector,
               method::Type{Base.LinAlg.SVD})
    SVD = svdfact(linearpredictor)
    # β = V Σ⁻¹ U' y
    β = SVD[:V] * diagm(1 ./ SVD[:S]) * SVD[:U].'response
    # (A'A)⁻¹ = V Σ⁻² V'
    IM = SVD[:V] * diagm(1 ./ SVD[:S].^2) * SVD[:Vt]
    β, IM
end
# Cholesky-based least squares via the normal equations A'A β = A'y.
# Fastest of the options here, but squares the condition number of A.
# Returns the coefficient estimate β and the information matrix (A'A)⁻¹.
function solve(linearpredictor::AbstractMatrix, response::AbstractVector,
               method::Type{Base.LinAlg.Cholesky})
    # The information matrix is (A'A)⁻¹, obtained directly from the factorization.
    IM = inv(cholfact!(linearpredictor.'linearpredictor))
    β = IM * linearpredictor.'response
    β, IM
end
# Bunch–Kaufman-based least squares via the normal equations A'A β = A'y.
# Works for symmetric indefinite A'A where Cholesky would fail.
# Returns the coefficient estimate β and the information matrix (A'A)⁻¹.
function solve(linearpredictor::AbstractMatrix, response::AbstractVector,
               method::Type{Base.LinAlg.BunchKaufman})
    IM = inv(bkfact(linearpredictor.'linearpredictor))
    β = IM * linearpredictor.'response
    β, IM
end
```

As a bonus, here is a quick way to make the system full rank by dropping linearly dependent predictors, which is efficient for overdetermined systems (more efficient than looking at the nonzero elements of R in the QR decomposition).

```
# Drop linearly dependent columns of `obj` using a pivoted Cholesky
# factorization of the Gram matrix obj'obj. Returns the (possibly reduced)
# matrix together with the indices of the columns that were kept.
function linearindependent(obj::AbstractMatrix{T}) where T <: Real
    # Pivoted Cholesky with a negative tolerance lets LAPACK pick the
    # numerical-rank cutoff and exposes the pivot order.
    gram = cholfact!(Symmetric(obj.'obj, :U), Val{true}, tol = -one(eltype(obj)))
    numrank = gram.rank
    if numrank < size(obj, 2)
        # Rank deficient: keep only the pivoted leading columns, in order.
        keep = sort!(gram.piv[1:numrank])
        return obj[:, keep], keep
    end
    # Full rank: every column survives.
    return obj, eachindex(gram.piv)
end
```