Okay, I might have something. Using MultiFloats.jl, with a personal (dirty) fix for exp/log, I have the following.
Code that I ran
# ]add https://github.com/lrnv/ThorinDistributions.jl
# ]add https://github.com/lrnv/MultiFloats.jl
import Random, ThorinDistributions
using DoubleFloats
using ArbNumerics
using Quadmath
using MultiFloats
setprecision(256)
setprecision(ArbFloat,256)
types = (Float64,
         Double64,
         Float128,
         BigFloat,
         Float64x1,
         Float64x2,
         Float64x3,
         Float64x4,
         Float64x5,
         Float64x6,
         Float64x7,
         Float64x8)
n = 20
d = 2
m = Tuple(repeat([80],d))
N = 1000
Random.seed!(1234)
α = rand(n)
θ = reshape(10 .* rand(n*d),(n,d))
init = (0.0, 0.0, 0.0)
init_val = (zeros(m),zeros(m))
Values = Dict(T => init_val for T in types)
Errors = Dict(T => init for T in types)
for T in types
    print(T,"\n")
    alpha = T.(α);
    scales = T.(θ);
    dist = ThorinDistributions.MultivariateGammaConvolution(alpha,scales);
    samples = zeros(T,(2,N));
    Random.seed!(123);
    Random.rand!(dist,samples);
    E = ThorinDistributions.empirical_coefs(samples,m); # empirical coefs
    coefs = ThorinDistributions.get_coefficients(alpha, scales,m); # theoretical coefs
    aloc = @allocated ThorinDistributions.get_coefficients(alpha,scales,m);
    time = Base.@elapsed(ThorinDistributions.get_coefficients(alpha,scales,m));
    # All these should have the same magnitude, very small.
    Errors[T] = (Float64(sum((E - coefs)^2)),time,aloc);
    Values[T] = (Float64.(E), Float64.(coefs));
    print(Errors[T],"\n")
end
# Type => (error, time in seconds, allocations in bytes)
display(Errors)
Which gives me the following results:
Float64 => (2.68084e110, 0.121672, 286112.0)
Float128 => (4.51278e74, 1.84456, 467600.0)
Double64 => (2.10301e78, 0.516504, 467600.0)
MultiFloat{Float64,1} => (4.1367e109, 0.124149, 296656.0)
MultiFloat{Float64,2} => (3.84925e78, 0.26139, 486208.0)
MultiFloat{Float64,3} => (-6.13933e44, 0.66715, 676464.0)
MultiFloat{Float64,4} => (3.1583e14, 1.33451, 864800.0)
MultiFloat{Float64,5} => (2.367e-8, 2.39511, 1.05234e6)
MultiFloat{Float64,6} => (2.36701e-8, 4.46531, 1.25013e6)
MultiFloat{Float64,7} => (2.36701e-8, 6.50383, 1.44102e6)
MultiFloat{Float64,8} => (2.36701e-8, 9.84249, 1.63157e6)
BigFloat => (2.48274e-8, 12.6092, 8.16053e9)
It seems like Float64x5 might be enough for this case. The dirty fix I used is that exp/log were not implemented, so I converted to BigFloat and back to rely on the BigFloat implementation. @JeffreySarnoff Do you know if there is some cleaner algorithm that could be implemented for exp/log in this library?
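For concreteness, here is a minimal sketch of what that dirty fix looks like (not the exact code in my fork): it assumes MultiFloats.jl's built-in conversions to and from BigFloat, picks an arbitrary 512-bit working precision, and commits type piracy by adding methods to Base.exp/Base.log from user code.

# Sketch of the BigFloat round-trip workaround for exp/log on MultiFloat values.
# Assumes the stock MultiFloats.jl conversions to/from BigFloat; illustration only.
using MultiFloats

setprecision(BigFloat, 512)  # comfortably more bits than Float64x8 carries

# Convert up to BigFloat, call the MPFR routine, convert back down.
Base.exp(x::MultiFloat{Float64,N}) where {N} = MultiFloat{Float64,N}(exp(BigFloat(x)))
Base.log(x::MultiFloat{Float64,N}) where {N} = MultiFloat{Float64,N}(log(BigFloat(x)))

The round trip is obviously wasteful (every call allocates a BigFloat), which is why a native implementation would be preferable.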