For what it’s worth
julia> regions = ["LO","AQ"];
julia> years = collect(2015:2016);
julia> forTypes = ["hf","con"];
julia> vol = Dict(("LO","hf",2015) => 100.0,
("LO","con",2015) => 300.0,
("AQ","hf",2015) => 500.0,
("AQ","con",2015) => 700.0,
);
julia> m = Dict(("LO","hf",2015) => 0.11,
("LO","hf",2016) => 0.12,
("LO","con",2015) => 0.13,
("LO","con",2016) => 0.14,
("AQ","hf",2015) => 0.15,
("AQ","hf",2016) => 0.16,
("AQ","con",2015) => 0.17,
("AQ","con",2016) => 0.18,
);
julia> using Base.Cartesian
julia> iter = (regions, forTypes, years[2:end]);
julia> function nloop!(vol::Dict{Tuple{I1,I2,I3},T}, m::Dict{Tuple{I1,I2,I3},T}, iter::Tuple{Vector{I1},Vector{I2},Vector{I3}}) where {I1,I2,I3,T}
@nloops 3 i d -> begin
iter[d]
end begin
vol[i_1, i_2, i_3] = vol[i_1,i_2,i_3-1] * (1-m[i_1,i_2,i_3-1])
end
vol
end
nloop! (generic function with 1 method)
julia> function listcomp!(vol::Dict{Tuple{I1,I2,I3},T}, m::Dict{Tuple{I1,I2,I3},T}, regions::Vector{I1}, forTypes::Vector{I2}, years::Vector{I3}) where {I1,I2,I3,T}
[vol[r,ft,y] = vol[r,ft,y-1] * (1-m[r,ft,y-1]) for r in regions, ft in forTypes, y in years[2:end] ]
vol
end
listcomp! (generic function with 1 method)
julia> nloop!(vol, m, iter)
Dict{Tuple{String,String,Int64},Float64} with 8 entries:
("AQ", "con", 2015) => 700.0
("LO", "hf", 2015) => 100.0
("LO", "hf", 2016) => 89.0
("AQ", "con", 2016) => 581.0
("AQ", "hf", 2016) => 425.0
("LO", "con", 2015) => 300.0
("AQ", "hf", 2015) => 500.0
("LO", "con", 2016) => 261.0
julia> #reset vol
vol = Dict(("LO","hf",2015) => 100.0,
("LO","con",2015) => 300.0,
("AQ","hf",2015) => 500.0,
("AQ","con",2015) => 700.0,
);
julia> listcomp!(vol, m, regions, forTypes, years)
Dict{Tuple{String,String,Int64},Float64} with 8 entries:
("AQ", "con", 2015) => 700.0
("LO", "hf", 2015) => 100.0
("LO", "hf", 2016) => 89.0
("AQ", "con", 2016) => 581.0
("AQ", "hf", 2016) => 425.0
("LO", "con", 2015) => 300.0
("AQ", "hf", 2015) => 500.0
("LO", "con", 2016) => 261.0
julia> using BenchmarkTools
julia> @benchmark nloop!($vol, $m, $iter)
BenchmarkTools.Trial:
memory estimate: 1.47 KiB
allocs estimate: 82
--------------
minimum time: 2.561 μs (0.00% GC)
median time: 2.649 μs (0.00% GC)
mean time: 2.793 μs (3.11% GC)
maximum time: 213.791 μs (94.72% GC)
--------------
samples: 10000
evals/sample: 9
julia> @benchmark listcomp!($vol, $m, $regions, $forTypes, $years)
BenchmarkTools.Trial:
memory estimate: 2.91 KiB
allocs estimate: 114
--------------
minimum time: 2.945 μs (0.00% GC)
median time: 3.062 μs (0.00% GC)
mean time: 3.348 μs (6.17% GC)
maximum time: 211.250 μs (95.48% GC)
--------------
samples: 10000
evals/sample: 9
You could try benchmarking at sizes closer to what you’re using.
However, readability is definitely important too.