You could try to use eachslice. It isn’t quite as elegant as you would hope but here goes:
julia> a = zeros(2,2);
julia> for (i, b) in enumerate(eachslice(a; dims=1))
b .= i
end
julia> a
2×2 Array{Float64,2}:
1.0 1.0
2.0 2.0
julia> for (i, b) in enumerate(eachslice(a; dims=2))
b .= i
end
julia> a
2×2 Array{Float64,2}:
1.0 2.0
1.0 2.0
Do you mind if I nitpick your code, by the way? (I love nitpicking code, you see…)
Don’t do this:
for j in 1:2
a[:,j] = j * ones(2)
end
On the right hand side here you allocate, not one, but two unnecessary temporary arrays. First, ones(2) allocates, and the multiplication with j allocates another array. Instead, just write
for j in 1:2
a[:,j] .= j
end
No allocations! (Also, size(a)[dim] should be size(a, dim), which is a really tiny nit.)
First of all, thank you for your fast answer and the nitpicking.
Does it make sense to use size(a, dim) even if size would be called more than once?
And since you said you love nitpicking code, I’ve written some kind of n-dimensional gradient function and was wondering if this could get more efficient?
# Compute finite-difference derivatives of `array` and return a vector of
# arrays, one per dimension, each the same size as `array`.
#
# Bug fix: the original zipped the slice iterators in the wrong order, so the
# derivative was written back into the *input* `array` (mutating it) while the
# returned `grad_array` stayed all zeros. The output slice must come first,
# because `finite_derivative!` mutates its first argument.
#
# NOTE(review): `eachslice(...; dims = dim)` *drops* dimension `dim`, so for a
# 2-D array `grad_array[1]` holds the derivative taken along dimension 2 (and
# vice versa) — confirm this labelling is what the caller expects.
function gradient(array::AbstractArray)
    N = size(array)
    grad_array = [zeros(N) for _ in 1:ndims(array)]
    for dim in 1:ndims(array)
        pairs = zip(eachslice(grad_array[dim]; dims = dim),
                    eachslice(array; dims = dim))
        for (grad, field) in pairs
            # Unit grid spacing, matching the original call site.
            finite_derivative!(grad, field, 1.0)
        end
    end
    return grad_array
end
"""
    finite_derivative!(derivative, time_series, Δt::AbstractFloat = 1.0)

Fill `derivative` in place with the finite-difference derivative of
`time_series` sampled at uniform spacing `Δt`: a forward difference at the
first point, central differences in the interior, and a backward difference
at the last point. Returns `derivative`, following the convention that
mutating functions return their mutated argument.

Throws `ArgumentError` when `time_series` has fewer than two points — no
difference can be formed (the original code raised a raw `BoundsError`).
"""
function finite_derivative!(derivative, time_series, Δt::AbstractFloat = 1.0)
    T = length(time_series)
    T >= 2 || throw(ArgumentError("time_series needs at least 2 points, got $T"))
    # Forward difference at the left boundary (first-order accurate).
    derivative[1] = (time_series[2] - time_series[1]) / Δt
    # Central differences in the interior (second-order accurate).
    for t in 2:T-1
        derivative[t] = (time_series[t+1] - time_series[t-1]) / 2Δt
    end
    # Backward difference at the right boundary.
    derivative[T] = (time_series[T] - time_series[T-1]) / Δt
    return derivative
end