I think this version is not optimal if n is very small compared to the size of the array since you make a full sort and just retain a few smallest values. At the limit, imagine to get the lowest value of an array you make a sort first. That does not sound right. A dirty write-up like the following
using BenchmarkTools
A=rand(100,100,10,40,10);
function arg_n_smallest_values(A::AbstractArray{T,N}, n::Integer) where {T,N}
perm = sortperm(vec(A))
ci = CartesianIndices(A)
return ci[perm[1:n]]
end
function argsmallest(A::AbstractArray{T,N}, n::Integer) where {T,N}
# should someone ask more elements than array size, just sort array
if n>= length(vec(A))
ind=collect(1:length(vec(A)))
ind=sortperm(A[ind])
return CartesianIndices(A)[ind]
end
# otherwise
ind=collect(1:n)
mymax=maximum(A[ind])
for j=n+1:length(vec(A))
if A[j]<mymax
getout=findmax(A[ind])[2]
ind[getout]=j
mymax=maximum(A[ind])
end
end
ind=ind[sortperm(A[ind])]
return CartesianIndices(A)[ind]
end
@btime arg_n_smallest_values(A,5) # 16.61 s
@btime argsmallest(A,5) # 42 ms
already greatly improves the timing, but I’m sure there are even better ways ?