Yes, if enough information is available to the compiler, it is usually able to replace the 'reassignment' of an immutable value with an in-place stack mutation:
#+begin_src julia
using Accessors, StaticArrays, BenchmarkTools
f(tup) = Base.setindex(tup, 100, 3)
f((1, 1, 1, 1))
#+end_src
#+RESULTS:
: (1, 1, 100, 1)
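Since BenchmarkTools is already loaded, a quick allocation check (a sketch; exact numbers depend on your machine and Julia version) confirms that the update never touches the heap:
#+begin_src julia
@ballocated f((1, 1, 1, 1))  # expected: 0 bytes allocated
#+end_src

The generated LLVM IR shows why: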
#+begin_src julia
code_llvm(f, Tuple{NTuple{4, Int}}, debuginfo=:none)
#+end_src
#+RESULTS:
#+begin_example
define void @julia_f_5283([4 x i64]* noalias nocapture noundef nonnull sret([4 x i64]) align 8 dereferenceable(32) %0, [4 x i64]* nocapture noundef nonnull readonly align 8 dereferenceable(32) %1) #0 {
top:
%2 = getelementptr inbounds [4 x i64], [4 x i64]* %1, i64 0, i64 3
%3 = bitcast [4 x i64]* %1 to <2 x i64>*
%4 = load <2 x i64>, <2 x i64>* %3, align 8
%5 = load i64, i64* %2, align 8
%6 = bitcast [4 x i64]* %0 to <2 x i64>*
store <2 x i64> %4, <2 x i64>* %6, align 8
%.sroa.3.0..sroa_idx2 = getelementptr inbounds [4 x i64], [4 x i64]* %0, i64 0, i64 2
store i64 100, i64* %.sroa.3.0..sroa_idx2, align 8
%.sroa.4.0..sroa_idx3 = getelementptr inbounds [4 x i64], [4 x i64]* %0, i64 0, i64 3
store i64 %5, i64* %.sroa.4.0..sroa_idx3, align 8
ret void
}
#+end_example
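The same optimization applies to field updates on immutable structs, e.g. through Accessors, which is already imported above. A minimal sketch, using a made-up Point type:
#+begin_src julia
struct Point
    x::Int
    y::Int
end

# @set builds a new Point; for isbits fields this compiles to plain stores
move_right(p::Point) = @set p.x = p.x + 1
move_right(Point(1, 2))  # Point(2, 2)
#+end_src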
However, if the immutable contains any mutables, then out of an abundance of caution the compiler will instead copy each element (a GC-tracked pointer) into the new stack object one at a time, 'repacking' the memory. Note the stores through the %.repack values here; repack is not an actual instruction, just the name SROA gives these element-wise slots. Since the replacement element is now itself a Ref rather than a literal, f is extended with a two-argument method:
#+begin_src julia
f(v, x) = Base.setindex(v, x, 3)  # two-argument method: the new element is a Ref, not a literal
code_llvm(f, Tuple{SVector{4, Base.RefValue{Int}}, Base.RefValue{Int}}, debuginfo=:none)
#+end_src
#+RESULTS:
#+begin_example
define void @julia_f_5838([1 x [4 x {}*]]* noalias nocapture noundef nonnull sret([1 x [4 x {}*]]) align 8 dereferenceable(32) %0, [1 x [4 x {}*]]* nocapture noundef nonnull readonly align 8 dereferenceable(32) %1, {}* noundef nonnull align 8 dereferenceable(8) %2) #0 {
top:
%3 = getelementptr inbounds [1 x [4 x {}*]], [1 x [4 x {}*]]* %1, i64 0, i64 0, i64 0
%4 = load atomic {}*, {}** %3 unordered, align 8
%5 = getelementptr inbounds [1 x [4 x {}*]], [1 x [4 x {}*]]* %1, i64 0, i64 0, i64 1
%6 = load atomic {}*, {}** %5 unordered, align 8
%7 = getelementptr inbounds [1 x [4 x {}*]], [1 x [4 x {}*]]* %1, i64 0, i64 0, i64 3
%8 = load atomic {}*, {}** %7 unordered, align 8
%.repack = getelementptr inbounds [1 x [4 x {}*]], [1 x [4 x {}*]]* %0, i64 0, i64 0, i64 0
store {}* %4, {}** %.repack, align 8
%.repack4 = getelementptr inbounds [1 x [4 x {}*]], [1 x [4 x {}*]]* %0, i64 0, i64 0, i64 1
store {}* %6, {}** %.repack4, align 8
%.repack6 = getelementptr inbounds [1 x [4 x {}*]], [1 x [4 x {}*]]* %0, i64 0, i64 0, i64 2
store {}* %2, {}** %.repack6, align 8
%.repack8 = getelementptr inbounds [1 x [4 x {}*]], [1 x [4 x {}*]]* %0, i64 0, i64 0, i64 3
store {}* %8, {}** %.repack8, align 8
ret void
}
#+end_example
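The dividing line between the two cases is whether the elements are isbits and can therefore be stored inline:
#+begin_src julia
isbitstype(NTuple{4, Int})                  # true:  elements stored inline on the stack
isbitstype(SVector{4, Base.RefValue{Int}})  # false: elements are pointers to heap objects
#+end_src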