In the following MWE, all interpolation evaluations cause 1 allocation. I want to get rid of this allocation for optimal performance. What should I do to achieve this?
using Interpolations, BenchmarkTools
# Create test data
n = 5  # Size of test matrix
alphas = range(-10, 10, n)  # Angle of attack range: -10° to 10°
d_trailing_edge_angles = range(0, 30, n)  # Trailing edge deflection: 0° to 30°
# Create cl_matrix with a simple aerodynamic model
cl_matrix = zeros(n, n)
for (i, alpha) in enumerate(alphas)
    for (j, delta) in enumerate(d_trailing_edge_angles)
        # Simple linear combination of angle of attack and trailing edge deflection
        cl_matrix[i,j] = 2π * (deg2rad(alpha) + 0.3 * deg2rad(delta))
    end
end
cl_interp = extrapolate(scale(interpolate(cl_matrix, BSpline(Linear())), alphas, d_trailing_edge_angles), NaN)
@btime cl_interp[0.0, 0.0]
cl_interp = scale(interpolate(cl_matrix, BSpline(Linear())), alphas, d_trailing_edge_angles)
@btime cl_interp[0.0, 0.0]
cl_interp = interpolate((alphas, d_trailing_edge_angles), cl_matrix, Gridded(Linear()))
@btime cl_interp[0.0, 0.0]
Output:
  62.233 ns (1 allocation: 16 bytes)
  61.350 ns (1 allocation: 16 bytes)
  84.053 ns (1 allocation: 16 bytes)