This is best explained by an example.
```julia
type Test
    x::Vector{Float64}
end

import Base: getindex

# Forward Symbol indexing to a Val type parameter...
getindex(s::Test, sym::Symbol) = getindex(s, Val{sym})
# ...and dispatch on the specific symbol as a type.
getindex(s::Test, ::Type{Val{:test_sym}}) = reshape(view(s.x, 1:9), 3, 3)

function test_dispatch(s)
    s[:test_sym]            # Symbol argument; Val{sym} is built at runtime
end

function test_dispatch2(s)
    s[Val{:test_sym}]       # Val type passed explicitly as a constant
end

s = Test(collect(1:10))
@code_llvm test_dispatch(s)
@code_llvm test_dispatch2(s)
```
The first gives:

```llvm
; Function Attrs: uwtable
define %jl_value_t* @julia_test_dispatch_63361(%jl_value_t*) #0 {
top:
  %1 = call %jl_value_t*** @jl_get_ptls_states() #4
  %2 = alloca [5 x %jl_value_t*], align 8
  %.sub = getelementptr inbounds [5 x %jl_value_t*], [5 x %jl_value_t*]* %2, i64 0, i64 0
  %3 = getelementptr [5 x %jl_value_t*], [5 x %jl_value_t*]* %2, i64 0, i64 2
  %4 = bitcast %jl_value_t** %3 to i8*
  call void @llvm.memset.p0i8.i32(i8* %4, i8 0, i32 24, i32 8, i1 false)
  %5 = bitcast [5 x %jl_value_t*]* %2 to i64*
  store i64 6, i64* %5, align 8
  %6 = getelementptr [5 x %jl_value_t*], [5 x %jl_value_t*]* %2, i64 0, i64 1
  %7 = bitcast %jl_value_t*** %1 to i64*
  %8 = load i64, i64* %7, align 8
  %9 = bitcast %jl_value_t** %6 to i64*
  store i64 %8, i64* %9, align 8
  store %jl_value_t** %.sub, %jl_value_t*** %1, align 8
  %10 = getelementptr [5 x %jl_value_t*], [5 x %jl_value_t*]* %2, i64 0, i64 4
  %11 = getelementptr [5 x %jl_value_t*], [5 x %jl_value_t*]* %2, i64 0, i64 3
  store %jl_value_t* inttoptr (i64 2148817472 to %jl_value_t*), %jl_value_t** %3, align 8
  store %jl_value_t* %0, %jl_value_t** %11, align 8
  store %jl_value_t* inttoptr (i64 2240320304 to %jl_value_t*), %jl_value_t** %10, align 8
  %12 = call %jl_value_t* @jl_apply_generic(%jl_value_t** %3, i32 3)
  %13 = load i64, i64* %9, align 8
  store i64 %13, i64* %7, align 8
  ret %jl_value_t* %12
}
```
while the second gives:

```llvm
; Function Attrs: uwtable
define %jl_value_t* @julia_test_dispatch2_63423(%jl_value_t*) #0 {
top:
  %1 = call %jl_value_t* @julia_getindex_63362(%jl_value_t* %0, %jl_value_t* inttoptr (i64 2240320304 to %jl_value_t*)) #1
  ret %jl_value_t* %1
}
```
I would’ve thought the compiler would be able to infer the `Val` type, since the symbol is a compile-time constant, but from this output it’s clear that the Symbol version goes through dynamic dispatch (the `jl_apply_generic` call) unless you explicitly pass the `Val` type as the constant.
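Another way to see the same thing is through inference; this just re-runs the definitions above through `@code_warntype`:

```julia
# The Symbol method cannot infer the result of getindex(s, Val{sym}),
# because Val{sym} is not a concrete type until sym's value is known.
@code_warntype test_dispatch(s)    # return type is reported as Any
@code_warntype test_dispatch2(s)   # return type is inferred concretely
```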
Is there a fundamental reason why the compiler cannot infer this? It seems like it should be able to when the value is a literal constant. A quick macro would work around it, but it would be nice to have this “just work”.
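For concreteness, here’s a minimal sketch of the kind of macro I mean (the name `@valindex` is made up for illustration): it rewrites indexing with a literal Symbol into the `Val` form at parse time and leaves everything else alone.

```julia
# Hypothetical workaround macro: rewrite `obj[:sym]` (literal Symbol only)
# into `getindex(obj, Val{:sym})`, so the method is selected statically.
macro valindex(ex)
    if isa(ex, Expr) && ex.head == :ref && length(ex.args) == 2 && isa(ex.args[2], QuoteNode)
        obj, sym = ex.args[1], ex.args[2]
        return esc(:(getindex($obj, Val{$sym})))
    end
    return esc(ex)  # not a literal-Symbol ref; leave it untouched
end

function test_dispatch3(s)
    @valindex s[:test_sym]   # lowers to getindex(s, Val{:test_sym})
end
```

With that, `@code_llvm test_dispatch3(s)` should give the same direct call as `test_dispatch2`.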