CUDA.jl MethodError (no method matching personality!) on basic array operations

I’m getting

ERROR: MethodError: no method matching personality!(::LLVM.Function, ::Nothing)

when attempting to broadcast-multiply a CuArray by a scalar. Any ideas what could be wrong?
The full error and stack trace are below:

julia> using CUDA

julia> CUDA.version()
v"11.0.0"

julia> a = CUDA.fill(1.0f0, 100000);

julia> a .*= 2
ERROR: MethodError: no method matching personality!(::LLVM.Function, ::Nothing)
Closest candidates are:
  personality!(::LLVM.Function, ::LLVM.Function) at C:\Users\MAL\.julia\packages\LLVM\wQgrk\src\core\function.jl:16
Stacktrace:
 [1] macro expansion at C:\Users\MAL\.julia\packages\GPUCompiler\FnKRy\src\irgen.jl:353 [inlined]
 [2] macro expansion at C:\Users\MAL\.julia\packages\TimerOutputs\dVnaw\src\TimerOutput.jl:206 [inlined]
 [3] irgen(::GPUCompiler.CompilerJob{GPUCompiler.PTXCompilerTarget,CUDA.CUDACompilerParams}, ::Core.MethodInstance, ::UInt64) at C:\Users\MAL\.julia\packages\GPUCompiler\FnKRy\src\irgen.jl:335
 [4] macro expansion at C:\Users\MAL\.julia\packages\TimerOutputs\dVnaw\src\TimerOutput.jl:206 [inlined]
 [5] macro expansion at C:\Users\MAL\.julia\packages\GPUCompiler\FnKRy\src\driver.jl:98 [inlined]
 [6] macro expansion at C:\Users\MAL\.julia\packages\TimerOutputs\dVnaw\src\TimerOutput.jl:206 [inlined]
 [7] codegen(::Symbol, ::GPUCompiler.CompilerJob{GPUCompiler.PTXCompilerTarget,CUDA.CUDACompilerParams}; libraries::Bool, deferred_codegen::Bool, optimize::Bool, strip::Bool, validate::Bool) at C:\Users\MAL\.julia\packages\GPUCompiler\FnKRy\src\driver.jl:97
 [8] emit_function!(::LLVM.Module, ::GPUCompiler.CompilerJob{GPUCompiler.PTXCompilerTarget,CUDA.CUDACompilerParams}, ::Function, ::GPUCompiler.Runtime.RuntimeMethodInstance) at C:\Users\MAL\.julia\packages\GPUCompiler\FnKRy\src\rtlib.jl:77
 [9] build_runtime(::GPUCompiler.CompilerJob{GPUCompiler.PTXCompilerTarget,CUDA.CUDACompilerParams}) at C:\Users\MAL\.julia\packages\GPUCompiler\FnKRy\src\rtlib.jl:117
 [10] (::GPUCompiler.var"#52#55"{GPUCompiler.CompilerJob{GPUCompiler.PTXCompilerTarget,CUDA.CUDACompilerParams},String,String})() at C:\Users\MAL\.julia\packages\GPUCompiler\FnKRy\src\rtlib.jl:159
 [11] get!(::GPUCompiler.var"#52#55"{GPUCompiler.CompilerJob{GPUCompiler.PTXCompilerTarget,CUDA.CUDACompilerParams},String,String}, ::Dict{String,LLVM.Module}, ::String) at .\dict.jl:452
 [12] load_runtime(::GPUCompiler.CompilerJob{GPUCompiler.PTXCompilerTarget,CUDA.CUDACompilerParams}) at C:\Users\MAL\.julia\packages\GPUCompiler\FnKRy\src\rtlib.jl:151
 [13] codegen(::Symbol, ::GPUCompiler.CompilerJob{GPUCompiler.PTXCompilerTarget,CUDA.CUDACompilerParams}; libraries::Bool, deferred_codegen::Bool, optimize::Bool, strip::Bool, validate::Bool) at C:\Users\MAL\.julia\packages\GPUCompiler\FnKRy\src\driver.jl:93
 [14] compile(::Symbol, ::GPUCompiler.CompilerJob{GPUCompiler.PTXCompilerTarget,CUDA.CUDACompilerParams}; libraries::Bool, deferred_codegen::Bool, optimize::Bool, strip::Bool, validate::Bool) at C:\Users\MAL\.julia\packages\GPUCompiler\FnKRy\src\driver.jl:36
 [15] compile at C:\Users\MAL\.julia\packages\GPUCompiler\FnKRy\src\driver.jl:32 [inlined]
 [16] _cufunction(::GPUCompiler.FunctionSpec{GPUArrays.var"#20#21",Tuple{CUDA.CuKernelContext,CuDeviceArray{Float32,1,CUDA.AS.Global},Base.Broadcast.Broadcasted{Nothing,Tuple{Base.OneTo{Int64}},typeof(*),Tuple{Base.Broadcast.Extruded{CuDeviceArray{Float32,1,CUDA.AS.Global},Tuple{Bool},Tuple{Int64}},Int64}}}}; kwargs::Base.Iterators.Pairs{Union{},Union{},Tuple{},NamedTuple{(),Tuple{}}}) at C:\Users\MAL\.julia\packages\CUDA\VPAke\src\compiler\execution.jl:308
 [17] _cufunction at C:\Users\MAL\.julia\packages\CUDA\VPAke\src\compiler\execution.jl:302 [inlined]
 [18] #87 at C:\Users\MAL\.julia\packages\GPUCompiler\FnKRy\src\cache.jl:21 [inlined]
 [19] get!(::GPUCompiler.var"#87#88"{Base.Iterators.Pairs{Union{},Union{},Tuple{},NamedTuple{(),Tuple{}}},typeof(CUDA._cufunction),GPUCompiler.FunctionSpec{GPUArrays.var"#20#21",Tuple{CUDA.CuKernelContext,CuDeviceArray{Float32,1,CUDA.AS.Global},Base.Broadcast.Broadcasted{Nothing,Tuple{Base.OneTo{Int64}},typeof(*),Tuple{Base.Broadcast.Extruded{CuDeviceArray{Float32,1,CUDA.AS.Global},Tuple{Bool},Tuple{Int64}},Int64}}}}}, ::Dict{UInt64,Any}, ::UInt64) at .\dict.jl:452
 [20] macro expansion at .\lock.jl:183 [inlined]
 [21] check_cache(::typeof(CUDA._cufunction), ::GPUCompiler.FunctionSpec{GPUArrays.var"#20#21",Tuple{CUDA.CuKernelContext,CuDeviceArray{Float32,1,CUDA.AS.Global},Base.Broadcast.Broadcasted{Nothing,Tuple{Base.OneTo{Int64}},typeof(*),Tuple{Base.Broadcast.Extruded{CuDeviceArray{Float32,1,CUDA.AS.Global},Tuple{Bool},Tuple{Int64}},Int64}}}}, ::UInt64; kwargs::Base.Iterators.Pairs{Union{},Union{},Tuple{},NamedTuple{(),Tuple{}}}) at C:\Users\MAL\.julia\packages\GPUCompiler\FnKRy\src\cache.jl:19
 [22] + at .\int.jl:53 [inlined]
 [23] hash_64_64 at .\hashing.jl:35 [inlined]
 [24] hash_uint64 at .\hashing.jl:62 [inlined]
 [25] hx at .\float.jl:568 [inlined]
 [26] hash at .\float.jl:571 [inlined]
 [27] cached_compilation(::typeof(CUDA._cufunction), ::GPUCompiler.FunctionSpec{GPUArrays.var"#20#21",Tuple{CUDA.CuKernelContext,CuDeviceArray{Float32,1,CUDA.AS.Global},Base.Broadcast.Broadcasted{Nothing,Tuple{Base.OneTo{Int64}},typeof(*),Tuple{Base.Broadcast.Extruded{CuDeviceArray{Float32,1,CUDA.AS.Global},Tuple{Bool},Tuple{Int64}},Int64}}}}, ::UInt64; kwargs::Base.Iterators.Pairs{Union{},Union{},Tuple{},NamedTuple{(),Tuple{}}}) at C:\Users\MAL\.julia\packages\GPUCompiler\FnKRy\src\cache.jl:0
 [28] cached_compilation(::Function, ::GPUCompiler.FunctionSpec{GPUArrays.var"#20#21",Tuple{CUDA.CuKernelContext,CuDeviceArray{Float32,1,CUDA.AS.Global},Base.Broadcast.Broadcasted{Nothing,Tuple{Base.OneTo{Int64}},typeof(*),Tuple{Base.Broadcast.Extruded{CuDeviceArray{Float32,1,CUDA.AS.Global},Tuple{Bool},Tuple{Int64}},Int64}}}}, ::UInt64) at C:\Users\MAL\.julia\packages\GPUCompiler\FnKRy\src\cache.jl:37
 [29] cufunction(::Function, ::Type; name::String, kwargs::Base.Iterators.Pairs{Union{},Union{},Tuple{},NamedTuple{(),Tuple{}}}) at C:\Users\MAL\.julia\packages\CUDA\VPAke\src\compiler\execution.jl:296
 [30] macro expansion at C:\Users\MAL\.julia\packages\CUDA\VPAke\src\compiler\execution.jl:108 [inlined]
 [31] gpu_call(::CUDA.CuArrayBackend, ::Function, ::Tuple{CuArray{Float32,1,Nothing},Base.Broadcast.Broadcasted{Nothing,Tuple{Base.OneTo{Int64}},typeof(*),Tuple{Base.Broadcast.Extruded{CuArray{Float32,1,Nothing},Tuple{Bool},Tuple{Int64}},Int64}}}, ::Int64; name::String) at C:\Users\MAL\.julia\packages\CUDA\VPAke\src\gpuarrays.jl:32
 [32] #gpu_call#1 at C:\Users\MAL\.julia\packages\GPUArrays\X4SqE\src\device\execution.jl:61 [inlined]
 [33] copyto! at C:\Users\MAL\.julia\packages\GPUArrays\X4SqE\src\host\broadcast.jl:57 [inlined]
 [34] copyto! at .\broadcast.jl:864 [inlined]
 [35] materialize!(::CuArray{Float32,1,Nothing}, ::Base.Broadcast.Broadcasted{CUDA.CuArrayStyle{1},Nothing,typeof(*),Tuple{CuArray{Float32,1,Nothing},Int64}}) at .\broadcast.jl:823
 [36] top-level scope at REPL[6]:1

This is a known issue, tracked at https://github.com/JuliaGPU/CUDA.jl/issues/229. Please upgrade LLVM.jl, which should resolve the problem.
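
For reference, a minimal sketch of how the upgrade might look from within Julia (assuming LLVM.jl is not pinned in your environment and a compatible newer release exists for your Julia version):

julia> using Pkg

julia> Pkg.update("LLVM")   # or Pkg.update() to upgrade every package in the environment

After restarting Julia, the original broadcast should compile and run:

julia> using CUDA

julia> a = CUDA.fill(1.0f0, 100000);

julia> a .*= 2;   # previously raised the personality! MethodError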