Does Flux not support Float16 convolution operations (`Flux.NNlib.conv`) with `CuArray`?

using Flux, CUDA

# Thin wrapper around a 3-D filter bank with real element type `T`.
# NOTE(review): the field is abstractly typed (`AbstractArray{T,3}`), which
# is a performance anti-pattern in general, but it is what lets `new(f)`
# convert e.g. a Float32 CuArray into a Float16-eltype field below.
struct filters{T <: Real}
    f::AbstractArray{T,3}
    # Inner constructor: accepts any `f`; `new` converts it to the field type.
    filters{T}(f) where {T <: Real} = new(f)
end
# Register with Flux so `Flux.params` can collect the trainable field `f`.
Flux.@functor filters

And this works:

T = Float32
q = filters{T}(cu(rand(50, 1, 15)))   # 15 filters of length 50, 1 input channel
X = cu(randn(T, (100, 1, 4)))         # batch of 4 length-100 signals
S = cu(randn(T, (51, 15, 4)))         # target matching the conv output shape
ps = Flux.params(q)
# Closure-argument form of `gradient`; equivalent to the `do`-block syntax.
loss() = sum(Flux.NNlib.conv(X, q.f, pad=0) - S)
gs = gradient(loss, ps)

But this doesn’t work:

# Same reproducer with Float16 — this is the case that raises
# CUDNN_STATUS_BAD_PARAM (see the linked NNlibCUDA issue #68 below).
T = Float16
q = filters{T}(cu(rand(50, 1, 15)))
X = cu(randn(T, (100, 1, 4)))
S = cu(randn(T, (51, 15, 4)))
ps = Flux.params(q)
gs = gradient(ps) do
    # Fixed: the `do` block was missing its closing `end`, so the snippet
    # was not valid Julia; body indentation restored as well.
    sum(Flux.NNlib.conv(X, q.f, pad=0) - S)
end

which gives

CUDNNError: CUDNN_STATUS_BAD_PARAM (code 3)

I think this is: Float16 CUDA `conv` broken on 5D tensors · Issue #68 · FluxML/NNlibCUDA.jl · GitHub

1 Like