MWE:
using ChainRulesCore: @ignore_derivatives
using DSP
using DSP.Periodograms: forward_plan
using Zygote
function mystft(x::AbstractVector{T}, n, noverlap) where {T}
numsamples = length(x)
numframes = numsamples >= n ? div((numsamples - n), n - noverlap) + 1 : 0
inputtype = fftintype(T)
outtype = fftouttype(T)
oneton = Base.OneTo(n)
tmp = similar(x, outtype, T <: Real ? (n >> 1)+1 : n)
plan = @ignore_derivatives forward_plan(zeros(inputtype, n), tmp)
map(1:numframes) do i
offset = (i-1) * (n - noverlap)
x1 = inputtype.(x[offset .+ oneton])
plan * x1
end |> stack
end
xF64 = randn(Float64, 10000)
xF32 = Float32.(xF64)
n = 128
noverlap = 64
stft_xF32 = stft(xF32, n, noverlap)
stft_xF64 = stft(xF64, n, noverlap)
mystft_xF32 = mystft(xF32, n, noverlap)
mystft_xF64 = mystft(xF64, n, noverlap)
stft_xF32 == mystft_xF32 # true
stft_xF64 == mystft_xF64 # true
mystft_xF64 ≈ mystft_xF32 # true
gradient(x -> sum(abs, mystft(x, n, noverlap)), xF64) # this works
gradient(x -> sum(abs, mystft(x, n, noverlap)), xF32)
# ERROR: ArgumentError: FFTW plan applied to array with wrong memory alignment
The gradient of mystft
with respect to Vector{Float64} works but it throws ERROR: ArgumentError: FFTW plan applied to array with wrong memory alignment
when Vector{Float32} is the input argument. Is this a bug?
Julia and package versions:
julia> versioninfo()
Julia Version 1.9.2
Commit e4ee485e909 (2023-07-05 09:39 UTC)
Platform Info:
OS: Linux (x86_64-linux-gnu)
CPU: 8 × Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz
WORD_SIZE: 64
LIBM: libopenlibm
LLVM: libLLVM-14.0.6 (ORCJIT, skylake)
Threads: 4 on 8 virtual cores
Environment:
LD_LIBRARY_PATH = /opt/ros/noetic/lib::/usr/local/cuda-11.7/lib64
JULIA_EDITOR = code
JULIA_NUM_THREADS = 4
(test_stft) pkg> st
Status `~/Projects/tmp/test_stft/Project.toml`
[d360d2e6] ChainRulesCore v1.16.0
[717857b8] DSP v0.7.8
[e88e6eb3] Zygote v0.6.62