1D GAN with Flux

Hey everyone. I’ve been trying to use Flux for Generative Adversarial Networks (GANs), and unfortunately it’s not working properly. At the moment I’m trying to replicate a 1D GAN example (this one here: 1D GAN with Keras), but the results are just not good. First, the discriminator takes a long time to learn to tell real data from fake, and the generator is even worse.

I was wondering if anyone here has implemented GANs in Flux and could perhaps show how to implement this 1D example. I’ve based my code on the DCGAN example from the model_zoo.

Can you post the full Julia code for us to compare?

Sure, so here is my code trying to replicate this 1D GAN:

using Flux
using Statistics   # for mean
using Plots        # for scatter

# Real data: points (x, x^2) with x uniform in [-0.5, 0.5]
function generate_real_data(n)
    x1 = rand(1,n) .- 0.5
    x2 = x1 .* x1
    return vcat(x1,x2)
end

function D()
    return Chain(
            Dense(2, 25, relu),
            Dense(25, 1, x -> σ.(x))
            )
end

function G(latent_dim::Int)
    return Chain(
            Dense(latent_dim, 15, relu),
            Dense(15, 2)
            )
end

loss_D(x, y) = sum(Flux.Losses.logitbinarycrossentropy(dscr(x), y))

function trainDiscriminator!(dscr,gen,train_size)
    real = generate_real_data(train_size)
    fake = gen(rand(5,train_size))

    X    = hcat(real,fake)
    Y    = vcat(ones(train_size),zeros(train_size))   # labels: real = 1, fake = 0
    data = Flux.Data.DataLoader(X, Y', batchsize=100,shuffle=true);
    for d in data
        gs = gradient(Flux.params(dscr)) do
            l = loss_D(d...)
        end
        Flux.update!(opt, Flux.params(dscr), gs)
    end
end

loss_G(x) = sum(Flux.Losses.logitbinarycrossentropy(dscr(x),1))  # train gen so dscr labels fakes as real (1)

function trainGenerator!(gen,dscr,train_size)
    fake_generated = gen(rand(5,train_size))
    data = Flux.Data.DataLoader(fake_generated, batchsize=100,shuffle=true);
    for d in data
        gs = gradient(Flux.params(gen)) do
            l = loss_G(d)
        end
        Flux.update!(opt, Flux.params(gen), gs)
    end
    fake_generated = gen(rand(5,train_size))
end

gen  = G(5)
dscr = D()
opt  = ADAM()
train_size = 2000

epochs = 10000
for e in 1:epochs
    trainDiscriminator!(dscr,gen,train_size)
    trainGenerator!(gen,dscr,train_size)
    if e%1000 == 0
        real = generate_real_data(train_size)
        fake = gen(rand(5,train_size))
        @show mean(dscr(real)),mean(dscr(fake))
    end
end

# Compare real vs. generated samples
real = generate_real_data(train_size)
fake = gen(rand(5,train_size))
scatter(real[1,1:100],real[2,1:100])
scatter!(fake[1,1:100],fake[2,1:100])

Your generator loss is not actually differentiating through the generator: you are sampling the noise distribution, passing those samples through the generator, and then handing the precomputed generator outputs to the data loader. Instead, you need to pass the noise samples themselves to the data loader and call the generator on them inside the loss function, so the gradient can flow through the generator (see the sketch after the loss functions below).

Also, your loss_D and loss_G functions rely on the global dscr implicitly; it is probably best to pass the models explicitly as arguments (same goes for gen):

loss_D(dscr, x, y) = sum(Flux.Losses.logitbinarycrossentropy(dscr(x), y))

loss_G(dscr, gen, z) = sum(Flux.Losses.logitbinarycrossentropy(dscr(gen(z)), 1))
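
For concreteness, here is a minimal sketch of the generator training loop with that change, using the loss signature above and the variable names from your script (opt is your global ADAM optimiser):

function trainGenerator!(gen, dscr, train_size)
    noise = rand(5, train_size)   # latent samples go into the loader, not generator outputs
    data  = Flux.Data.DataLoader(noise, batchsize=100, shuffle=true)
    for z in data
        gs = gradient(Flux.params(gen)) do
            loss_G(dscr, gen, z)  # gen(z) is evaluated inside, so gradients reach gen
        end
        Flux.update!(opt, Flux.params(gen), gs)
    end
end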

Thanks a lot for the reply. Even so, the generator still does not replicate the data like in the example. Here is my updated code:

loss_D(x, y, dscr) = sum(Flux.Losses.logitbinarycrossentropy(dscr(x), y))

function trainDiscriminator!(dscr,gen,train_size)
    real = generate_real_data(train_size)
    fake = gen(rand(5,train_size))

    X    = hcat(real,fake)
    Y    = vcat(ones(train_size),zeros(train_size))
    data = Flux.Data.DataLoader(X, Y', batchsize=128,shuffle=true);
    for d in data
        gs = gradient(Flux.params(dscr)) do
            l = loss_D(d...,dscr)
        end
        Flux.update!(opt, Flux.params(dscr), gs)
    end
end

loss_G(z,gen,dscr) = sum(Flux.Losses.logitbinarycrossentropy(dscr(gen(z)),1))

function trainGenerator!(gen,dscr,train_size)
    noise = rand(5,train_size)
    data = Flux.Data.DataLoader(noise, batchsize=128,shuffle=true);
    for d in data
        gs = gradient(Flux.params(gen)) do
            l = loss_G(d,gen,dscr)
        end
        Flux.update!(opt, Flux.params(gen), gs)
    end
    fake_generated = gen(rand(5,train_size))
end

Finally, I got it to work! The problem was the sigmoid in the discriminator’s output layer combined with the logit version of the loss: logitbinarycrossentropy already applies a sigmoid internally, so it was effectively being applied twice. I removed the sigmoid, and now it all works!
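
For reference, this is what the discriminator looks like now; the rest is as in my updated code above. Note that dscr now outputs raw logits, so to read its outputs as probabilities (e.g. in the monitoring line) you wrap them in σ:

function D()
    return Chain(
            Dense(2, 25, relu),
            Dense(25, 1)   # no σ here: logitbinarycrossentropy applies it internally
            )
end

# monitoring, with logits converted to probabilities:
# @show mean(σ.(dscr(real))), mean(σ.(dscr(fake)))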


Hello, can you please post the full code? Thanks.


plus 1 for the code!


@davibarreira Can you post the full code here, or, even better, maybe write a quick tutorial at https://github.com/FluxML/fluxml.github.io/tree/main/tutorials/_posts ?

Please check this code I made over here; it should be at least similar to what was intended:

https://github.com/joofio/ds-curriculum/blob/main/generative/1D-GAN-julia.ipynb