Flux.jl manual training loop results in error `gradient(f, ::Params) is deprecated`

I have tried the training loop below in Julia and consistently get the error `gradient(f, ::Params) is deprecated` from Flux.

How do I fix this error?

epochs = 10

for epoch in 1:epochs
    epoch_loss = 0.0

    for (x_batch, y_batch) in train_loader
        gs = gradient(() -> begin
            y_pred = model(x_batch)
            l = logitcrossentropy(y_pred, y_batch)
            return l
        end, Flux.params(model))

        Flux.Optimise.update!(opt, Flux.params(model), gs)
        epoch_loss += loss_fn(x_batch, y_batch)
    end

    println("Epoch $epoch, Loss=$(epoch_loss)")
end

Recent Flux versions deprecate the implicit `Flux.params`-based `gradient` call in favour of passing the model as an explicit argument. The quoted section should probably look like this:

opt_state = Flux.setup(Adam(), model)  # do this just once, before the loop

for (x_batch, y_batch) in train_loader
    batch_loss, gs = Flux.withgradient(m -> begin
        y_pred = m(x_batch)
        l = logitcrossentropy(y_pred, y_batch)
        return l
    end, model)  # model is an explicit argument of the function being differentiated

    Flux.update!(opt_state, model, gs[1])  # mutates model and opt_state
    epoch_loss += batch_loss
end
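For completeness, here is a minimal self-contained sketch of the whole loop in the explicit style. The model, data, and optimiser below are assumptions standing in for whatever the question actually uses; only the loop structure is the point:

using Flux
using Flux.Losses: logitcrossentropy

# Hypothetical stand-in model and data; replace with your own.
model = Chain(Dense(784 => 128, relu), Dense(128 => 10))
X = rand(Float32, 784, 1024)                   # features × samples
Y = Flux.onehotbatch(rand(1:10, 1024), 1:10)   # one-hot labels
train_loader = Flux.DataLoader((X, Y), batchsize=32, shuffle=true)

opt_state = Flux.setup(Adam(), model)          # build optimiser state once

epochs = 10
for epoch in 1:epochs
    epoch_loss = 0.0
    for (x_batch, y_batch) in train_loader
        # withgradient returns the loss and the gradients in one pass,
        # so there is no second forward pass just to log the loss
        batch_loss, gs = Flux.withgradient(m -> logitcrossentropy(m(x_batch), y_batch), model)
        Flux.update!(opt_state, model, gs[1])  # update parameters in place
        epoch_loss += batch_loss
    end
    println("Epoch $epoch, Loss=$(epoch_loss)")
end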

After making that change, I now get a different error:

DimensionMismatch: layer Dense(4096 => 128) expects size(input, 1) == 4096, but got 3136×32 Matrix{Float32}
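That second error is unrelated to the gradient API: the first Dense layer was built for 4096 input features, but the batch reaching it has 3136 rows (and 32 columns, the batch size). If a convolutional front end feeds that layer, 3136 is plausibly a flattened 7×7×64 feature map while the Dense layer was sized for 8×8×64. One way to avoid hard-coding the width is to let Flux.outputsize compute it; the conv stack here is a hypothetical stand-in for the real model:

using Flux

# Hypothetical conv front end; substitute the actual layers.
conv_front = Chain(
    Conv((3, 3), 1 => 32, relu, pad=1), MaxPool((2, 2)),
    Conv((3, 3), 32 => 64, relu, pad=1), MaxPool((2, 2)),
    Flux.flatten,
)

insize = (28, 28, 1, 1)                     # H × W × channels × batch
flat = Flux.outputsize(conv_front, insize)  # (3136, 1) for this stack
model = Chain(conv_front, Dense(flat[1] => 128, relu), Dense(128 => 10))

Alternatively, if the input shape is fixed, simply changing Dense(4096 => 128) to Dense(3136 => 128) resolves the mismatch.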