How do I implement the SGD optimiser in Julia code?

###############################
# Task a



using DifferentialEquations, ModelingToolkit, MethodOfLines, DomainSets
using Plots
using Random
using Statistics
using ComponentArrays, Lux, DiffEqFlux, OrdinaryDiffEq, Optimization, OptimizationOptimJL,
OptimizationOptimisers
using Optim



rng = Random.default_rng()

# SIR model parameters
β = 0.3   # transmission rate
γ = 0.1   # recovery rate
N = 1000  # total population

# In-place RHS of the SIR system, with state u = [S, I, R]
function SIR!(du, u, p, t)
    du[1] = -β * u[1] * u[2] / N            # dS/dt
    du[2] = β * u[1] * u[2] / N - γ * u[2]  # dI/dt
    du[3] = γ * u[2]                        # dR/dt
end

tspan = (0.0, 100.0)
I_0 = 1.0
R_0 = 0.0
S_0 = N - I_0 - R_0
u_0 = [S_0, I_0, R_0]  # initial conditions as Floats, so the in-place update does not hit an InexactError

prob = ODEProblem(SIR!, u_0, tspan)
sol = solve(prob, Tsit5(), reltol = 1e-8, abstol = 1e-8)

SIR_model = plot(sol, lw = 2, xaxis = "t", label = ["S" "I" "R"], size = (800, 600))

display(SIR_model)

savefig(SIR_model, raw"C:\Users\super\OneDrive\Desktop\ML bootcamp\bootcamp\SIR_model.png")


Ode_data = Array(sol)  # 3×T matrix of the reference solution, used as training data



######################################

## Task b
## Using a neural network to approximate the derivative function of the ODEs


NN = Lux.Chain(
    Lux.Dense(3, 50, sigmoid),
    Lux.Dense(50, 32, sigmoid),
    Lux.Dense(32, 3),
)
# Iteration: 4502, Loss: 31.023456387689045

# Alternative architecture tried:
# NN = Lux.Chain(
#     Lux.Dense(3, 64, tanh),
#     Lux.Dense(64, 50, tanh),
#     Lux.Dense(50, 3),
# )
# Iteration: 9002, Loss: 43.51141431086539

p, st = Lux.setup(rng, NN)

tsteps = sol.t
# u0 is supplied when the NeuralODE is called, so it is not passed to the constructor
prob_neuralode = NeuralODE(NN, tspan, Tsit5(); saveat = tsteps, abstol = 1e-7, reltol = 1e-7)


scaling_factor = 1  # kept at 1; available if the data needs rescaling
function predict_neuralode(p)
    scaling_factor * Array(prob_neuralode(u_0, p, st)[1])
end


function loss_neuralode(p)
    pred = predict_neuralode(p)
    mse_loss = mean(abs2, Ode_data .- pred)  # mean squared error against the reference solution
    return mse_loss, pred
end






callback = function (p, l, pred, iter; doplot = true)
    println("Iteration: $iter, Loss: $l")
    if doplot
        plt = plot(tsteps, Ode_data[1, :], label = "S data", lw = 2)
        plot!(plt, tsteps, pred[1, :], label = "S pred", lw = 2, linestyle = :dash)
        display(plt)
    end
    return false
end

# Wrap the callback with a Ref-based counter so the iteration number is tracked across calls
function create_callback_with_iter(callback)
    iter = Ref(0)
    function (p, l, pred; doplot = true)
        iter[] += 1
        callback(p, l, pred, iter[]; doplot = doplot)
    end
end



# ComponentArray flattens the nested NamedTuple of Lux parameters into a single
# vector the optimiser can update, while keeping named access to each layer.
iter_callback = create_callback_with_iter(callback)

comp_array = ComponentArray(p)
iter_callback(comp_array, loss_neuralode(comp_array)...; doplot = true)



######################################
########### Task c
####### Use Optimization.jl to solve the problem
adtype = Optimization.AutoZygote()


# Wrap the loss in an OptimizationFunction using Zygote-based automatic differentiation
optf = Optimization.OptimizationFunction((x, p) -> loss_neuralode(x), adtype)
optprob = Optimization.OptimizationProblem(optf, comp_array)





result_neuralode = Optimization.solve(optprob, OptimizationOptimisers.SGD(0.05), callback=iter_callback, maxiters=9000)

At the last line of the code, I get the following error:

ERROR: UndefVarError: SGD not defined
Stacktrace:
 [1] getproperty(x::Module, f::Symbol)
   @ Base .\Base.jl:26
 [2] top-level scope

I am implementing a Neural ODE on the SIR model. Any help would be appreciated.

I think the function you are looking for is OptimizationOptimisers.Descent, not OptimizationOptimisers.SGD. OptimizationOptimisers re-exports the optimisation rules from Optimisers.jl, where plain gradient descent is named Descent; no rule called SGD is exported, which is why Julia throws the UndefVarError.
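
A minimal sketch of the corrected call, assuming the rest of your setup stays as posted (the 0.05 learning rate is carried over from your snippet; the commented Adam line is just a commonly tried alternative, not something your code requires):

using OptimizationOptimisers  # re-exports the rules from Optimisers.jl

# Descent(η) is plain gradient descent with learning rate η
result_neuralode = Optimization.solve(optprob, OptimizationOptimisers.Descent(0.05),
                                      callback = iter_callback, maxiters = 9000)

# Adaptive rules such as Adam often converge faster on Neural ODE losses:
# result_neuralode = Optimization.solve(optprob, OptimizationOptimisers.Adam(0.05),
#                                       callback = iter_callback, maxiters = 9000)

Note that since your loss uses the full trajectory on every iteration, Descent here is really full-batch gradient descent; the "stochastic" part of SGD only comes in if you subsample time points at each step.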