How to generate values after training with Lux.LayerNorm and Lux.BatchNorm

I tried to use Lux.LayerNorm together with Dense layers to train an ODE system with NeuralPDE as follows:

domains = [t ∈ Interval(0.0,25.0)]

chain = [Lux.Chain(Lux.LayerNorm((1,), Lux.relu), Dense(1, 10, Lux.tanh), Dense(10, 20, Lux.tanh), Dense(20, 10, Lux.tanh), Dense(10, 1)) for _ in 1:12]

dvs = [delta1(t), delta2(t), delta3(t), omega1(t), omega2(t), omega3(t), TM1(t), TM2(t), TM3(t), Psv1(t), Psv2(t), Psv3(t)]

@named pde_system = PDESystem(eqs, bcs, domains, [t], dvs)


strategy = NeuralPDE.GridTraining(0.01)

discretization = PhysicsInformedNN(chain, strategy)

sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization)

pde_loss_functions = sym_prob.loss_functions.pde_loss_functions
bc_loss_functions = sym_prob.loss_functions.bc_loss_functions

callback = function (p, l)
    println("loss: ", l)
    return false
end
loss_functions = [pde_loss_functions; bc_loss_functions]

function loss_function(θ, p)
    sum(map(l -> l(θ), loss_functions))
end

f_ = OptimizationFunction(loss_function, Optimization.AutoZygote())
prob = Optimization.OptimizationProblem(f_, sym_prob.flat_init_params);
phi = discretization.phi;

res = Optimization.solve(prob,OptimizationOptimJL.BFGS(); callback = callback, maxiters = 20000)

After training, I followed the tutorial code to generate the values of the networks as follows:

ts = 0.0:0.01:25.0
minimizers_ = [res.u.depvar[sym_prob.depvars[i]] for i in 1:12]
u_predict = [[phi[i]([t], minimizers_[i])[1] for t in ts] for i in 1:12];

However, I got this error message:

MethodError: no method matching layernorm(::Vector{Float64}, ::Base.ReshapedArray{Float64, 2, SubArray{Float64, 1, Vector{Float64}, Tuple{UnitRange{Int64}}, true}, Tuple{}}, ::Base.ReshapedArray{Float64, 2, SubArray{Float64, 1, Vector{Float64}, Tuple{UnitRange{Int64}}, true}, Tuple{}}; dims::Colon, epsilon::Float32)

Closest candidates are:
  layernorm(::AbstractArray{<:Real, N}, ::AbstractArray{<:Real, N}, ::AbstractArray{<:Real, N}; dims, epsilon) where N
   @ LuxLib C:\Users\htran\.julia\packages\LuxLib\wG638\src\api\layernorm.jl:32
  layernorm(::AbstractArray, ::Nothing, ::Nothing; dims, epsilon)
   @ LuxLib C:\Users\htran\.julia\packages\LuxLib\wG638\src\api\layernorm.jl:40

When I tried another layer (Lux.BatchNorm), I also got an error when generating the prediction.
The code I used is:

domains = [t ∈ Interval(0.0,25.0)]


chain = [Lux.Chain(Dense(1, 10, Lux.tanh), Lux.BatchNorm(10, Lux.relu), Dense(10, 20, Lux.tanh), Lux.BatchNorm(20, Lux.relu), Dense(20, 10, Lux.tanh),
                   Lux.BatchNorm(10), Dense(10, 1)) for _ in 1:12]

dvs = [delta1(t), delta2(t), delta3(t), omega1(t), omega2(t), omega3(t), TM1(t), TM2(t), TM3(t), Psv1(t), Psv2(t), Psv3(t)]

@named pde_system = PDESystem(eqs, bcs, domains, [t], dvs)


strategy = NeuralPDE.GridTraining(0.01)

discretization = PhysicsInformedNN(chain, strategy)
sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization)

pde_loss_functions = sym_prob.loss_functions.pde_loss_functions
bc_loss_functions = sym_prob.loss_functions.bc_loss_functions

callback = function (p, l)
    println("loss: ", l)
    return false
end
loss_functions = [pde_loss_functions; bc_loss_functions]

function loss_function(θ, p)
    sum(map(l -> l(θ), loss_functions))
end

f_ = OptimizationFunction(loss_function, Optimization.AutoZygote())
prob = Optimization.OptimizationProblem(f_, sym_prob.flat_init_params);
phi = sym_prob.phi;

res = Optimization.solve(prob,OptimizationOptimJL.BFGS(); callback = callback, maxiters = 100000)

ts = 0.0:0.01:25.0
minimizers_ = [res.u.depvar[sym_prob.depvars[i]] for i in 1:12]
u_predict = [[phi[i]([t], minimizers_[i])[1] for t in ts] for i in 1:12];

The error message is:

BoundsError: attempt to access Tuple{Int64} at index [0]

Stacktrace:
  [1] getindex(t::Tuple, i::Int64)
    @ Base .\tuple.jl:29
  [2] _get_reshape_dims
    @ C:\Users\htran\.julia\packages\LuxLib\wG638\src\utils.jl:39 [inlined]
  [3] _reshape_into_proper_shape
    @ C:\Users\htran\.julia\packages\LuxLib\wG638\src\utils.jl:51 [inlined]
  [4] _normalization(x::Vector{Float64}, running_mean::Vector{Float32}, running_var::Vector{Float32}, scale::SubArray{Float64, 1, Vector{Float64}, Tuple{UnitRange{Int64}}, true}, bias::SubArray{Float64, 1, Vector{Float64}, Tuple{UnitRange{Int64}}, true}, reduce_dims::Val{(1,)}, training::Val{true}, momentum::Float32, epsilon::Float32)
    @ LuxLib C:\Users\htran\.julia\packages\LuxLib\wG638\src\impl\normalization.jl:71
  [5] batchnorm(x::Vector{Float64}, scale::SubArray{Float64, 1, Vector{Float64}, Tuple{UnitRange{Int64}}, true}, bias::SubArray{Float64, 1, Vector{Float64}, Tuple{UnitRange{Int64}}, true}, running_mean::Vector{Float32}, running_var::Vector{Float32}; momentum::Float32, training::Val{true}, epsilon::Float32)
    @ LuxLib C:\Users\htran\.julia\packages\LuxLib\wG638\src\api\batchnorm.jl:47
  [6] (::BatchNorm{true, true, typeof(NNlib.relu), typeof(Lux.zeros32), typeof(Lux.ones32), Float32})(x::Vector{Float64}, ps::ComponentArrays.ComponentVector{Float64, SubArray{Float64, 1, Vector{Float64}, Tuple{UnitRange{Int64}}, true}, Tuple{ComponentArrays.Axis{(scale = 1:10, bias = 11:20)}}}, st::NamedTuple{(:running_mean, :running_var, :training), Tuple{Vector{Float32}, Vector{Float32}, Val{true}}})
    @ Lux C:\Users\htran\.julia\packages\Lux\8FZSB\src\layers\normalize.jl:125
  [7] apply(model::BatchNorm{true, true, typeof(NNlib.relu), typeof(Lux.zeros32), typeof(Lux.ones32), Float32}, x::Vector{Float64}, ps::ComponentArrays.ComponentVector{Float64, SubArray{Float64, 1, Vector{Float64}, Tuple{UnitRange{Int64}}, true}, Tuple{ComponentArrays.Axis{(scale = 1:10, bias = 11:20)}}}, st::NamedTuple{(:running_mean, :running_var, :training), Tuple{Vector{Float32}, Vector{Float32}, Val{true}}})
    @ LuxCore C:\Users\htran\.julia\packages\LuxCore\yC3wg\src\LuxCore.jl:100
  [8] macro expansion
    @ C:\Users\htran\.julia\packages\Lux\8FZSB\src\layers\containers.jl:0 [inlined]
  [9] applychain(layers::NamedTuple{(:layer_1, :layer_2, :layer_3, :layer_4, :layer_5, :layer_6, :layer_7), Tuple{Dense{true, typeof(NNlib.tanh_fast), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}, BatchNorm{true, true, typeof(NNlib.relu), typeof(Lux.zeros32), typeof(Lux.ones32), Float32}, Dense{true, typeof(NNlib.tanh_fast), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}, BatchNorm{true, true, typeof(NNlib.relu), typeof(Lux.zeros32), typeof(Lux.ones32), Float32}, Dense{true, typeof(NNlib.tanh_fast), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}, BatchNorm{true, true, typeof(identity), typeof(Lux.zeros32), typeof(Lux.ones32), Float32}, Dense{true, typeof(identity), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}}}, x::Vector{Float64}, ps::ComponentArrays.ComponentVector{Float64, Vector{Float64}, Tuple{ComponentArrays.Axis{(layer_1 = ViewAxis(1:20, Axis(weight = ViewAxis(1:10, ShapedAxis((10, 1), NamedTuple())), bias = ViewAxis(11:20, ShapedAxis((10, 1), NamedTuple())))), layer_2 = ViewAxis(21:40, Axis(scale = 1:10, bias = 11:20)), layer_3 = ViewAxis(41:260, Axis(weight = ViewAxis(1:200, ShapedAxis((20, 10), NamedTuple())), bias = ViewAxis(201:220, ShapedAxis((20, 1), NamedTuple())))), layer_4 = ViewAxis(261:300, Axis(scale = 1:20, bias = 21:40)), layer_5 = ViewAxis(301:510, Axis(weight = ViewAxis(1:200, ShapedAxis((10, 20), NamedTuple())), bias = ViewAxis(201:210, ShapedAxis((10, 1), NamedTuple())))), layer_6 = ViewAxis(511:530, Axis(scale = 1:10, bias = 11:20)), layer_7 = ViewAxis(531:541, Axis(weight = ViewAxis(1:10, ShapedAxis((1, 10), NamedTuple())), bias = ViewAxis(11:11, ShapedAxis((1, 1), NamedTuple())))))}}}, st::NamedTuple{(:layer_1, :layer_2, :layer_3, :layer_4, :layer_5, :layer_6, :layer_7), Tuple{NamedTuple{(), Tuple{}}, NamedTuple{(:running_mean, :running_var, :training), Tuple{Vector{Float32}, Vector{Float32}, Val{true}}}, NamedTuple{(), Tuple{}}, NamedTuple{(:running_mean, :running_var, :training), Tuple{Vector{Float32}, Vector{Float32}, Val{true}}}, NamedTuple{(), Tuple{}}, NamedTuple{(:running_mean, :running_var, :training), Tuple{Vector{Float32}, Vector{Float32}, Val{true}}}, NamedTuple{(), Tuple{}}}})
    @ Lux C:\Users\htran\.julia\packages\Lux\8FZSB\src\layers\containers.jl:460
 [10] (::Chain{NamedTuple{(:layer_1, :layer_2, :layer_3, :layer_4, :layer_5, :layer_6, :layer_7), Tuple{Dense{true, typeof(NNlib.tanh_fast), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}, BatchNorm{true, true, typeof(NNlib.relu), typeof(Lux.zeros32), typeof(Lux.ones32), Float32}, Dense{true, typeof(NNlib.tanh_fast), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}, BatchNorm{true, true, typeof(NNlib.relu), typeof(Lux.zeros32), typeof(Lux.ones32), Float32}, Dense{true, typeof(NNlib.tanh_fast), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}, BatchNorm{true, true, typeof(identity), typeof(Lux.zeros32), typeof(Lux.ones32), Float32}, Dense{true, typeof(identity), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}}}})(x::Vector{Float64}, ps::ComponentArrays.ComponentVector{Float64, Vector{Float64}, Tuple{ComponentArrays.Axis{(layer_1 = ViewAxis(1:20, Axis(weight = ViewAxis(1:10, ShapedAxis((10, 1), NamedTuple())), bias = ViewAxis(11:20, ShapedAxis((10, 1), NamedTuple())))), layer_2 = ViewAxis(21:40, Axis(scale = 1:10, bias = 11:20)), layer_3 = ViewAxis(41:260, Axis(weight = ViewAxis(1:200, ShapedAxis((20, 10), NamedTuple())), bias = ViewAxis(201:220, ShapedAxis((20, 1), NamedTuple())))), layer_4 = ViewAxis(261:300, Axis(scale = 1:20, bias = 21:40)), layer_5 = ViewAxis(301:510, Axis(weight = ViewAxis(1:200, ShapedAxis((10, 20), NamedTuple())), bias = ViewAxis(201:210, ShapedAxis((10, 1), NamedTuple())))), layer_6 = ViewAxis(511:530, Axis(scale = 1:10, bias = 11:20)), layer_7 = ViewAxis(531:541, Axis(weight = ViewAxis(1:10, ShapedAxis((1, 10), NamedTuple())), bias = ViewAxis(11:11, ShapedAxis((1, 1), NamedTuple())))))}}}, st::NamedTuple{(:layer_1, :layer_2, :layer_3, :layer_4, :layer_5, :layer_6, :layer_7), Tuple{NamedTuple{(), Tuple{}}, NamedTuple{(:running_mean, :running_var, :training), Tuple{Vector{Float32}, Vector{Float32}, Val{true}}}, NamedTuple{(), Tuple{}}, NamedTuple{(:running_mean, :running_var, :training), Tuple{Vector{Float32}, Vector{Float32}, Val{true}}}, NamedTuple{(), Tuple{}}, NamedTuple{(:running_mean, :running_var, :training), Tuple{Vector{Float32}, Vector{Float32}, Val{true}}}, NamedTuple{(), Tuple{}}}})
    @ Lux C:\Users\htran\.julia\packages\Lux\8FZSB\src\layers\containers.jl:457
 [11] (::NeuralPDE.Phi{Chain{NamedTuple{(:layer_1, :layer_2, :layer_3, :layer_4, :layer_5, :layer_6, :layer_7), Tuple{Dense{true, typeof(NNlib.tanh_fast), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}, BatchNorm{true, true, typeof(NNlib.relu), typeof(Lux.zeros32), typeof(Lux.ones32), Float32}, Dense{true, typeof(NNlib.tanh_fast), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}, BatchNorm{true, true, typeof(NNlib.relu), typeof(Lux.zeros32), typeof(Lux.ones32), Float32}, Dense{true, typeof(NNlib.tanh_fast), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}, BatchNorm{true, true, typeof(identity), typeof(Lux.zeros32), typeof(Lux.ones32), Float32}, Dense{true, typeof(identity), typeof(Lux.glorot_uniform), typeof(Lux.zeros32)}}}}, NamedTuple{(:layer_1, :layer_2, :layer_3, :layer_4, :layer_5, :layer_6, :layer_7), Tuple{NamedTuple{(), Tuple{}}, NamedTuple{(:running_mean, :running_var, :training), Tuple{Vector{Float32}, Vector{Float32}, Val{true}}}, NamedTuple{(), Tuple{}}, NamedTuple{(:running_mean, :running_var, :training), Tuple{Vector{Float32}, Vector{Float32}, Val{true}}}, NamedTuple{(), Tuple{}}, NamedTuple{(:running_mean, :running_var, :training), Tuple{Vector{Float32}, Vector{Float32}, Val{true}}}, NamedTuple{(), Tuple{}}}}})(x::Vector{Float64}, θ::ComponentArrays.ComponentVector{Float64, Vector{Float64}, Tuple{ComponentArrays.Axis{(layer_1 = ViewAxis(1:20, Axis(weight = ViewAxis(1:10, ShapedAxis((10, 1), NamedTuple())), bias = ViewAxis(11:20, ShapedAxis((10, 1), NamedTuple())))), layer_2 = ViewAxis(21:40, Axis(scale = 1:10, bias = 11:20)), layer_3 = ViewAxis(41:260, Axis(weight = ViewAxis(1:200, ShapedAxis((20, 10), NamedTuple())), bias = ViewAxis(201:220, ShapedAxis((20, 1), NamedTuple())))), layer_4 = ViewAxis(261:300, Axis(scale = 1:20, bias = 21:40)), layer_5 = ViewAxis(301:510, Axis(weight = ViewAxis(1:200, ShapedAxis((10, 20), NamedTuple())), bias = ViewAxis(201:210, ShapedAxis((10, 1), NamedTuple())))), layer_6 = ViewAxis(511:530, Axis(scale = 1:10, bias = 11:20)), layer_7 = ViewAxis(531:541, Axis(weight = ViewAxis(1:10, ShapedAxis((1, 10), NamedTuple())), bias = ViewAxis(11:11, ShapedAxis((1, 1), NamedTuple())))))}}})
    @ NeuralPDE C:\Users\htran\.julia\packages\NeuralPDE\F4RlZ\src\pinn_types.jl:365
 [12] (::var"#74#76"{Int64})(t::Float64)
    @ Main .\none:0
 [13] iterate
    @ .\generator.jl:47 [inlined]
 [14] collect(itr::Base.Generator{StepRangeLen{Float64, Base.TwicePrecision{Float64}, Base.TwicePrecision{Float64}, Int64}, var"#74#76"{Int64}})
    @ Base .\array.jl:782
 [15] (::var"#73#75")(i::Int64)
    @ Main .\none:0
 [16] iterate
    @ .\generator.jl:47 [inlined]
 [17] collect(itr::Base.Generator{UnitRange{Int64}, var"#73#75"})
    @ Base .\array.jl:782
 [18] top-level scope
    @ In[26]:3

How should I modify the prediction code in these cases?
Thank you all

Reshape your inputs to a matrix (i.e. a batch size of 1).

Is that for both BatchNorm and LayerNorm, or does your reply apply to only one of them?

For both. Since your parameters are not vectors, the layers expect the inputs to be matrices.
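To illustrate the point, here is a minimal standalone Lux sketch (not from the original post; the chain only roughly mirrors the LayerNorm network in the question):

using Lux, Random

model = Lux.Chain(Lux.LayerNorm((1,), relu), Dense(1, 10, tanh), Dense(10, 1))
ps, st = Lux.setup(Random.default_rng(), model)

# A length-1 vector input hits the MethodError shown above, because the
# normalization parameters are stored as higher-dimensional arrays.
# A 1×1 matrix (one feature, batch size 1) is accepted:
x = reshape([0.5], 1, 1)
y, _ = Lux.apply(model, x, ps, st)

The same reshape applies to chains containing BatchNorm.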

Hi, do you mean reshaping the vector ts in the example?

Found!
The solution, for me, was to pass x*ones(1,1) instead of just x:

dx = 10
xs = [infimum(d.domain):(dx / 1):supremum(d.domain) for d in domains][1]
u_predict = [first(phi(x*ones(1,1), res.u)) for x in xs]
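Adapting that to the 12-network setup from the original post would look roughly like this (a sketch assuming the phi, sym_prob, and res defined above; the only change is promoting the scalar input to a 1×1 matrix):

ts = 0.0:0.01:25.0
minimizers_ = [res.u.depvar[sym_prob.depvars[i]] for i in 1:12]
# t * ones(1, 1) is a 1×1 matrix: one input feature, batch size 1
u_predict = [[first(phi[i](t * ones(1, 1), minimizers_[i])) for t in ts] for i in 1:12]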