I am trying to use NeuralPDE.jl to solve an ODE system. Here is the code I am using:
using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL
import ModelingToolkit: Interval, infimum, supremum
# Independent variable, dependent variables, and time derivative
@parameters t
@variables u1(..), u2(..)
D = Differential(t)
# Physical parameters
omega_R = 120*pi
Pm = 1
E = 1.0566
V = 1
X = 0.8805
Ps = E*V/X
H = 15
# ODE system and initial conditions
eq = [D(u1(t)) ~ u2(t) - omega_R,
      D(u2(t)) ~ (Pm - Ps*sin(u1(t)) - 1.2*(u2(t) - omega_R)/omega_R)*(omega_R/(2*H))]
bcs = [u1(0) ~ 0.9851 + 0.01, u2(0) ~ omega_R]
# Time domain and neural networks (one chain per dependent variable)
domains = [t ∈ Interval(0.0, 10.0)]
dt = 0.01
input_ = length(domains) # number of input dimensions
n = 16
chain = [Lux.Chain(Dense(input_, n, Lux.σ), Dense(n, n, Lux.σ), Dense(n, 1)) for _ in 1:2]
# Build the PDESystem and discretize it with a PINN on a fixed grid
@named pde_system = PDESystem(eq, bcs, domains, [t], [u1(t), u2(t)])
strategy = NeuralPDE.GridTraining(dt)
discretization = PhysicsInformedNN(chain, strategy)
sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization)
pde_loss_functions = sym_prob.loss_functions.pde_loss_functions
bc_loss_functions = sym_prob.loss_functions.bc_loss_functions
# Print the total loss and the individual loss components at every iteration
callback = function (p, l)
    println("loss: ", l)
    println("pde_losses: ", map(l_ -> l_(p), pde_loss_functions))
    println("bcs_losses: ", map(l_ -> l_(p), bc_loss_functions))
    return false
end

# Total loss = sum of the PDE and boundary-condition losses
loss_functions = [pde_loss_functions; bc_loss_functions]
function loss_function(θ, p)
    sum(map(l -> l(θ), loss_functions))
end
# Optimize the network parameters with BFGS
f_ = OptimizationFunction(loss_function, Optimization.AutoZygote())
prob = Optimization.OptimizationProblem(f_, sym_prob.flat_init_params)
res = Optimization.solve(prob, OptimizationOptimJL.BFGS(); callback = callback, maxiters = 5000)

# Evaluate the trained networks on a fine time grid
phi = discretization.phi
ts = 0.0:0.01:10.0
minimizers_ = [res.u.depvar[sym_prob.depvars[i]] for i in 1:2]
u_predict = [[phi[i]([t], minimizers_[i])[1] for t in ts] for i in 1:2]
After increasing the number of iterations, the loss gets close to zero (3.571235060904114e-5). I then plot the results from NeuralPDE against a reference solution from Tsit5() (my comparison setup is sketched below).
The Tsit5() solution oscillates like a sine wave, but the NeuralPDE prediction is just a straight line.
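For reference, my Tsit5() comparison is roughly the following (a minimal sketch using OrdinaryDiffEq and Plots; the function name swing! and the plot labels are just placeholders, and it reuses omega_R, Pm, Ps, H, ts, and u_predict from the code above):

using OrdinaryDiffEq, Plots

# Same ODE system written for a classical solver
function swing!(du, u, p, t)
    du[1] = u[2] - omega_R
    du[2] = (Pm - Ps*sin(u[1]) - 1.2*(u[2] - omega_R)/omega_R) * (omega_R/(2*H))
end

u0 = [0.9851 + 0.01, omega_R]   # same initial conditions as bcs
tspan = (0.0, 10.0)
ode_prob = ODEProblem(swing!, u0, tspan)
ode_sol = solve(ode_prob, Tsit5(), saveat = ts)

# Overlay the PINN prediction and the Tsit5() reference for u1
plot(ts, u_predict[1], label = "NeuralPDE u1")
plot!(ode_sol.t, getindex.(ode_sol.u, 1), label = "Tsit5 u1")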
Please help me fix my code.