Robust resampling/interpolation method

Here’s a less pseudo version:

using Pkg
cd(@__DIR__)
Pkg.activate(".")
using LowLevelParticleFilters
using LowLevelParticleFilters: SimpleMvNormal
using StaticArrays
using LinearAlgebra
using Random
using Plots
using StatsPlots
using Optim
using SeeToDee

## System definition: Damped nonlinear pendulum with forcing
# This is a simple but interesting nonlinear system with interpretable parameters

"""
    pendulum_dynamics(x, u, p, t)

Nonlinear pendulum dynamics: θ̈ = -b*θ̇ - (g/L)*sin(θ) + u/m
State: x = [θ (angle), ω (angular velocity)]
Parameters: p = [damping coefficient b, g/L, 1/m]
"""
function pendulum_dynamics(x, u, p, t)
    b, gL, inv_m = p
    θ, ω = x
    u_val = u === nothing ? 0.0 : u[1]

    SA[
        ω,
        -b*ω - gL*sin(θ) + inv_m*u_val
    ]
end

"""
    measurement(x, u, p, t)

Measurement function: we can observe the angle directly
"""
measurement(x, u, p, t) = SA[x[1]]

## Generate synthetic data with known parameters
Random.seed!(42)

# True system parameters
p_true = [0.2, 9.81, 1.0]  # [damping, g/L, 1/m]
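# Sanity check on the scales (using the small-angle approximation): with g/L = 9.81 the
# linearized natural frequency is sqrt(g/L) ≈ 3.13 rad/s, i.e. a period of roughly 2 s,
# and b = 0.2 gives light damping, so the Ts = 0.01 sample time chosen below resolves
# the dynamics comfortably.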

# Simulation settings
Ts = 0.01  # Sample time
T = 2000    # Number of time steps
t = 0:Ts:(T-1)*Ts

# Generate input signal (sinusoidal forcing)
u = [SA[0.5*sin(0.3*t_i)] for t_i in t]

# System dimensions
nx = 2  # [angle, angular_velocity]
nu = 1  # [force]
ny = 1  # [angle measurement]

# Noise covariances
R1 = Diagonal(SA[0.0001, 0.001])  # Small process noise
R2 = Diagonal(SA[0.01])         # Measurement noise

# Initial state
x0 = SA[0.5, 0.0]  # Start at angle=0.5 rad, velocity=0

# Create discrete-time dynamics using RK4 integration
discrete_dynamics = SeeToDee.Rk4(pendulum_dynamics, Ts, supersample=2)
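# SeeToDee.Rk4 returns a callable with the same (x, u, p, t) signature as the
# continuous-time dynamics. Quick illustrative check (not needed for the estimation):
@show discrete_dynamics(x0, u[1], p_true, 0.0)  # one Ts-long step from the initial state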

# Generate true trajectory using rollout
x_true = LowLevelParticleFilters.rollout(discrete_dynamics, x0, u, p_true)

# Add measurement noise (standard deviation sqrt(R2[1,1]) = 0.1) to create measurements
y = [measurement(x, u[i], p_true, t[i]) + SA[sqrt(R2[1,1])*randn()] for (i,x) in enumerate(x_true[1:end-1])]

# Optionally add process noise to the true states (not used further below; kept for reference)
x_true_noisy = [x_true[i] + SA[sqrt(R1[1,1])*randn(), sqrt(R1[2,2])*randn()] for i in 1:length(x_true)-1]

println("Generated $(length(y)) measurements from nonlinear pendulum system")
println("True parameters: damping=$(p_true[1]), g/L=$(p_true[2]), 1/m=$(p_true[3])")

## Define cost function using multi-step prediction error

"""
    multistep_prediction_cost(p, prediction_horizon)

Cost function that minimizes multi-step prediction errors.

At each time point:
1. Update filter with observation y[t-1]
2. Perform rollout for `prediction_horizon` steps
3. Calculate squared prediction errors
"""
function multistep_prediction_cost(p::Vector{T}, prediction_horizon=5) where T
    # Create filter with current parameter guess
    d0 = SimpleMvNormal(T.(x0), T.(R1))
    ekf = ExtendedKalmanFilter(discrete_dynamics, measurement, R1, R2, d0; nu, ny, p=p)

    # Incorporate the first observation
    correct!(ekf, u[1], y[1], p, 1*Ts)

    cost = zero(T)
    n_predictions = 0

    # Loop through time points (starting from t=2)
    for t_idx in 2:length(y)
        # Propagate the filter from step t-1 to t, then correct with the measurement at t
        predict!(ekf, u[t_idx-1], p, (t_idx-1)*Ts)
        correct!(ekf, u[t_idx], y[t_idx], p, t_idx*Ts)

        # Get current state estimate
        x_current = state(ekf)

        # Determine how many steps we can predict
        steps_remaining = length(y) - t_idx
        n_steps = min(prediction_horizon, steps_remaining)

        if n_steps > 0
            # Perform multi-step rollout from the filtered state, starting with the
            # input at the current time step
            u_future = u[t_idx:t_idx+n_steps-1]
            x_sim = LowLevelParticleFilters.rollout(discrete_dynamics, x_current, u_future, p)

            # Calculate prediction errors (x_sim[1] is the current state, so the
            # k-step-ahead prediction is x_sim[k+1])
            for k in 1:n_steps
                y_pred = measurement(x_sim[k+1], u[t_idx+k], p, (t_idx+k)*Ts)
                y_true = y[t_idx+k]
                error = y_pred - y_true
                cost += sum(abs2, error)
                n_predictions += 1
            end
        end
    end

    # Normalize by number of predictions
    return cost / n_predictions
end
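
# For reference, the more common approach is to minimize the one-step-ahead negative
# log-likelihood of the filter instead. A minimal sketch, assuming the exported
# `loglik(filter, u, y, p)` function behaves as in the LowLevelParticleFilters
# documentation (not used by the optimization below):
function onestep_nll(p::Vector{T}) where T
    d0  = SimpleMvNormal(T.(x0), T.(R1))
    ekf = ExtendedKalmanFilter(discrete_dynamics, measurement, R1, R2, d0; nu, ny, p=p)
    -loglik(ekf, u, y, p)
end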

## Perform parameter estimation

# Initial parameter guess (perturbed from true values)
p_guess = p_true .* (1.0 .+ 0.3*randn(length(p_true)))
p_guess = max.(p_guess, 0.01)  # Ensure positive parameters

println("\nInitial parameter guess: $p_guess")
println("Initial cost: $(multistep_prediction_cost(p_guess, 5))")

# Optimize using BFGS
println("\nOptimizing parameters using multi-step prediction cost (horizon=5)...")
result = Optim.optimize(
    p -> multistep_prediction_cost(p, 45),
    p_guess,
    BFGS(),
    Optim.Options(
        show_trace = true,
        store_trace = true,
        show_every = 5,
        iterations = 100,
        g_tol = 1e-8,
    );
    autodiff = :forward
)

p_estimated = Optim.minimizer(result)
println("\n" * "="^60)
println("Optimization complete!")
println("True parameters:      $p_true")
println("Estimated parameters: $p_estimated")
println("Relative error:       $(abs.((p_estimated .- p_true) ./ p_true))")
println("="^60)

## Generate trajectories with estimated parameters for comparison

x_estimated = LowLevelParticleFilters.rollout(discrete_dynamics, x0, u, p_estimated)
y_estimated = [measurement(x, u[i], p_estimated, t[i]) for (i,x) in enumerate(x_estimated[1:end-1])]

x_initial_guess = LowLevelParticleFilters.rollout(discrete_dynamics, x0, u, p_guess)
y_initial_guess = [measurement(x, u[i], p_guess, t[i]) for (i,x) in enumerate(x_initial_guess[1:end-1])]

## Visualize results

# Plot 1: Measurements and predictions
p1 = plot(t, [y[i][1] for i in 1:length(y)], label="Measurements",
          marker=:circle, markersize=2, alpha=0.5,
          xlabel="Time [s]", ylabel="Angle [rad]", title="Multi-Step Prediction Estimation")
plot!(p1, t, [x_true[i][1] for i in 1:length(t)], label="True trajectory", lw=2)
plot!(p1, t, [x_initial_guess[i][1] for i in 1:length(t)], label="Initial guess",
      lw=2, linestyle=:dot, color=:red, alpha=0.7)
plot!(p1, t, [x_estimated[i][1] for i in 1:length(t)], label="Estimated model",
      lw=2, linestyle=:dash)

# Plot 2: Parameter estimates
param_names = ["Damping b", "g/L", "1/m"]
p2 = groupedbar(param_names, [p_true p_guess p_estimated],
         label=["True" "Initial guess" "Estimated"],
         title="Parameter Comparison",
         ylabel="Parameter Value",
         legend=:topright, bar_width=0.7)

# Plot 3: Cost function over optimization
p3 = plot(getproperty.(result.trace, :value),
         xlabel="Iteration",
         ylabel="Cost",
         title="Optimization Convergence",
         yscale=:log10,
         lw=2,
         legend=false)

# Combine plots
plot(p1, p2, p3, layout=(3,1), size=(800, 900))
display(current())
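
## Optional: run the EKF over the full data set with the estimated parameters and plot
# the filtered solution. A minimal sketch, assuming `forward_trajectory` and its plot
# recipe work as in the LowLevelParticleFilters documentation:
ekf_est = ExtendedKalmanFilter(discrete_dynamics, measurement, R1, R2,
                               SimpleMvNormal(x0, R1); nu, ny, p=p_estimated)
sol_est = forward_trajectory(ekf_est, u, y, p_estimated)
plot(sol_est) |> display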
