Basics of the Kalman Filter

Posted at 2024-10-12

P114

import numpy as np
import matplotlib.pyplot as plt

# 定数の定義 (Define constants)
A = 1
B = 1
C = 1
Q = 1  # システム雑音の共分散 (System noise covariance)
R = 2  # 観測雑音の共分散 (Observation noise covariance)
N = 300  # データ数 (Number of data points)

# 状態と観測の初期化 (Initialize state and observation)
x = np.zeros(N)
y = np.zeros(N)

# ノイズの生成 (Generate noise)
v = np.random.randn(N) * np.sqrt(Q)  # システム雑音 (System noise)
w = np.random.randn(N) * np.sqrt(R)  # 観測雑音 (Observation noise)

# 状態空間モデルを使ったデータ生成 (Generate data using state-space model)
x[0] = 0  # 初期状態 (Initial state)
y[0] = C * x[0] + w[0]  # 初期観測 (Initial observation)

for k in range(1, N):
    x[k] = A * x[k-1] + B * v[k-1]  # 状態の更新 (State update)
    y[k] = C * x[k] + w[k]  # 観測の更新 (Observation update)

# カルマンフィルタによる状態推定 (Kalman filter state estimation)
xhat = np.zeros(N)  # 状態推定値 (Estimated state)
P = 0  # 誤差共分散 (Error covariance)
xhat[0] = 0  # 初期推定値 (Initial estimate)

# カルマンフィルタの実行 (Run Kalman filter)
for k in range(1, N):
    # 予測ステップ (Prediction step)
    xhatm = A * xhat[k-1]  # 時間更新 (Time update)
    Pm = A * P * A + Q  # 誤差共分散の更新 (Covariance update)
    
    # カルマンゲインの計算 (Calculate Kalman gain)
    G = Pm * C / (C * Pm * C + R)
    
    # 観測更新 (Observation update)
    xhat[k] = xhatm + G * (y[k] - C * xhatm)  # 状態更新 (State update)
    P = (1 - G * C) * Pm  # 誤差共分散の更新 (Covariance update)

# 結果のプロット (Plot results)
plt.figure(figsize=(10,6))
plt.plot(range(N), y, 'k:', label='Measured')
plt.plot(range(N), x, '--', label='True State')
plt.plot(range(N), xhat, 'b', label='Estimated State')
plt.xlabel('No. of samples')
plt.ylabel('Value')
plt.legend()
plt.title('Kalman Filter State Estimation')
plt.grid()
plt.show()
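
One quick check worth adding here (my addition, not part of the book's listing): the scalar Riccati recursion above converges, so the gain G settles to a constant; iterating it standalone shows the steady state.

# 定常ゲインの確認 (Convergence check: iterate the scalar Riccati recursion to its steady state)
P_ss = 0.0
for _ in range(100):
    Pm_ss = P_ss + Q              # prediction: A*P*A + Q with A = 1
    G_ss = Pm_ss / (Pm_ss + R)    # gain: Pm*C / (C*Pm*C + R) with C = 1
    P_ss = (1 - G_ss) * Pm_ss     # update
print("Steady-state gain:", G_ss, " steady-state covariance:", P_ss)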

P120

import numpy as np
import matplotlib.pyplot as plt

# システムの定義 (System definition)
A = np.array([[0, -0.7], [1, -1.5]])  # 状態遷移行列 (State transition matrix)
b = np.array([[0.5], [1]])  # 制御行列 (Control matrix)
c = np.array([0, 1])  # 観測行列 (Observation matrix)

# データ数と雑音の設定 (Data size and noise settings)
N = 100  # データ数 (Number of data points)
Q = 1  # システム雑音の共分散 (System noise covariance)
R = 0.1  # 観測雑音の共分散 (Observation noise covariance)

# 観測データの生成 (Generate observation data)
v = np.random.randn(N) * np.sqrt(Q)  # システム雑音 (System noise; 1-D arrays so y[k] stays a scalar)
w = np.random.randn(N) * np.sqrt(R)  # 観測雑音 (Observation noise)

# 状態と観測データの初期化 (Initialize state and observation data)
x = np.zeros((N, 2))  # 状態 (State)
y = np.zeros(N)  # 観測 (Observation)

# 初期状態 (Initial state)
y[0] = np.dot(c, x[0, :]) + w[0]

# 状態空間モデルによるデータ生成 (Generate data using state-space model)
for k in range(1, N):
    x[k, :] = np.dot(A, x[k-1, :]) + b.flatten() * v[k-1]  # 状態の更新 (State update)
    y[k] = np.dot(c, x[k, :]) + w[k]  # 観測の更新 (Observation update)

# カルマンフィルタによる推定 (Kalman filter state estimation)
xhat = np.zeros((N, 2))  # 状態推定値 (Estimated state)
P = np.eye(2)  # 誤差共分散行列 (Error covariance matrix)

# 初期推定値 (Initial estimate)
gamma = 1
P = gamma * np.eye(2)  # 誤差共分散の初期値 (Initial error covariance)
xhat[0, :] = [0, 0]  # 初期推定値 (Initial state estimate)

# カルマンフィルタの更新 (Kalman filter update)
for k in range(1, N):
    # 予測ステップ (Prediction step)
    xhatm = np.dot(A, xhat[k-1, :])  # 時間更新 (Time update)
    Pm = np.dot(A, np.dot(P, A.T)) + Q * np.dot(b, b.T)  # 誤差共分散の更新 (Covariance update; the noise enters through b, so the added term is b Q b^T)
    
    # カルマンゲインの計算 (Calculate Kalman gain)
    G = np.dot(Pm, c.T) / (np.dot(c, np.dot(Pm, c.T)) + R)
    
    # 観測更新 (Observation update)
    xhat[k, :] = xhatm + G * (y[k] - np.dot(c, xhatm))  # 状態更新 (State update)
    P = Pm - np.outer(G, np.dot(c, Pm))  # 誤差共分散の更新 (Covariance update)

# 結果のプロット (Plot results)
plt.figure(figsize=(10,8))

# x1 の推定結果 (Estimation of x1)
plt.subplot(2, 1, 1)
plt.plot(range(N), x[:, 0], 'r--', label='True x1')
plt.plot(range(N), xhat[:, 0], 'b', label='Estimated x1')
plt.xlabel('Samples')
plt.ylabel('x1')
plt.legend()
plt.grid()

# x2 の推定結果と観測値 (Estimation of x2 and measurements)
plt.subplot(2, 1, 2)
plt.plot(range(N), y, 'k:', label='Measured y')
plt.plot(range(N), x[:, 1], 'r--', label='True x2')
plt.plot(range(N), xhat[:, 1], 'b', label='Estimated x2')
plt.xlabel('Samples')
plt.ylabel('x2')
plt.legend()
plt.grid()

plt.tight_layout()
plt.show()

P126

import numpy as np
import matplotlib.pyplot as plt

# ランダム2値信号の生成 (Generate a random binary ±1 signal; a simple stand-in for a true M-sequence PRBS)
def prbs(N):
    return np.random.choice([-1.0, 1.0], size=N)

# 制御入力(長さ127の擬似ランダム2値系列)の生成 (Generate the control input: a pseudo-random binary sequence of length 127)
u = prbs(127)

# システムの定義 (System definition)
A = np.array([[0, -0.7], [1, -1.5]])  # 状態遷移行列 (State transition matrix)
b = np.array([0.5, 1])  # 雑音入力行列 (Noise input matrix; multiplies v below)
c = np.array([0, 1])  # 観測行列 (Observation matrix)
h = np.array([0.5, 1])  # 制御入力行列 (Input matrix for the control input u)

# データ数と雑音の設定 (Data size and noise settings)
N = len(u)  # データ数 (Number of data points)
Q = 1  # システム雑音の共分散 (System noise covariance)
R = 0.1  # 観測雑音の共分散 (Observation noise covariance)

# 観測データの生成 (Generate observation data)
v = np.random.randn(N) * np.sqrt(Q)  # システム雑音 (System noise)
w = np.random.randn(N) * np.sqrt(R)  # 観測雑音 (Observation noise)

# 状態と観測データの初期化 (Initialize state and observation data)
x = np.zeros((N, 2))  # 状態 (State)
y = np.zeros(N)  # 観測 (Observation)

# 初期状態 (Initial state)
y[0] = np.dot(c, x[0, :]) + w[0]

# 状態空間モデルによるデータ生成 (Generate data using state-space model)
for k in range(1, N):
    x[k, :] = np.dot(A, x[k-1, :]) + h * u[k-1] + b * v[k-1]  # 状態の更新 (State update)
    y[k] = np.dot(c, x[k, :]) + w[k]  # 観測の更新 (Observation update)

# カルマンフィルタによる推定 (Kalman filter state estimation)
xhat = np.zeros((N, 2))  # 状態推定値 (Estimated state)
P = np.eye(2)  # 誤差共分散行列 (Error covariance matrix)

# 初期推定値 (Initial estimate)
gamma = 1
P = gamma * np.eye(2)  # 誤差共分散の初期値 (Initial error covariance)
xhat[0, :] = [0, 0]  # 初期推定値 (Initial state estimate)

# カルマンフィルタの更新 (Kalman filter update)
for k in range(1, N):
    # 予測ステップ (Prediction step)
    xhatm = np.dot(A, xhat[k-1, :]) + h * u[k-1]  # 時間更新 (Time update)
    Pm = np.dot(A, np.dot(P, A.T)) + Q * np.outer(b, b)  # 誤差共分散の更新 (Covariance update; the system noise enters through b, so the added term is b Q b^T)
    
    # カルマンゲインの計算 (Calculate Kalman gain)
    G = np.dot(Pm, c.T) / (np.dot(c, np.dot(Pm, c.T)) + R)
    
    # 観測更新 (Observation update)
    xhat[k, :] = xhatm + G * (y[k] - np.dot(c, xhatm))  # 状態更新 (State update)
    P = Pm - np.outer(G, np.dot(c, Pm))  # 誤差共分散の更新 (Covariance update)

# 結果のプロット (Plot results)
plt.figure(figsize=(10,10))

# 制御入力 u のプロット (Plot control input u)
plt.subplot(3, 1, 1)
plt.step(range(N), u, 'r')
plt.xlabel('Samples (k)')
plt.ylabel('u')
plt.xlim([1, N])
plt.ylim([1.2 * np.min(u), 1.2 * np.max(u)])

# x1 の推定結果 (Estimation of x1)
plt.subplot(3, 1, 2)
plt.plot(range(N), x[:, 0], 'r', label='True x1')
plt.plot(range(N), xhat[:, 0], 'b', label='Estimated x1')
plt.xlabel('Samples (k)')
plt.ylabel('x1')
plt.legend()
plt.xlim([1, N])

# x2 の推定結果と観測値 (Estimation of x2 and measurements)
plt.subplot(3, 1, 3)
plt.plot(range(N), y, 'k:', label='Measured y')
plt.plot(range(N), x[:, 1], 'r--', label='True x2')
plt.plot(range(N), xhat[:, 1], 'b', label='Estimated x2')
plt.xlabel('Samples (k)')
plt.ylabel('x2')
plt.legend()
plt.xlim([1, N])

plt.tight_layout()
plt.show()

P136

import numpy as np
from control import ss, dlqe

# システム行列 (System matrices)
A = np.array([[0.7683, -0.4940, 0.1129],
              [0.6202,  0,      0     ],
              [-0.0817, 1,      0     ]])

B = np.array([[-0.3832],
              [ 0.5919],
              [ 0.5191]])

C = np.array([[0, 1, 0]])  # 観測行列 (Observation matrix)

# システム雑音と観測雑音の分散 (System noise covariance and observation noise covariance)
Q = np.array([[1]])
R = np.array([[1]])

# 離散時間状態空間モデル (State-space model)
Plant = ss(A, B, C, 0, True)  # dt=True marks a discrete-time system with unspecified sampling period

# 定常カルマンフィルタの計算 (Steady-state Kalman filter calculation; dlqe solves the discrete-time estimation Riccati equation)
L, P, E = dlqe(A, B, C, Q, R)

# カルマンゲイン (Kalman gain)
print("Kalman gain (L):\n", L)

# 得られたカルマンフィルタの状態空間モデル (Steady-state Kalman filter state-space model)
# xhat[k+1] = (A - L C) xhat[k] + [B  L] [u[k]; y[k]],  yhat[k] = C xhat[k]
Akf = A - np.dot(L, C)
Bkf = np.hstack([B, L])
Ckf = C
Dkf = np.zeros((1, 2))

# カルマンフィルタの状態空間モデルを出力 (Display the state-space model)
print("State-space model (A):\n", Akf)
print("State-space model (B):\n", Bkf)
print("State-space model (C):\n", Ckf)
print("State-space model (D):\n", Dkf)

P144

import numpy as np
import matplotlib.pyplot as plt

# パラメータ設定 (Parameter settings)
n = 100  # データ数 (Number of data points)
lam = 0.99  # 忘却係数 (Forgetting factor)
delta = 1000  # 初期誤差共分散行列のスケーリング (Initial P scaling)

# システムの真のパラメータ (True system parameters)
true_theta = np.array([1.5, -0.7, 1.0, 0.5])  # 真のパラメータ (a1, a2, b1, b2)

# 入力信号 (Input signal)
u = np.random.randn(n)

# 雑音 (Noise)
w = 0.1 * np.random.randn(n)

# 出力信号の初期化 (Initialize output signal)
y = np.zeros(n)

# 差分方程式を使って出力データを生成 (Generate output data using difference equation)
for k in range(2, n):
    y[k] = 1.5 * y[k-1] - 0.7 * y[k-2] + u[k-1] + 0.5 * u[k-2] + w[k]

# RLSによるパラメータ推定の初期化 (Initialize RLS parameters)
theta_hat = np.zeros((4, n))  # 推定されるパラメータ (Estimated parameters)
P = delta * np.eye(4)  # 誤差共分散行列 (Error covariance matrix)

# 推定のためのデータの準備 (Prepare data for estimation)
for k in range(2, n):
    phi_k = np.array([y[k-1], y[k-2], u[k-1], u[k-2]])  # 回帰ベクトル (Regression vector)

    # カルマンゲインの計算 (Compute Kalman gain)
    K = P @ phi_k / (lam + phi_k.T @ P @ phi_k)
    
    # パラメータの更新 (Update parameters)
    theta_hat[:, k] = theta_hat[:, k-1] + K * (y[k] - phi_k.T @ theta_hat[:, k-1])
    
    # 誤差共分散行列の更新 (Update error covariance matrix)
    P = (P - np.outer(K, phi_k.T @ P)) / lam

# 結果のプロット (Plot results)
plt.figure(figsize=(10, 6))

# 推定されたパラメータのプロット (Plot estimated parameters)
for i in range(4):
    plt.plot(theta_hat[i, :], label=f'Estimated theta_{i+1}')
    
# 真のパラメータのプロット (Plot true parameters)
for i, true_param in enumerate(true_theta):
    plt.axhline(y=true_param, color='k', linestyle='--', label=f'True theta_{i+1}')

plt.xlabel('Time (k)')
plt.ylabel('Parameter value')
plt.legend()
plt.grid(True)
plt.title('RLS Parameter Estimation')
plt.show()

# 推定された最終的なパラメータを表示 (Display final estimated parameters)
print("Final estimated parameters:", theta_hat[:, -1])

P146

import numpy as np
import matplotlib.pyplot as plt

# パラメータ設定 (Parameter settings)
n = 50  # データ数 (Number of data points)
lam = 1.0  # 忘却係数 (Forgetting factor)
delta = 1.0  # 初期誤差共分散行列のスケーリング (Initial P scaling)

# システムの真のパラメータ (True system parameters)
true_theta = np.array([-1.5, 0.7, 1.0, 0.5])  # 真のパラメータ (a1, a2, b1, b2), ordered to match the regression vector below

# 入力信号 (Input signal)
u = np.sign(np.random.randn(n))  # 符号付きランダム信号 (random ±1 signal)

# 雑音 (Noise)
w = 0.2 * np.random.randn(n)

# 出力信号の初期化 (Initialize output signal)
y = np.zeros(n)

# 差分方程式を使って出力データを生成 (Generate output data using difference equation)
for k in range(2, n):
    y[k] = 1.5 * y[k-1] - 0.7 * y[k-2] + u[k-1] + 0.5 * u[k-2] + w[k]

# RLSによるパラメータ推定の初期化 (Initialize RLS parameters)
theta_hat = np.zeros((4, n))  # 推定されるパラメータ (Estimated parameters)
P = delta * np.eye(4)  # 誤差共分散行列 (Error covariance matrix)

# 推定のためのデータの準備 (Prepare data for estimation)
for k in range(2, n):
    phi_k = np.array([-y[k-1], -y[k-2], u[k-1], u[k-2]])  # 回帰ベクトル (Regression vector)

    # カルマンゲインの計算 (Compute Kalman gain)
    K = P @ phi_k / (lam + phi_k.T @ P @ phi_k)
    
    # パラメータの更新 (Update parameters)
    theta_hat[:, k] = theta_hat[:, k-1] + K * (y[k] - phi_k.T @ theta_hat[:, k-1])
    
    # 誤差共分散行列の更新 (Update error covariance matrix)
    P = (P - np.outer(K, phi_k.T @ P)) / lam

# 結果のプロット (Plot results)
plt.figure(figsize=(10, 6))

# 推定されたパラメータのプロット (Plot estimated parameters)
for i in range(4):
    plt.plot(theta_hat[i, :], label=f'Estimated theta_{i+1}')
    
# 真のパラメータのプロット (Plot true parameters)
for i, true_param in enumerate(true_theta):
    plt.axhline(y=true_param, color='k', linestyle='--', label=f'True theta_{i+1}')

plt.xlabel('Time (k)')
plt.ylabel('Parameter value')
plt.legend()
plt.grid(True)
plt.title('RLS Parameter Estimation')
plt.show()

# 推定された最終的なパラメータを表示 (Display final estimated parameters)
print("Final estimated parameters:", theta_hat[:, -1])

P161

import numpy as np
import matplotlib.pyplot as plt

# 非線形システムの関数 (Nonlinear system functions)
def f(x):
    return x + 3 * np.cos(x / 10)

def h(x):
    return x**2 + 3 / 10 * np.sin(x / 10)

def F_jacobian(x):
    return 1 - 3 / 10 * np.sin(x / 10)  # fのヤコビアン (Jacobian of f)

def H_jacobian(x):
    return 2 * x + 3 / 100 * np.cos(x / 10)  # hのヤコビアン (Jacobian of h: d/dx[(3/10)sin(x/10)] = (3/100)cos(x/10))

# パラメータの設定 (Parameter settings)
N = 50  # データ数 (Number of data points)
Q = 1  # システム雑音の分散 (System noise variance)
R = 100  # 観測雑音の分散 (Measurement noise variance)

# システム雑音と観測雑音の生成 (Generate system and measurement noise)
v = np.random.randn(N) * np.sqrt(Q)  # システム雑音 (System noise)
w = np.random.randn(N) * np.sqrt(R)  # 観測雑音 (Measurement noise)

# 初期化 (Initialize)
x = np.zeros(N)
y = np.zeros(N)
x[0] = 10
y[0] = h(x[0]) + w[0]

# システムのシミュレーション (System simulation)
for k in range(1, N):
    x[k] = f(x[k-1]) + v[k-1]
    y[k] = h(x[k]) + w[k]

# 拡張カルマンフィルタ (Extended Kalman Filter - EKF)
xhat = np.zeros(N)  # 推定値 (Estimated states)
P = np.zeros(N)  # 誤差共分散行列 (Error covariance)
xhat[0] = 10
P[0] = 1  # 初期推定誤差共分散 (Initial estimate error covariance)

for k in range(1, N):
    # 予測ステップ (Prediction step)
    xhatm = f(xhat[k-1])  # 事前推定 (Predicted state)
    Pm = F_jacobian(xhat[k-1]) * P[k-1] * F_jacobian(xhat[k-1]).T + Q  # 事前誤差共分散 (Predicted error covariance)
    
    # 観測更新ステップ (Measurement update step)
    G = Pm * H_jacobian(xhatm).T / (H_jacobian(xhatm) * Pm * H_jacobian(xhatm).T + R)  # カルマンゲイン (Kalman gain)
    xhat[k] = xhatm + G * (y[k] - h(xhatm))  # 事後推定 (Updated state)
    P[k] = (1 - G * H_jacobian(xhatm)) * Pm  # 事後誤差共分散 (Updated error covariance)

# 結果の表示 (Plot results)
plt.figure()
plt.plot(range(N), x, label='True state')
plt.plot(range(N), xhat, label='Estimated state (EKF)')
plt.xlabel('Time step')
plt.ylabel('State')
plt.legend()
plt.title('Extended Kalman Filter (EKF) - State Estimation')
plt.grid(True)
plt.show()

# 推定された最終的な状態を表示 (Display final estimated state)
print("Final estimated state:", xhat[-1])

P171

import numpy as np
import matplotlib.pyplot as plt

# 非線形変換関数 (Nonlinear transformation functions)
def f(x):
    return np.array([x[0] * np.cos(x[1]), x[0] * np.sin(x[1])])

# ヤコビアン行列 (Jacobian matrix of f)
def F_jacobian(x):
    return np.array([[np.cos(x[1]), -x[0] * np.sin(x[1])],
                     [np.sin(x[1]), x[0] * np.cos(x[1])]])

# UKFのシグマポイント計算用の関数 (Sigma point calculation for UKF)
def sigma_points(mu, sigma, kappa):
    n = len(mu)
    sigma_sqrt = np.linalg.cholesky((n + kappa) * sigma)
    points = np.zeros((2 * n + 1, n))
    points[0] = mu
    for i in range(n):
        points[i + 1] = mu + sigma_sqrt[:, i]      # columns of the Cholesky factor, since L L^T = (n + kappa) * sigma
        points[i + 1 + n] = mu - sigma_sqrt[:, i]
    return points

# UKFアルゴリズム (UKF algorithm)
def ukf(mu, sigma, y, Q, R, kappa=1):
    n = len(mu)
    
    # シグマポイントの生成 (Generate sigma points)
    sigma_pts = sigma_points(mu, sigma, kappa)
    
    # 各シグマポイントの変換 (Transform each sigma point)
    Y = np.array([f(pt) for pt in sigma_pts])
    
    # 重みの計算 (Calculate weights)
    w0 = kappa / (n + kappa)
    wi = 1 / (2 * (n + kappa))
    
    # 予測される平均 (Predicted mean)
    y_pred = w0 * Y[0]
    for i in range(1, 2 * n + 1):
        y_pred += wi * Y[i]
    
    # 共分散行列の計算 (Calculate covariance matrices)
    Pyy = w0 * np.outer(Y[0] - y_pred, Y[0] - y_pred)
    for i in range(1, 2 * n + 1):
        Pyy += wi * np.outer(Y[i] - y_pred, Y[i] - y_pred)
    
    Pyy += R
    
    # クロス共分散行列の計算 (Cross-covariance matrix)
    Pxy = w0 * np.outer(sigma_pts[0] - mu, Y[0] - y_pred)
    for i in range(1, 2 * n + 1):
        Pxy += wi * np.outer(sigma_pts[i] - mu, Y[i] - y_pred)
    
    # カルマンゲインの計算 (Calculate Kalman gain)
    K = np.dot(Pxy, np.linalg.inv(Pyy))
    
    # 状態と共分散行列の更新 (Update state and covariance)
    mu = mu + np.dot(K, (y - y_pred))
    sigma = sigma - np.dot(K, np.dot(Pyy, K.T))
    
    return mu, sigma

# 初期パラメータの設定 (Initial parameter setup)
N = 500
mu = np.array([10, np.pi / 2])
sigma = np.diag([50, 1])
Q = np.diag([1, 0.025])
R = np.eye(2)  # 観測雑音の共分散 (Measurement noise covariance; y is 2-D here, so R must be a 2x2 matrix)

# サンプルデータの生成 (Generate sample data)
np.random.seed(0)
x = np.random.multivariate_normal(mu, sigma, N)

# 非線形関数fを使用して出力データを生成 (Generate output data using function f)
y = np.array([f(x_i) for x_i in x])

# UKFの実行 (Run UKF)
mu_est = np.copy(mu)
sigma_est = np.copy(sigma)
mu_estimates = []
sigma_estimates = []

for k in range(N):
    mu_est, sigma_est = ukf(mu_est, sigma_est, y[k], Q, R)
    mu_estimates.append(mu_est)
    sigma_estimates.append(sigma_est)

mu_estimates = np.array(mu_estimates)

# 結果のプロット (Plot results)
plt.figure()
plt.plot(x[:, 0], x[:, 1], 'r.', label='True states')
plt.plot(mu_estimates[:, 0], mu_estimates[:, 1], 'b-', label='UKF estimated states')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.title('UKF State Estimation')
plt.grid(True)
plt.show()
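
The unscented transform above approximates E[f(x)] with 2n+1 weighted sigma points; a quick Monte Carlo comparison (my addition) makes that concrete.

# シグマポイント平均とモンテカルロ平均の比較 (Compare the sigma-point mean of f(x) with a Monte Carlo estimate)
pts = sigma_points(mu, sigma, 1)   # kappa = 1, n = 2
w0, wi = 1 / 3, 1 / 6              # w0 = kappa/(n+kappa), wi = 1/(2(n+kappa))
ut_mean = w0 * f(pts[0]) + sum(wi * f(p) for p in pts[1:])
mc_mean = np.mean([f(s) for s in np.random.multivariate_normal(mu, sigma, 100000)], axis=0)
print("Unscented-transform mean:", ut_mean, " Monte Carlo mean:", mc_mean)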

P178

import numpy as np
import matplotlib.pyplot as plt

# 定数 (Physical parameters)
rho = 1.23  # 空気密度 (air density)
g = 9.81  # 重力加速度 (gravity)
eta = 6e3  # 大気密度の高度スケール (altitude scale of the air-density term exp(-x1/eta))
M = 3e4  # 観測方程式で用いる水平距離 (horizontal distance used in the range equation below)
a = 3e4  # 観測方程式で用いる高度オフセット (altitude offset used in the range equation below)

# シミュレーションパラメータ (Simulation parameters)
T = 0.5  # オイラー法離散化周期 (discretization step)
EndTime = 30  # シミュレーション終了時刻 (simulation end time)
time = np.arange(0, EndTime + T, T)  # 時間ステップ (time steps)
N = len(time)  # データ数 (number of data points)
n = 3  # 状態変数の数 (number of state variables)
R = 4e3  # 観測雑音の分散 (measurement noise variance)
Q = 0  # システム雑音なし (no system noise)

# 初期状態 (Initial state)
x_init = np.array([90000, -6000, 0.003])

# システム方程式 f(x) (System equation f(x))
def f(x):
    return np.array([
        x[0] + T * x[1],
        x[1] + T * (0.5 * rho * np.exp(-x[0] / eta) * x[1]**2 * x[2] - g),
        x[2]
    ])

# 観測方程式 h(x) (Measurement equation h(x))
def h(x):
    return np.sqrt(M**2 + (x[0] - a)**2)

# f のヤコビアン (Jacobian of f for EKF)
def A_jacobian(x):
    return np.array([
        [1, T, 0],
        [-0.5 * T * rho / eta * np.exp(-x[0] / eta) * x[1]**2 * x[2], 
         1 + T * rho * np.exp(-x[0] / eta) * x[1] * x[2], 
         0.5 * T * rho * np.exp(-x[0] / eta) * x[1]**2],
        [0, 0, 1]
    ])

# h のヤコビアン (Jacobian of h for EKF)
def C_jacobian(x):
    return np.array([(x[0] - a) / np.sqrt(M**2 + (x[0] - a)**2), 0, 0])

# 観測データ生成 (Generate measurement data)
w = np.random.randn(N) * np.sqrt(R)  # 観測雑音 (measurement noise)
x = np.zeros((N, n))  # 状態変数の記憶領域 (storage for state variables)
y = np.zeros(N)  # 観測データの記憶領域 (storage for measurements)

x[0] = x_init  # 初期状態 (initial state)
y[0] = h(x[0]) + w[0]  # 初期観測データ (initial measurement)

# 状態の時間更新 (State time update)
for k in range(1, N):
    x[k] = f(x[k-1])  # 次の状態 (next state)
    y[k] = h(x[k]) + w[k]  # 観測データ (measurement data)

# 結果をプロット (Plot results)
plt.figure()
plt.plot(time, y, 'r.', label='Measurements')
plt.plot(time, h(x.T), 'b-', label='True values')
plt.xlabel('Time [s]')
plt.ylabel('Measurement')
plt.legend()
plt.title('State and Measurements over Time')
plt.grid(True)
plt.show()

P183

import numpy as np
import matplotlib.pyplot as plt

# パラメータ設定 (Parameter setup)
N = 50  # データ数 (Number of data points)
n = 1  # 状態変数の個数 (Number of state variables)
Q = 1  # システム雑音の分散 (System noise variance)
R = 3  # 観測雑音の分散 (Measurement noise variance)
kappa = 2  # UKFのスケーリングパラメータ (Scaling parameter for UKF)

# システムモデル (System model)
def f(x, k):
    return 0.2 * x + 25 * x / (1 + x**2) + 8 * np.cos(1.2 * k)

def h(x):
    return x**2 / 20

# fのヤコビアン (Jacobian of f for EKF)
def A_jacobian(x):
    return 0.2 + 25 * (1 - x**2) / (1 + x**2)**2

# hのヤコビアン (Jacobian of h for EKF)
def C_jacobian(x):
    return x / 10

# 観測データの生成 (Generate measurement data)
v = np.random.randn(N) * np.sqrt(Q)  # システム雑音 (system noise)
w = np.random.randn(N) * np.sqrt(R)  # 観測雑音 (measurement noise)
x = np.zeros(N)  # 真の状態 (True state)
y = np.zeros(N)  # 観測データ (Measurement data)

x[0] = 0  # 初期状態 (Initial state)
y[0] = h(x[0]) + w[0]  # 初期観測データ (Initial measurement)

# 真の状態と観測データの時間更新 (Update true state and measurements over time)
for k in range(1, N):
    x[k] = f(x[k-1], k) + v[k-1]
    y[k] = h(x[k]) + w[k]

# EKFアルゴリズム (EKF Algorithm)
xhat_ekf = np.zeros(N)  # EKFによる推定値 (EKF estimated state)
P_ekf = 1  # 誤差共分散行列の初期値 (Initial error covariance)

for k in range(1, N):
    # 予測ステップ (Prediction step)
    xhat_ekf[k] = f(xhat_ekf[k-1], k)
    P_ekf = A_jacobian(xhat_ekf[k-1]) * P_ekf * A_jacobian(xhat_ekf[k-1]) + Q
    
    # カルマンゲイン (Kalman gain)
    K_ekf = P_ekf * C_jacobian(xhat_ekf[k]) / (C_jacobian(xhat_ekf[k]) * P_ekf * C_jacobian(xhat_ekf[k]) + R)
    
    # 観測更新ステップ (Measurement update step)
    xhat_ekf[k] += K_ekf * (y[k] - h(xhat_ekf[k]))
    P_ekf = (1 - K_ekf * C_jacobian(xhat_ekf[k])) * P_ekf

# UKFアルゴリズム (UKF Algorithm)
xhat_ukf = np.zeros(N)  # UKFによる推定値 (UKF estimated state)
P_ukf = 1  # 誤差共分散行列の初期値 (Initial error covariance)

def sigma_points(x, P, kappa):
    n = len(x)
    sigmas = np.zeros((2 * n + 1, n))
    sigmas[0] = x
    sqrt_P = np.atleast_1d(np.sqrt((n + kappa) * P))  # n = 1 here, so the scalar square root suffices
    for i in range(n):
        sigmas[i + 1] = x + sqrt_P[i]
        sigmas[i + n + 1] = x - sqrt_P[i]
    return sigmas

for k in range(1, N):
    # シグマポイントの生成 (Generate sigma points)
    sigmas = sigma_points(np.array([xhat_ukf[k-1]]), P_ukf, kappa)
    
    # シグマポイントの変換 (Transform sigma points)
    sigmas_f = np.array([f(sigma[0], k) for sigma in sigmas])
    
    # 重みの計算 (UKF weights: w0 = kappa/(n+kappa), wi = 1/(2(n+kappa)), with n = 1)
    weights = np.array([kappa, 0.5, 0.5]) / (1 + kappa)

    # 予測ステップ (Prediction step; weighted sigma-point statistics)
    xhat_ukf[k] = weights @ sigmas_f
    P_ukf = weights @ (sigmas_f - xhat_ukf[k])**2 + Q

    # 観測変換 (Measurement transformation; the propagated sigma points are reused for simplicity)
    sigmas_h = np.array([h(sigma_f) for sigma_f in sigmas_f])
    yhat = weights @ sigmas_h

    # カルマンゲインの計算 (Kalman gain)
    P_yy = weights @ (sigmas_h - yhat)**2 + R
    P_xy = weights @ ((sigmas_f - xhat_ukf[k]) * (sigmas_h - yhat))
    K_ukf = P_xy / P_yy

    # 観測更新 (Measurement update)
    xhat_ukf[k] += K_ukf * (y[k] - yhat)
    P_ukf -= K_ukf * P_yy * K_ukf

# 結果のプロット (Plot results)
plt.figure()
plt.plot(range(N), x, 'k-', label='True state')
plt.plot(range(N), xhat_ekf, 'b--', label='EKF estimate')
plt.plot(range(N), xhat_ukf, 'r-.', label='UKF estimate')
plt.xlabel('k')
plt.ylabel('x')
plt.legend()
plt.grid(True)
plt.show()

# RMSEの計算 (Calculate RMSE)
rmse_ekf = np.sqrt(np.mean((x - xhat_ekf)**2))
rmse_ukf = np.sqrt(np.mean((x - xhat_ukf)**2))
print(f"RMSE (EKF): {rmse_ekf:.3f}")
print(f"RMSE (UKF): {rmse_ukf:.3f}")

P187

import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import sawtooth

# 物理パラメータの設定 (Physical parameter setup)
m = 2.0  # 質量 (mass)
k = 0.7  # バネ定数 (spring constant)
c_true = 1.0  # 真の摩擦係数 (true damping coefficient)
dT = 0.01  # 離散化時間ステップ (time step)
N = 2000  # データ数 (number of data points)
t = np.arange(0, N) * dT  # 時間 (time)
u = 4 * sawtooth(t * np.sqrt(2)) + 10 * np.sin(t)  # 入力信号 (input signal; sawtooth comes from scipy.signal, not numpy)

# 状態遷移方程式 (State transition equation)
def system_dynamics(x, u, c):
    x1, x2 = x
    dxdt = np.array([x2, (-k/m)*x1 - (c/m)*x2 + (1/m)*u])
    return dxdt

# 観測方程式 (Observation equation)
def observation(x):
    return x[0]

# システム雑音と観測雑音の設定 (Set system and measurement noise)
Q = np.diag([1e-5, 1e-5, 1e-6])  # システム雑音の共分散行列 (3x3 to match the augmented state; a small pseudo-noise on the parameter keeps it adaptable)
R = 0.1  # 観測雑音の分散 (measurement noise variance)

# 初期状態 (Initial state)
x_true = np.zeros((N, 2))
x_true[0] = [0, 0]  # 初期状態 (initial condition)

# シミュレーションによる真の状態と観測データの生成 (Generate true state and observations)
y = np.zeros(N)
for n in range(1, N):
    x_true[n] = x_true[n-1] + dT * system_dynamics(x_true[n-1], u[n], c_true)
    y[n] = observation(x_true[n]) + np.random.randn() * np.sqrt(R)

# EKFによるパラメータ推定 (EKF for parameter estimation)
xhat = np.zeros((N, 3))  # 状態推定 (state estimate)
P = np.diag([10, 10, 10])  # 誤差共分散行列 (initial covariance)
xhat[0] = [0, 0, 0.1]  # 初期推定値 (initial estimates)

# EKFの更新 (EKF update step)
for n in range(1, N):
    # 予測ステップ (Prediction step; the continuous-time Jacobian A is discretized as F = I + dT*A)
    A = np.array([[0, 1, 0],
                  [-k/m, -xhat[n-1, 2]/m, -xhat[n-1, 1]/m],
                  [0, 0, 0]])
    F = np.eye(3) + dT * A
    xhat_pred = xhat[n-1] + dT * np.array([xhat[n-1, 1], (-k/m)*xhat[n-1, 0] - (xhat[n-1, 2]/m)*xhat[n-1, 1] + (1/m)*u[n], 0])
    P_pred = F @ P @ F.T + Q

    # 観測更新ステップ (Measurement update step)
    C = np.array([1, 0, 0])
    K = P_pred @ C.T / (C @ P_pred @ C.T + R)
    y_pred = C @ xhat_pred
    xhat[n] = xhat_pred + K * (y[n] - y_pred)
    P = (np.eye(3) - np.outer(K, C)) @ P_pred

# 結果のプロット (Plot results)
plt.figure()
plt.subplot(3, 1, 1)
plt.plot(t, x_true[:, 0], 'r', label='True Position')
plt.plot(t, xhat[:, 0], 'b', label='Estimated Position')
plt.ylabel('Position')
plt.legend()

plt.subplot(3, 1, 2)
plt.plot(t, x_true[:, 1], 'r', label='True Velocity')
plt.plot(t, xhat[:, 1], 'b', label='Estimated Velocity')
plt.ylabel('Velocity')
plt.legend()

plt.subplot(3, 1, 3)
plt.plot(t, np.ones(N) * c_true, 'r', label='True Parameter c')
plt.plot(t, xhat[:, 2], 'b', label='Estimated Parameter c')
plt.xlabel('Time [s]')
plt.ylabel('Parameter c')
plt.legend()

plt.show()
P202

import numpy as np
import matplotlib.pyplot as plt

# データ数 (Number of data points)
N = 200

# 真値の生成 (Generate true values)
z = np.zeros(N)
d = np.random.randn(N) * np.sqrt(5)
z[0] = 10
for k in range(1, N):
    z[k] = z[k-1] + 2 * np.cos(0.05 * k) + d[k-1]

# センサ1とセンサ2の出力生成 (Generate sensor outputs)
a = 0.75
A = np.array([[a, 0, 0], [0, 1, 0], [0, 0, 0]])
B = np.array([[1, 0], [0, 1], [0, 0]])  # 2成分の雑音がx1とx2を駆動 (3x2 so the two noise components drive x1 and x2)
Q = np.diag([60, 60])
R = 1e-4
C = np.array([[1, 1, -1]])

# センサ誤差の生成 (Generate sensor errors)
v = np.random.randn(N, 2) @ np.sqrt(Q)
x = np.zeros((N, 3))
x[0] = [10, 20, 0]  # 初期状態 (Initial state)

# 時間更新 (Time update)
for k in range(1, N):
    x[k] = A @ x[k-1] + B @ v[k-1]

# センサの測定誤差 (Sensor measurement errors)
e1 = x[:, 0] + x[:, 1]  # センサ1の測定誤差 (Sensor 1 measurement error: drift plus bias, matching C = [1, 1, -1])
e2 = x[:, 2]            # センサ2の測定誤差 (Sensor 2 measurement error)

# 各センサの出力とその差分 (Sensor outputs and their difference)
y1 = z + e1
y2 = z + e2
y = y1 - y2  # 真値zは打ち消され、センサ誤差の差 e1 - e2 が残る (z cancels, leaving the error difference e1 - e2)

# 結果のプロット (Plot results)
plt.figure()
plt.subplot(3, 1, 1)
plt.plot(range(N), z, 'k-', label='True')
plt.plot(range(N), y1, 'r--', label='Sensor1')
plt.legend()
plt.xlabel('k')
plt.ylabel('z')

plt.subplot(3, 1, 2)
plt.plot(range(N), z, 'k-', label='True')
plt.plot(range(N), y2, 'r--', label='Sensor2')
plt.legend()
plt.xlabel('k')
plt.ylabel('z')

plt.subplot(3, 1, 3)
plt.plot(range(N), y, 'b--', label='y1 - y2 (error difference)')
plt.legend()
plt.xlabel('k')
plt.ylabel('e1 - e2')

plt.tight_layout()
plt.show()

P29


import numpy as np
import matplotlib.pyplot as plt

# Define system matrices (A, b, c) / システムの行列A, b, cを定義
A = np.array([[-0.5, 1.0], [-0.3, 0.0]])  # State transition matrix / 状態遷移行列
b = np.array([[1.0], [1.0]])  # Control matrix / 制御行列
c = np.array([[-2.5, 3.5]])  # Observation matrix / 観測行列

# Initial state / 初期状態
x = np.array([[0], [0]])

# Generate system noise (v) and observation noise (w) / システム雑音vと観測雑音wを生成
v = np.random.normal(0, 1, (1, 100))  # System noise / システム雑音
w = np.random.normal(0, 1, (1, 100))  # Observation noise / 観測雑音

# Initialize lists to store states and outputs / 状態ベクトルと出力yを保存するリストを初期化
states = []
y = []

# Simulate over 100 time steps / 100ステップのシミュレーション
for k in range(100):
    # Update the state equation / 状態方程式を更新
    x = A @ x + b * v[0, k]
    # Calculate the output / 出力yを計算
    y_k = c @ x + w[0, k]
    # Save the state and output / 状態と出力を保存
    states.append(x.flatten())
    y.append(y_k.flatten())

# Convert lists to numpy arrays for easier plotting / リストをNumPy配列に変換
states = np.array(states)
y = np.array(y)

# Plot the states and output / 状態ベクトルと出力yをプロット
plt.figure(figsize=(10, 6))

# Plot state variables / 状態変数をプロット
plt.subplot(2, 1, 1)
plt.plot(states[:, 0], label='State x1')
plt.plot(states[:, 1], label='State x2')
plt.title('State Variables Over Time')
plt.ylabel('State Value')
plt.legend()

# Plot output y / 出力yをプロット
plt.subplot(2, 1, 2)
plt.plot(y, label='Output y')
plt.title('Output y Over Time')
plt.xlabel('Time Step')
plt.ylabel('Output Value')
plt.legend()

plt.tight_layout()
plt.show()

P34


import numpy as np
import matplotlib.pyplot as plt

# System parameters (a1, a2, b0, b1, b2)
# ARMAモデルの係数 a1, a2, b0, b1, b2 を設定
a1 = 1.2  # Example value / 例の値
a2 = -0.5  # Example value / 例の値
b0 = 1.0  # Example value / 例の値
b1 = 0.5  # Example value / 例の値
b2 = 0.2  # Example value / 例の値

# Define the state-space matrices based on the ARMA model
# ARMAモデルからの状態空間行列を定義
A = np.array([[0, 1], [-a2, -a1]])  # State transition matrix / 状態遷移行列
B = np.array([[0], [1]])  # Input matrix / 入力行列
C = np.array([[b2, b1]])  # Output matrix / 出力行列
D = b0  # Direct term / 直接項

# Initialize the state vector / 状態ベクトルを初期化
x = np.array([[0], [0]])  # Initial state / 初期状態

# Time steps and noise generation / 時間ステップとノイズ生成
n_steps = 100  # Number of time steps / 時間ステップの数
v = np.random.normal(0, 1, (n_steps, 1))  # Input noise (system noise) / 入力雑音(システム雑音)
y = np.zeros(n_steps)  # Output / 出力を初期化

# Simulation loop / シミュレーションループ
for k in range(n_steps):
    # Output equation (uses the current state, before it is updated) / 出力方程式(更新前の状態を使う)
    y[k] = (C @ x + D * v[k]).item()

    # State update equation / 状態更新方程式
    x = A @ x + B * v[k]

# Plot the results / 結果をプロット
plt.figure(figsize=(10, 6))

# Plot the output y / 出力yのプロット
plt.plot(y, label='Output y')
plt.title('Output y Over Time (ARMA Model)')
plt.xlabel('Time Step')
plt.ylabel('Output')
plt.legend()

plt.tight_layout()
plt.show()

P36


import numpy as np
import matplotlib.pyplot as plt

# Define ARMA model coefficients (a1, a2, b0, b1, b2)
# ARMAモデルの係数 a1, a2, b0, b1, b2 を定義する
a1 = 0.15  # Example value / 例題の値
a2 = 0.8   # Example value / 例題の値
b0 = 1.0   # Example value / 例題の値
b1 = 1.0   # Example value / 例題の値
b2 = 1.0   # Example value / 例題の値

# Define state-space matrices (A, B, C, D)
# 状態空間行列 A, B, C, D を定義する
A = np.array([[0, 1], [-a2, -a1]])  # State transition matrix / 状態遷移行列
B = np.array([[0], [1]])  # Input matrix / 入力行列
C = np.array([b2, b1])    # Output matrix / 出力行列
D = b0  # Direct term / 直接項

# Initial state vector x
# 初期状態ベクトル x
x = np.array([[0], [0]])  # Initial state / 初期状態

# Simulation settings
# シミュレーションのための設定
n_steps = 100  # Number of time steps / 時間ステップの数
v = np.random.normal(0, 1, (n_steps, 1))  # Input noise (system noise) / 入力雑音
y = np.zeros(n_steps)  # Initialize output / 出力の初期化

# Simulation loop
# シミュレーションループ
for k in range(n_steps):
    # Output equation (uses the current state, before it is updated)
    # 出力方程式(更新前の状態を使う)
    y[k] = (C @ x + D * v[k]).item()

    # Update state equation
    # 状態方程式の更新
    x = A @ x + B * v[k]

# Plot the results
# 結果をプロット
plt.figure(figsize=(10, 6))

# Plot the output y / 出力yのプロット
plt.plot(y, label='Output y')
plt.title('Output y Over Time (ARMA Model)')
plt.xlabel('Time Step')
plt.ylabel('Output')
plt.legend()

plt.tight_layout()
plt.show()

P33


import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal

# Define mean vector (mu) and covariance matrix (P)
# 平均ベクトル (mu) と共分散行列 (P) を定義
mu = np.array([0, 0])  # 平均ベクトル
P = np.array([[1.0, 0.5], [0.5, 1.0]])  # 共分散行列

# Create grid and multivariate normal distribution
# グリッドを作成し、多変量正規分布を定義
x1, x2 = np.mgrid[-3:3:.01, -3:3:.01]
pos = np.dstack((x1, x2))

# Calculate the probability density function
# 確率密度関数を計算
rv = multivariate_normal(mu, P)
z = rv.pdf(pos)

# Plot the 3D surface plot
# 3Dサーフェスプロットを描画
fig = plt.figure(figsize=(10, 7))
ax = fig.add_subplot(111, projection='3d')

# Plot the surface
# サーフェスをプロット
ax.plot_surface(x1, x2, z, cmap='viridis')

# Labels and title
# ラベルとタイトルを設定
ax.set_title('Multivariate Gaussian Distribution')
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_zlabel('Probability Density')

# Show the plot
# プロットを表示
plt.show()

P39


import numpy as np
import matplotlib.pyplot as plt

# Generate synthetic data for y(k) using AR(2) model
# AR(2) モデルを使って合成データを生成
np.random.seed(0)
N = 100  # Number of data points / データポイントの数
a1 = 0.5  # True parameter for AR(1) term / AR(1)項の真のパラメータ
a2 = 0.3  # True parameter for AR(2) term / AR(2)項の真のパラメータ
v = np.random.normal(0, 1, N)  # Noise / ノイズ
y = np.zeros(N)

# Generate AR(2) data
# AR(2) データを生成
for k in range(2, N):
    y[k] = a1 * y[k-1] + a2 * y[k-2] + v[k]

# Prepare the regression matrix
# 回帰行列を準備
phi = np.zeros((N-2, 2))  # Regression matrix / 回帰行列
Y = y[2:]  # Dependent variable / 目的変数

for k in range(2, N):
    phi[k-2, :] = [y[k-1], y[k-2]]  # Regressors / 説明変数

# Solve the least squares problem
# 最小二乗法を使ってパラメータ推定
theta = np.linalg.inv(phi.T @ phi) @ (phi.T @ Y)

# Plot the results
# 結果をプロット
plt.figure(figsize=(10, 6))

# Plot true values vs estimated values / 真の値と推定値をプロット
plt.plot(y, label='True Data (y)')
plt.plot(range(2, N), phi @ theta, label='Estimated Data (Least Squares)', linestyle='--')

# Add labels and title / ラベルとタイトルを追加
plt.title('AR(2) Model: True vs Estimated Data')
plt.xlabel('Time Step')
plt.ylabel('y(k)')
plt.legend()

# Show the plot / プロットを表示
plt.show()

# Print the estimated parameters
# 推定されたパラメータを表示
print(f"Estimated parameters: a1 = {theta[0]:.4f}, a2 = {theta[1]:.4f}")

P42


import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import hankel, svd

# Step 1: Generate synthetic time series data using a known state-space model
# Step 1: 既知の状態空間モデルを使って時系列データを生成
np.random.seed(0)
n_samples = 100  # サンプル数
n_state = 2  # 状態数
A_true = np.array([[0.7, 0.1], [0.3, 0.9]])  # 真のシステム行列A
B_true = np.array([[0.1], [0.2]])  # 真の入力行列B
C_true = np.array([[1, 0]])  # 真の出力行列C

x = np.zeros((n_samples, n_state))  # 状態ベクトル
y = np.zeros(n_samples)  # 観測データ

# ノイズ
v = np.random.normal(0, 0.1, size=(n_samples, 1))  # システム雑音
w = np.random.normal(0, 0.1, size=n_samples)  # 観測雑音

# 初期状態
x[0] = [0.5, 0.5]

# システムをシミュレート
for k in range(1, n_samples):
    x[k] = A_true @ x[k-1] + B_true @ v[k]
    y[k] = C_true @ x[k] + w[k]

# Step 2: Construct Hankel matrices from the output data
# Step 2: 出力データからハンケル行列を構成する
h = 20  # Hankel matrix block size / ハンケル行列のブロックサイズ
H0 = hankel(y[:h], y[h-1:n_samples-1])  # Hankel matrix / ハンケル行列
H1 = hankel(y[1:h+1], y[h:n_samples])   # One-step-shifted Hankel matrix / 1ステップシフトしたハンケル行列

# Step 3: Singular Value Decomposition (SVD) of the Hankel matrix
# Step 3: ハンケル行列の特異値分解 (SVD)
U, S, Vh = svd(H0)

# Plot singular values to determine system order
# 特異値をプロットし、システムの次数を決定
plt.figure(figsize=(8, 6))
plt.plot(S, 'o-')
plt.title('Singular Values')
plt.xlabel('Index')
plt.ylabel('Singular Value')
plt.grid(True)
plt.show()

# Step 4: Estimate system matrices (A, C, B) via an ERA-style balanced realization
# Step 4: ERA流の平衡実現によりシステム行列 (A, C, B) を推定する
# (Note: ERA properly applies to impulse-response data; using raw noise-driven output, as here, is a rough approximation)
# (注: ERAは本来インパルス応答に適用する手法であり、雑音駆動の出力にそのまま使うのは粗い近似)
n_order = 2  # システムの次数(特異値から決定) / System order (chosen from the singular values)
U1 = U[:, :n_order]
V1 = Vh[:n_order, :]
S1_sqrt = np.diag(np.sqrt(S[:n_order]))
S1_sqrt_inv = np.diag(1 / np.sqrt(S[:n_order]))

# システム行列の推定 / Estimate the system matrices
A_est = S1_sqrt_inv @ U1.T @ H1 @ V1.T @ S1_sqrt_inv
C_est = (U1 @ S1_sqrt)[0, :]
B_est = (S1_sqrt @ V1)[:, 0]

# 結果を表示 / Display the results
print("Estimated A matrix:\n", A_est)
print("Estimated B matrix:\n", B_est)
print("Estimated C matrix:\n", C_est)

P60


import numpy as np
import matplotlib.pyplot as plt

# Given parameters / 与えられたパラメータ
c = 0.8  # 変換係数 (conversion coefficient)
sigma_x = 1.0  # 信号 x の標準偏差 (standard deviation of the signal x)
sigma_w = 0.5  # 観測雑音 w の標準偏差 (standard deviation of the observation noise w)

# Generate synthetic data / 合成データの生成
N = 100  # サンプル数
true_x = 2.0  # 真の温度(信号)
w = np.random.normal(0, sigma_w, N)  # 観測雑音
y = c * true_x + w  # 観測値

# Step 1: Calculate the least-squares estimate
# ステップ1: 最小二乗推定値の計算
sigma_sq = 1 / (1/sigma_x**2 + (c**2 / sigma_w**2))  # 推定値の分散(1回の観測の場合) (variance of the estimate for a single observation)
x_hat = (sigma_sq * c / sigma_w**2) * np.mean(y)  # 平均した観測値に単一観測の公式を適用 (single-observation formula applied to the averaged measurement)

# Step 2: Display results
# ステップ2: 結果を表示
print(f"True value of x: {true_x}")
print(f"Estimated value of x: {x_hat}")
print(f"Variance of the estimate: {sigma_sq}")

# Step 3: Plot the observations and estimate
# ステップ3: 観測値と推定値をプロット
plt.figure(figsize=(10, 6))

plt.plot(y, 'o', label='Observations (y)')
plt.axhline(y=true_x, color='r', linestyle='-', label='True Value of x')
plt.axhline(y=x_hat, color='g', linestyle='--', label='Estimated Value of x')

plt.title('Least-Squares Estimation of x from Observations')
plt.xlabel('Sample Index')
plt.ylabel('Observation / Estimate')
plt.legend()
plt.grid(True)
plt.show()
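
For completeness (my addition, standard Gaussian least-squares algebra): with N independent observations the information term scales by N, which is the formula the averaged measurement above really calls for.

# N個の観測を使った場合の推定 (Estimate using all N observations; the posterior variance shrinks with N)
sigma_sq_N = 1 / (1/sigma_x**2 + N * c**2 / sigma_w**2)
x_hat_N = sigma_sq_N * (N * c / sigma_w**2) * np.mean(y)
print(f"Estimate using all N observations: {x_hat_N}, variance: {sigma_sq_N}")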

P69


import numpy as np
import matplotlib.pyplot as plt

# Parameters / パラメータの設定
n = 3  # 状態の次元数
m = 2  # 観測の次元数
p = 2  # 入力の次元数
N = 100  # サンプル数

# 真のシステム行列(既知のパラメータ) / True system matrices (known parameters)
A_true = np.array([[0.7, 0.1, 0.0], [0.3, 0.9, 0.0], [0.0, 0.0, 1.0]])
B_true = np.array([[0.1, 0.0], [0.0, 0.2], [0.1, 0.1]])
C_true = np.array([[1, 0, 0], [0, 1, 0]])

# システム雑音と観測雑音 / System noise and observation noise
w = np.random.normal(0, 0.1, (p, N))
v = np.random.normal(0, 0.1, (m, N))

# 初期状態ベクトル / Initial state vector
x = np.zeros((n, N))
y = np.zeros((m, N))

# 初期状態の設定 / Set initial state
x[:, 0] = np.random.rand(n)

# 観測データを生成 / Generate observation data
for k in range(1, N):
    x[:, k] = A_true @ x[:, k-1] + B_true @ w[:, k]
    y[:, k] = C_true @ x[:, k] + v[:, k]

# 最小二乗推定法の実行: 出力系列から初期状態 x(0) を推定する
# Apply least-squares estimation: estimate the initial state x(0) from the output sequence,
# treating y(k) ≈ C A^k x(0) as the regression model / y(k) ≈ C A^k x(0) を回帰モデルとみなす
Phi = np.zeros((m * N, n))  # 回帰行列 / Regression matrix
Y = y.T.flatten()  # 観測データをベクトル化 / Flattened observation data

for k in range(N):
    row_start = m * k
    row_end = row_start + m
    Phi[row_start:row_end, :] = C_true @ np.linalg.matrix_power(A_true, k)

# 最小二乗推定を解く / Solve the least-squares problem
x0_est = np.linalg.lstsq(Phi, Y, rcond=None)[0]

# 結果の表示 / Display the results
print("True initial state x(0):\n", x[:, 0])
print("Estimated initial state x(0):\n", x0_est)

# プロット / Plot the results
plt.figure(figsize=(10, 6))
plt.plot(y[0, :], label='Observed y[0]')
plt.plot((Phi @ x0_est).reshape(N, m)[:, 0], label='Least-squares fit of y[0]', linestyle='--')
plt.title('Least-Squares Fit of the Zero-Input Response')
plt.xlabel('Time Step')
plt.ylabel('Observation Value')
plt.legend()
plt.grid(True)
plt.show()

P72


import numpy as np

# Parameters
n = 3  # 状態の次元数 (number of state variables)
m = 2  # 観測の次元数 (number of observation variables)
p = 2  # 入力の次元数 (number of inputs)
N = 100  # サンプル数 (number of samples)

# 真のシステム行列 (True system matrices)
A_true = np.array([[0.7, 0.1, 0.0], [0.3, 0.9, 0.0], [0.0, 0.0, 1.0]])  # 状態行列 (State transition matrix)
B_true = np.array([[0.1, 0.0], [0.0, 0.2], [0.1, 0.1]])  # 入力行列 (Input matrix)
C_true = np.array([[1, 0, 0], [0, 1, 0]])  # 観測行列 (Observation matrix)

# システム雑音と観測雑音 (System noise and observation noise)
w = np.random.normal(0, 0.1, (p, N))  # 入力ノイズ (Input noise)
v = np.random.normal(0, 0.1, (m, N))  # 観測ノイズ (Observation noise)

# 初期状態ベクトル (Initial state vector)
x = np.zeros((n, N))
y = np.zeros((m, N))

# 初期状態の設定 (Set initial state)
x[:, 0] = np.random.rand(n)

# 観測データを生成 (Generate observation data)
for k in range(1, N):
    x[:, k] = A_true @ x[:, k-1] + B_true @ w[:, k]  # 状態更新 (State update)
    y[:, k] = C_true @ x[:, k] + v[:, k]  # 観測 (Observation)

# 共分散行列の設定 (Covariance matrices)
Sigma_x = np.eye(n)  # 状態の共分散行列 (State covariance matrix)
Sigma_w = np.eye(m)  # 観測雑音の共分散行列 (Observation noise covariance matrix)

# F行列を計算 (Compute matrix F)
F = Sigma_x @ C_true.T @ np.linalg.inv(C_true @ Sigma_x @ C_true.T + Sigma_w)

# 推定された状態 (Estimated state)
x_hat = np.zeros((n, N))  # 初期化 (Initialize)

# 観測 y に基づいて推定を行う (Estimate based on observation y)
for k in range(N):
    x_hat[:, k] = F @ y[:, k]  # 最小二乗推定 (Least squares estimate)

# 結果の表示 (Display results)
print("True State x[:,0]:\n", x[:, 0])
print("Estimated State x_hat[:,0]:\n", x_hat[:, 0])

# 観測と推定結果をプロット (Plot the observations and estimated state)
import matplotlib.pyplot as plt

plt.figure(figsize=(10, 6))

# 観測値と推定値の比較 (Comparison of observations and estimates)
plt.plot(y[0, :], label="Observation y[0]")
plt.plot(x_hat[0, :], label="Estimated State x_hat[0]", linestyle="--")

plt.title("Least Squares Estimation of State Variables")
plt.xlabel("Time Step")
plt.ylabel("Value")
plt.legend()
plt.grid(True)
plt.show()

P79


import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm

# 観測データの生成 / Generate observation data
np.random.seed(0)
true_mean = 1.0  # 真の平均 / True mean
true_sigma = 1.0  # 真の標準偏差 / True standard deviation
N = 50  # 観測数 / Number of observations
y = np.random.normal(true_mean, true_sigma, N)  # 観測値 / Observed data

# 事前分布 / Prior distribution
prior_mean = 0.0  # 事前の平均 / Prior mean
prior_sigma = 1.0  # 事前の標準偏差 / Prior standard deviation

# 尤度関数 / Likelihood function (正規分布の場合)
def likelihood(mu, data):
    return np.prod(norm.pdf(data, mu, true_sigma))

# 事後分布 / Posterior distribution
mu_values = np.linspace(-3, 3, 1000)  # パラメータの候補値 / Candidate values for the parameter
posterior = []

for mu in mu_values:
    # ベイズの定理に従って事後確率を計算 / Calculate the posterior probability using Bayes' theorem
    prior = norm.pdf(mu, prior_mean, prior_sigma)  # 事前分布 / Prior probability
    likelihood_val = likelihood(mu, y)  # 尤度 / Likelihood
    posterior.append(prior * likelihood_val)

# 正規化 / Normalize the posterior
posterior = np.array(posterior)
posterior /= np.sum(posterior)  # Sum to 1

# 事後期待値 / Posterior mean
posterior_mean = np.sum(mu_values * posterior)

# 結果を表示 / Display the results
print(f"True mean: {true_mean}")
print(f"Posterior mean (estimate): {posterior_mean}")

# プロット / Plot the posterior distribution
plt.figure(figsize=(10, 6))
plt.plot(mu_values, posterior, label='Posterior Distribution')
plt.axvline(posterior_mean, color='r', linestyle='--', label=f'Posterior Mean: {posterior_mean:.2f}')
plt.axvline(true_mean, color='g', linestyle='-', label=f'True Mean: {true_mean:.2f}')
plt.title('Posterior Distribution of the Parameter')
plt.xlabel('Parameter (mu)')
plt.ylabel('Probability Density')
plt.legend()
plt.grid(True)
plt.show()

P95


import numpy as np
import matplotlib.pyplot as plt

# Step 1: モデルの設定(状態空間モデルの構築)
# Define system parameters (state-space model)
A = np.array([[1, 1], [0, 1]])  # 状態遷移行列 (State transition matrix)
B = np.array([[0.5], [1.0]])    # 制御行列 (Control matrix)
C = np.array([[1, 0]])          # 観測行列 (Observation matrix)
Q = np.array([[0.01, 0], [0, 0.01]])  # システム雑音の共分散 (Process noise covariance)
R = np.array([[0.1]])                 # 観測雑音の共分散 (Measurement noise covariance)
x_true = np.array([[0], [1]])         # 真の初期状態 (True initial state)

# シミュレーションの設定 (Simulation settings)
N = 50  # 時間ステップの数 (Number of time steps)
u = np.ones((N, 1))  # 定常制御入力 (Constant control input)
w = np.random.multivariate_normal([0, 0], Q, N).T  # システム雑音 (Process noise)
v = np.random.normal(0, np.sqrt(R[0][0]), N)       # 観測雑音 (Measurement noise)

# 真の状態と観測値の生成 (Generate true states and observations)
x_true_history = np.zeros((2, N))
y = np.zeros(N)

for k in range(1, N):
    x_true = A @ x_true + B * u[k] + w[:, k:k+1]  # 真の状態遷移 (True state update; B * u[k] keeps the (2,1) column shape)
    x_true_history[:, k] = x_true[:, 0]
    y[k] = (C @ x_true).item() + v[k]  # 観測 (Observation)

# Step 2: カルマンフィルタによる状態推定 (Kalman filter for state estimation)
x_est = np.zeros((2, N))  # 推定された状態 (Estimated states)
P = np.eye(2)  # 状態推定の誤差共分散行列 (Error covariance matrix)
x_est[:, 0] = [0, 0]  # 初期推定状態 (Initial estimated state)

# カルマンフィルタの実行 (Run Kalman filter)
for k in range(1, N):
    # 予測ステップ (Prediction step)
    x_pred = A @ x_est[:, k-1:k] + B * u[k]  # 状態の予測 (State prediction)
    P_pred = A @ P @ A.T + Q  # 誤差共分散の予測 (Covariance prediction)

    # 更新ステップ (Update step)
    K = P_pred @ C.T @ np.linalg.inv(C @ P_pred @ C.T + R)  # カルマンゲインの計算 (Kalman gain)
    x_est[:, k:k+1] = x_pred + K @ (y[k] - C @ x_pred)  # 状態の更新 (State update)
    P = (np.eye(2) - K @ C) @ P_pred  # 誤差共分散の更新 (Covariance update)

# 結果をプロット (Plot the results)
plt.figure(figsize=(10, 6))
plt.plot(x_true_history[0, :], label='True Position')
plt.plot(x_est[0, :], label='Estimated Position', linestyle='--')
plt.plot(y, label='Observations', linestyle=':', color='gray')
plt.title('Kalman Filter State Estimation')
plt.xlabel('Time Step')
plt.ylabel('Position')
plt.legend()
plt.grid(True)
plt.show()

P96


import numpy as np
import matplotlib.pyplot as plt

# モデルパラメータの設定 (Define model parameters)
A = np.array([[1.0, 1.0], [0, 1.0]])  # 状態遷移行列 (State transition matrix)
b = np.array([[0.5], [1.0]])  # 制御行列 (Control matrix)
c = np.array([[1.0, 0.0]])  # 観測行列 (Observation matrix)
Q = np.array([[0.01, 0.0], [0.0, 0.01]])  # システム雑音共分散 (Process noise covariance)
R = np.array([[0.1]])  # 観測雑音共分散 (Measurement noise covariance)

# 初期状態 (Initial state)
x_true = np.array([[0.0], [1.0]])  # 真の状態 (True state)
x_est = np.array([[0.0], [0.0]])  # 推定された状態 (Estimated state)
P = np.eye(2)  # 誤差共分散行列 (Error covariance)

# シミュレーションの設定 (Simulation settings)
N = 50  # サンプル数 (Number of time steps)
u = np.ones((N, 1))  # 制御入力 (Control input)
w = np.random.multivariate_normal([0, 0], Q, N).T  # システム雑音 (Process noise)
v = np.random.normal(0, np.sqrt(R[0][0]), N)  # 観測雑音 (Measurement noise)

# 真の状態と観測値の履歴 (True states and observations)
x_true_history = np.zeros((2, N))
y = np.zeros(N)

# 真の状態と観測値を生成 (Generate true states and observations)
for k in range(1, N):
    x_true = A @ x_true + b * u[k] + w[:, k:k+1]  # 状態更新 (State update; b * u[k] keeps the (2,1) column shape)
    x_true_history[:, k] = x_true[:, 0]
    y[k] = (c @ x_true).item() + v[k]  # 観測 (Observation)

# カルマンフィルタの実行 (Run Kalman filter)
x_est_history = np.zeros((2, N))  # 推定された状態の履歴 (Estimated state history)
x_est_history[:, 0] = x_est[:, 0]

for k in range(1, N):
    # 予測ステップ (Prediction step)
    x_pred = A @ x_est + b * u[k]  # 状態の予測 (State prediction)
    P_pred = A @ P @ A.T + Q  # 誤差共分散の予測 (Covariance prediction)

    # カルマンゲインの計算 (Compute Kalman gain)
    K = P_pred @ c.T @ np.linalg.inv(c @ P_pred @ c.T + R)

    # 状態の更新 (State update)
    x_est = x_pred + K @ (y[k] - c @ x_pred)  # フィルタリングステップ (Filtering step)
    P = (np.eye(2) - K @ c) @ P_pred  # 共分散行列の更新 (Covariance update)

    # 推定結果を保存 (Save the estimated state)
    x_est_history[:, k] = x_est[:, 0]

# 結果をプロット (Plot the results)
plt.figure(figsize=(10, 6))
plt.plot(x_true_history[0, :], label='True Position')
plt.plot(x_est_history[0, :], label='Estimated Position', linestyle='--')
plt.plot(y, label='Observations', linestyle=':', color='gray')
plt.title('Kalman Filter: True State, Estimated State, and Observations')
plt.xlabel('Time Step')
plt.ylabel('Position')
plt.legend()
plt.grid(True)
plt.show()

P100


import numpy as np
import matplotlib.pyplot as plt

# モデルの設定 (State-space model parameters)
A = np.array([[1.0, 1.0], [0.0, 1.0]])  # 状態遷移行列 (State transition matrix)
b = np.array([[0.5], [1.0]])  # 制御行列 (Control matrix)
c = np.array([[1.0, 0.0]])  # 観測行列 (Observation matrix)
Q = np.array([[0.01, 0.0], [0.0, 0.01]])  # システム雑音共分散 (Process noise covariance)
R = np.array([[0.1]])  # 観測雑音共分散 (Measurement noise covariance)

# 初期状態 (Initial state)
x_true = np.array([[0.0], [1.0]])  # 真の初期状態 (True initial state)
x_est = np.array([[0.0], [0.0]])  # 推定された状態の初期化 (Initial estimated state)
P = np.eye(2)  # 誤差共分散行列の初期化 (Initial error covariance)

# シミュレーションの設定 (Simulation settings)
N = 50  # サンプル数 (Number of time steps)
u = np.ones((N, 1))  # 制御入力 (Control input)
w = np.random.multivariate_normal([0, 0], Q, N).T  # システム雑音 (Process noise)
v = np.random.normal(0, np.sqrt(R[0][0]), N)  # 観測雑音 (Measurement noise)

# 真の状態と観測値の履歴 (True state and observations)
x_true_history = np.zeros((2, N))
y = np.zeros(N)

# 真の状態と観測値を生成 (Generate true states and observations)
for k in range(1, N):
    x_true = A @ x_true + b * u[k] + w[:, k:k+1]  # 状態更新 (True state update; b * u[k] keeps the (2,1) column shape)
    x_true_history[:, k] = x_true[:, 0]
    y[k] = (c @ x_true).item() + v[k]  # 観測 (Observation)

# カルマンフィルタの実行 (Run Kalman filter)
x_est_history = np.zeros((2, N))  # 推定された状態の履歴 (Estimated state history)
x_est_history[:, 0] = x_est[:, 0]

for k in range(1, N):
    # 予測ステップ (Prediction step)
    x_pred = A @ x_est + b * u[k]  # 事前推定値 (a priori estimate)
    P_pred = A @ P @ A.T + Q  # 誤差共分散の予測 (Covariance prediction)

    # カルマンゲインの計算 (Compute Kalman gain)
    K = P_pred @ c.T @ np.linalg.inv(c @ P_pred @ c.T + R)

    # 状態の更新 (Update step)
    x_est = x_pred + K @ (y[k] - c @ x_pred)  # 事後推定値 (a posteriori estimate)
    P = (np.eye(2) - K @ c) @ P_pred  # 共分散行列の更新 (Covariance update)

    # 推定された状態を保存 (Save the estimated state)
    x_est_history[:, k] = x_est[:, 0]

# 結果をプロット (Plot the results)
plt.figure(figsize=(10, 6))
plt.plot(x_true_history[0, :], label='True Position')
plt.plot(x_est_history[0, :], label='Estimated Position', linestyle='--')
plt.plot(y, label='Observations', linestyle=':', color='gray')
plt.title('Kalman Filter: True State, Estimated State, and Observations')
plt.xlabel('Time Step')
plt.ylabel('Position')
plt.legend()
plt.grid(True)
plt.show()

P104


import numpy as np
import matplotlib.pyplot as plt

# カルマンフィルタの設定 (Kalman Filter Parameters)
A = np.array([[1.0, 1.0], [0.0, 1.0]])  # 状態遷移行列 (State transition matrix)
b = np.array([[0.5], [1.0]])  # 制御行列 (Control matrix)
c = np.array([[1.0, 0.0]])  # 観測行列 (Observation matrix)
Q = np.array([[0.01, 0.0], [0.0, 0.01]])  # システム雑音共分散 (Process noise covariance)
R = np.array([[0.1]])  # 観測雑音共分散 (Measurement noise covariance)

# 初期状態と共分散 (Initial state and covariance)
x_true = np.array([[0.0], [1.0]])  # 真の初期状態 (True initial state)
x_est = np.array([[0.0], [0.0]])  # 推定された状態の初期化 (Initial estimated state)
P = np.eye(2)  # 誤差共分散行列の初期化 (Initial error covariance)

# シミュレーションの設定 (Simulation settings)
N = 50  # サンプル数 (Number of time steps)
u = np.ones((N, 1))  # 制御入力 (Control input)
w = np.random.multivariate_normal([0, 0], Q, N).T  # システム雑音 (Process noise)
v = np.random.normal(0, np.sqrt(R[0][0]), N)  # 観測雑音 (Measurement noise)

# 真の状態と観測値の履歴 (True state and observations)
x_true_history = np.zeros((2, N))
y = np.zeros(N)

# 真の状態と観測値を生成 (Generate true states and observations)
for k in range(1, N):
    x_true = A @ x_true + b * u[k] + w[:, k:k+1]  # 状態更新 (True state update; b * u[k] keeps the (2,1) column shape)
    x_true_history[:, k] = x_true[:, 0]
    y[k] = (c @ x_true).item() + v[k]  # 観測 (Observation)

# カルマンフィルタの実行 (Run Kalman filter)
x_est_history = np.zeros((2, N))  # 推定された状態の履歴 (Estimated state history)
x_est_history[:, 0] = x_est[:, 0]

for k in range(1, N):
    # Step 1: 予測ステップ (Prediction step)
    x_pred = A @ x_est + b * u[k]  # 事前推定値 (a priori estimate)
    P_pred = A @ P @ A.T + Q  # 事前共分散行列 (Predicted covariance)

    # Step 2: カルマンゲインの計算 (Calculate Kalman Gain)
    K = P_pred @ c.T @ np.linalg.inv(c @ P_pred @ c.T + R)  # カルマンゲイン (Kalman Gain)

    # Step 3: 状態の更新 (Update step)
    x_est = x_pred + K @ (y[k] - c @ x_pred)  # 事後推定値 (a posteriori estimate)
    P = (np.eye(2) - K @ c) @ P_pred  # 事後共分散行列 (Updated covariance)

    # 推定された状態を保存 (Save the estimated state)
    x_est_history[:, k] = x_est[:, 0]

# 結果をプロット (Plot the results)
plt.figure(figsize=(10, 6))
plt.plot(x_true_history[0, :], label='True Position')
plt.plot(x_est_history[0, :], label='Estimated Position', linestyle='--')
plt.plot(y, label='Observations', linestyle=':', color='gray')
plt.title('Kalman Filter: True State, Estimated State, and Observations')
plt.xlabel('Time Step')
plt.ylabel('Position')
plt.legend()
plt.grid(True)
plt.show()

P114


import numpy as np
import matplotlib.pyplot as plt

# カルマンフィルタの設定 (Kalman Filter Parameters)
A = np.array([[1]])  # 状態遷移行列 (State transition matrix)
B = np.array([[1]])  # システム雑音入力行列 (Process noise input matrix)
C = np.array([[1]])  # 観測行列 (Observation matrix)
Q = np.array([[1]])  # システム雑音共分散 (Process noise covariance)
R = np.array([[2]])  # 観測雑音共分散 (Measurement noise covariance)

# 初期状態と共分散 (Initial state and covariance)
x_est = np.array([[0]])  # 状態推定値の初期化 (Initial estimated state)
P = np.array([[0]])  # 誤差共分散行列の初期化 (Initial error covariance)

# シミュレーションの設定 (Simulation settings)
N = 4  # サンプル数 (Number of time steps)
v = np.random.normal(0, np.sqrt(Q[0][0]), N)  # システム雑音 (Process noise)
w = np.random.normal(0, np.sqrt(R[0][0]), N)  # 観測雑音 (Measurement noise)
y = np.zeros(N)  # 観測値の履歴 (Observation history)
x_true = np.zeros(N)  # 真の状態の履歴 (True state history)

# 状態と観測値の初期化 (Initial values for state and observation)
x_true[0] = 0
y[0] = x_true[0] + w[0]

# 状態と観測値の生成 (Generate true states and observations)
for k in range(1, N):
    x_true[k] = x_true[k - 1] + v[k - 1]  # 真の状態の更新 (True state update)
    y[k] = x_true[k] + w[k]  # 観測値の生成 (Generate observation)

# カルマンフィルタの実行 (Run Kalman filter)
x_est_history = np.zeros(N)  # 推定された状態の履歴 (Estimated state history)
x_est_history[0] = x_est[0, 0]  # 初期推定値を保存 (Store the initial estimate as a scalar)

for k in range(1, N):
    # Step 1: 予測ステップ (Prediction step)
    x_pred = A @ x_est  # 事前推定値 (a priori estimate)
    P_pred = A @ P @ A.T + B @ Q @ B.T  # 事前共分散行列 (Predicted covariance)

    # Step 2: カルマンゲインの計算 (Calculate Kalman Gain)
    K = P_pred @ C.T @ np.linalg.inv(C @ P_pred @ C.T + R)  # カルマンゲイン (Kalman Gain)

    # Step 3: 状態の更新 (Update step)
    x_est = x_pred + K @ (y[k] - C @ x_pred)  # 事後推定値 (a posteriori estimate)
    P = (np.eye(1) - K @ C) @ P_pred  # 事後共分散行列 (Updated covariance)

    # 推定された状態を保存 (Save the estimated state)
    x_est_history[k] = x_est[0, 0]

# 結果をプロット (Plot the results)
plt.figure(figsize=(10, 6))
plt.plot(x_true, label='True State')
plt.plot(x_est_history, label='Estimated State', linestyle='--')
plt.plot(y, label='Observations', linestyle=':', color='gray')
plt.title('Kalman Filter: True State, Estimated State, and Observations (Matrix Form)')
plt.xlabel('Time Step')
plt.ylabel('State Value')
plt.legend()
plt.grid(True)
plt.show()

P120


import numpy as np
import matplotlib.pyplot as plt

# Kalman Filter Implementation for 1D time series

# Parameters initialization
A = 1  # State transition matrix (in this case, a scalar)
B = 0  # Control matrix (assuming no control input here)
C = 1  # Measurement matrix (relating state to measurement)
Q = 1  # Process noise covariance
R = 2  # Measurement noise covariance
initial_state = 0  # Initial estimate of the state
initial_covariance = 1  # Initial covariance estimate

# Time series length
N = 50

# Simulate some true states and noisy observations
np.random.seed(42)
true_state = np.zeros(N)
observations = np.zeros(N)
true_state[0] = 10  # Initial true state
for k in range(1, N):
    true_state[k] = A * true_state[k - 1] + np.random.normal(0, np.sqrt(Q))  # True state evolution
    observations[k] = C * true_state[k] + np.random.normal(0, np.sqrt(R))  # Noisy measurements

# Kalman filter variables
state_estimates = np.zeros(N)
state_estimates[0] = initial_state
covariances = np.zeros(N)
covariances[0] = initial_covariance

for k in range(1, N):
    # Time update (prediction)
    state_predict = A * state_estimates[k - 1] + B  # No control input, B = 0
    covariance_predict = A * covariances[k - 1] * A + Q

    # Measurement update (correction)
    kalman_gain = covariance_predict * C / (C * covariance_predict * C + R)
    state_estimates[k] = state_predict + kalman_gain * (observations[k] - C * state_predict)
    covariances[k] = (1 - kalman_gain * C) * covariance_predict

# Plotting the results
plt.figure(figsize=(10, 6))
plt.plot(true_state, label="True State")
plt.plot(observations, label="Observations", linestyle='dashed', color='orange')
plt.plot(state_estimates, label="Kalman Filter Estimate", linestyle='dotted', color='green')
plt.title('Kalman Filter State Estimation')
plt.xlabel('Time Step')
plt.ylabel('State')
plt.legend()
plt.grid(True)
plt.show()

P122


import numpy as np

# Time steps
N = 10  # Number of time steps

# System matrices
A = np.array([[1, 0], [0, 1]])  # State transition matrix
B = np.array([[0.5], [0.1]])    # Control input matrix
C = np.array([[0, 1]])          # Observation matrix
Q = np.array([[0.01, 0], [0, 0.01]])  # System noise covariance
R = 0.1  # Observation noise covariance
P = np.eye(2)  # Initial error covariance matrix
x = np.zeros((2, 1))  # Initial state
x_hat = np.zeros((2, 1))  # Initial estimate
u = np.random.choice([-1, 1], N)  # Random control inputs
y = np.zeros((N, 1))  # Observations

# Kalman filter storage arrays
x_hat_arr = np.zeros((2, N))  # Filtered state estimates
P_arr = np.zeros((2, 2, N))  # Error covariance matrices
G_arr = np.zeros((2, N))  # Kalman gains

# Simulation (Assume system noise and observation noise as white Gaussian noise)
for k in range(1, N):
    # System model
    x = A @ x + B * u[k-1] + np.random.multivariate_normal([0, 0], Q).reshape(-1, 1)
    y[k] = (C @ x).item() + np.random.normal(0, np.sqrt(R))  # R is a variance, so the noise standard deviation is sqrt(R)
    
    # Prediction step
    x_hat_minus = A @ x_hat + B * u[k-1]
    P_minus = A @ P @ A.T + Q
    
    # Kalman gain calculation
    G = P_minus @ C.T @ np.linalg.inv(C @ P_minus @ C.T + R)
    
    # Update step
    x_hat = x_hat_minus + G @ (y[k] - C @ x_hat_minus)
    P = (np.eye(2) - G @ C) @ P_minus
    
    # Store results
    x_hat_arr[:, k] = x_hat.reshape(-1)
    P_arr[:, :, k] = P
    G_arr[:, k] = G.reshape(-1)

# Output results
print("State Estimates (x_hat):")
print(x_hat_arr)
print("Kalman Gains (G):")
print(G_arr)

P127

import numpy as np

# Kalman Filter Initialization
def kalman_filter(y, A, B, C, Q, R, x0, P0):
    n_timesteps = len(y)
    n = len(A)  # State dimension

    # Pre-allocate space for arrays
    x_hat = np.zeros((n_timesteps, n))  # Estimated state
    P = np.zeros((n_timesteps, n, n))   # Error covariance matrix
    x_hat_minus = np.zeros((n_timesteps, n))  # Prior estimated state
    P_minus = np.zeros((n_timesteps, n, n))   # Prior error covariance matrix
    K = np.zeros((n_timesteps, n))  # Kalman gain

    # Initial conditions
    x_hat[0] = x0
    P[0] = P0

    for k in range(1, n_timesteps):
        # Prediction step (time update)
        x_hat_minus[k] = np.dot(A, x_hat[k-1]) + B.flatten()  # B is a constant offset (zero in the example below)
        P_minus[k] = np.dot(A, np.dot(P[k-1], A.T)) + Q

        # Measurement update (correction step)
        S = np.dot(C, np.dot(P_minus[k], C.T)) + R  # Measurement prediction covariance
        K[k] = (np.dot(P_minus[k], C.T) / S).flatten()  # Kalman gain

        x_hat[k] = x_hat_minus[k] + K[k] * (y[k] - np.dot(C, x_hat_minus[k]))
        P[k] = P_minus[k] - np.outer(K[k], np.dot(C, P_minus[k]))

    return x_hat, P, K

# Example of usage
# Define system parameters (for a simple system)
A = np.array([[1]])  # State transition matrix
B = np.array([[0]])  # Control matrix
C = np.array([[1]])  # Observation matrix
Q = np.array([[1]])  # Process noise covariance
R = np.array([[2]])  # Measurement noise covariance
x0 = np.array([0])   # Initial state estimate
P0 = np.array([[1]]) # Initial estimate covariance

# Simulated measurements (for testing)
y = np.array([1, 2, 3, 2.5, 2, 1.5, 1, 0.5, 0])

# Run Kalman filter
x_hat, P, K = kalman_filter(y, A, B, C, Q, R, x0, P0)

# Print the results
for k in range(len(y)):
    print(f"Time step {k}: State estimate = {x_hat[k]}, Kalman Gain = {K[k]}")

P132


import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

# Define the steady-state Kalman filter transfer function
# G(z) = 0.5 / (1 - 0.5 z^-1) = 0.5z / (z - 0.5)
numerator = [0.5, 0]   # Coefficients in descending powers of z
denominator = [1, -0.5]

# Create a discrete-time transfer function
system = signal.dlti(numerator, denominator)

# Frequency range for plotting (0 to 0.5 Hz)
w, h = signal.dfreqresp(system, n=500)

# Convert from rad/sample to Hz (assuming sample rate = 1 Hz for simplicity)
freq = w / (2 * np.pi)

# Plot Gain in dB
plt.figure(figsize=(10, 6))

# Gain (dB)
plt.subplot(2, 1, 1)
plt.plot(freq, 20 * np.log10(abs(h)))
plt.title('Frequency Response of Kalman Filter')
plt.ylabel('Gain (dB)')
plt.grid(True)

# Phase (degrees)
plt.subplot(2, 1, 2)
plt.plot(freq, np.angle(h, deg=True))
plt.ylabel('Phase (degrees)')
plt.xlabel('Frequency (Hz)')
plt.grid(True)

# Show plots
plt.tight_layout()
plt.show()

P140


import numpy as np
import matplotlib.pyplot as plt

# Simulation settings
np.random.seed(0)
N = 100  # Number of data points
true_params = np.array([0.5, -0.3])  # True parameters a1, a2
sigma_w = 1.0  # Standard deviation of the observation noise
gamma = 1000  # Constant for the initial covariance matrix

# Generate the input signal and noise
u = np.random.randn(N)
w = sigma_w * np.random.randn(N)  # Observation noise

# Generate the system output y: y(k) = a1*y(k-1) + a2*y(k-2) + u(k-1) + w(k)
y = np.zeros(N)
for k in range(2, N):
    y[k] = true_params[0] * y[k-1] + true_params[1] * y[k-2] + u[k-1] + w[k]

# Kalman filter initialization
theta_hat = np.zeros((2, N))  # Initialize the parameter estimates
P = gamma * np.eye(2)  # Initialize the covariance matrix

# Kalman filter algorithm (recursive parameter estimation)
for k in range(2, N):
    # Regression vector
    phi_k = np.array([y[k-1], y[k-2]])
    
    # Kalman gain
    K = P @ phi_k / (phi_k @ P @ phi_k + sigma_w**2)
    
    # Parameter update (the known input term u[k-1] is subtracted from the output)
    theta_hat[:, k] = theta_hat[:, k-1] + K * (y[k] - u[k-1] - phi_k @ theta_hat[:, k-1])
    
    # Covariance update
    P = P - np.outer(K, phi_k @ P)

# Plot the results
plt.figure(figsize=(10, 6))
plt.plot(range(N), theta_hat[0, :], label="Estimated a1")
plt.plot(range(N), theta_hat[1, :], label="Estimated a2")
plt.axhline(y=true_params[0], color='r', linestyle='--', label="True a1")
plt.axhline(y=true_params[1], color='g', linestyle='--', label="True a2")
plt.xlabel("Time step")
plt.ylabel("Parameter value")
plt.legend()
plt.title("Kalman Filter Parameter Estimation")
plt.grid(True)
plt.show()
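
Viewed as a state-space model, this example treats the unknown coefficients as a constant state, so the Kalman filter reduces to recursive least squares:

\theta(k+1) = \theta(k), \qquad y(k) - u(k-1) = \varphi^{\top}(k)\,\theta(k) + w(k), \qquad \varphi(k) = \begin{bmatrix} y(k-1) \\ y(k-2) \end{bmatrix}

Since the parameter "state" has no process noise, P only shrinks over time and the estimates settle onto the true values.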

P152


import numpy as np

# Numerically compute the Jacobian matrix by forward differences
def jacobian_matrix(f, x):
    # f maps an n-vector to an m-vector; returns the (m x n) Jacobian at x
    eps = 1e-5
    x = np.asarray(x, dtype=float)  # cast to float so the perturbation is not truncated
    f0 = np.atleast_1d(f(x))
    m, n = len(f0), len(x)
    J = np.zeros((m, n))
    for i in range(n):
        x_eps = x.copy()
        x_eps[i] += eps
        J[:, i] = (np.atleast_1d(f(x_eps)) - f0) / eps
    return J

# Function f(x, u) representing the system dynamics
def f(x, u):
    # Example: linear dynamics x(k+1) = A x(k) + B u(k) (replace with your own system)
    A = np.array([[1, 1], [0, 1]])
    B = np.array([[0.5], [1]])
    return A @ x + B @ u

# Function h(x) representing the measurement model
def h(x):
    # Example measurement model (modify for your system)
    return np.array([x[0]])

# Extended Kalman Filter function
def ekf_predict(x, P, u, Q, f, jacobian_f):
    # Prediction step: linearize f around the prior estimate, then propagate
    J_f = jacobian_f(lambda s: f(s, u), x)  # Jacobian of f w.r.t. x (u held fixed)
    x = f(x, u)
    P = J_f @ P @ J_f.T + Q
    return x, P

def ekf_update(x, P, z, R, h, jacobian_h):
    # Update step: y = z - h(x), S = H * P * H.T + R
    y = z - h(x)
    H = jacobian_h(h, x)  # Compute the Jacobian of h with respect to x
    S = H @ P @ H.T + R
    K = P @ H.T @ np.linalg.inv(S)  # Kalman gain
    x = x + K @ y  # Update the state estimate
    P = (np.eye(len(x)) - K @ H) @ P  # Update the error covariance
    return x, P

# Initialization
x = np.array([0.0, 0.0])  # Initial state (float, so the finite differences are well defined)
P = np.eye(2) * 1  # Initial covariance matrix
Q = np.eye(2) * 0.1  # Process noise covariance
R = np.eye(1) * 0.1  # Measurement noise covariance
u = np.array([1])  # Control input (example)
z = np.array([0.5])  # Measurement (example)

# Simulate EKF for one step
x_pred, P_pred = ekf_predict(x, P, u, Q, f, jacobian_matrix)
x_update, P_update = ekf_update(x_pred, P_pred, z, R, h, jacobian_matrix)

# Output the results
print("Predicted State: ", x_pred)
print("Updated State: ", x_update)
print("Updated Covariance Matrix: ", P_update)

P155


import numpy as np

def linear_kf_predict(x, P, A, B, u, Q):
    # State prediction
    x = A @ x + B @ u
    # Covariance prediction
    P = A @ P @ A.T + Q
    return x, P

def linear_kf_update(x, P, z, H, R):
    # Measurement update
    y = z - H @ x  # Residual
    S = H @ P @ H.T + R  # Residual covariance
    K = P @ H.T @ np.linalg.inv(S)  # Kalman gain
    x = x + K @ y  # Updated state
    P = (np.eye(len(x)) - K @ H) @ P  # Updated covariance
    return x, P

# Initialize parameters
x = np.array([0, 0])  # Initial state
P = np.eye(2)  # Covariance matrix
A = np.array([[1, 1], [0, 1]])  # State transition matrix
B = np.array([[0.5], [1]])  # Control matrix
u = np.array([1])  # Control input
H = np.array([[1, 0]])  # Measurement matrix
R = np.eye(1) * 0.1  # Measurement noise covariance
Q = np.eye(2) * 0.1  # Process noise covariance
z = np.array([1])  # Measurement

# Apply Kalman Filter
x_pred, P_pred = linear_kf_predict(x, P, A, B, u, Q)
x_update, P_update = linear_kf_update(x_pred, P_pred, z, H, R)

print("KF Updated State: ", x_update)
print("KF Updated Covariance: ", P_update)

P156


import numpy as np
from scipy.linalg import cholesky

### 1. Linear Kalman Filter (KF)
def linear_kf_predict(x, P, A, B, u, Q):
    # State prediction
    x = A @ x + B @ u
    # Covariance prediction
    P = A @ P @ A.T + Q
    return x, P

def linear_kf_update(x, P, z, H, R):
    # Measurement update
    y = z - H @ x  # Residual
    S = H @ P @ H.T + R  # Residual covariance
    K = P @ H.T @ np.linalg.inv(S)  # Kalman gain
    x = x + K @ y  # Updated state
    P = (np.eye(len(x)) - K @ H) @ P  # Updated covariance
    return x, P

# Initial setup for the Kalman Filter
x = np.array([0, 0])  # Initial state
P = np.eye(2)  # Covariance matrix
A = np.array([[1, 1], [0, 1]])  # State transition matrix
B = np.array([[0.5], [1]])  # Control matrix
u = np.array([1])  # Control input
H = np.array([[1, 0]])  # Measurement matrix
R = np.eye(1) * 0.1  # Measurement noise covariance
Q = np.eye(2) * 0.1  # Process noise covariance
z = np.array([1])  # Measurement

# Apply Linear Kalman Filter
x_pred, P_pred = linear_kf_predict(x, P, A, B, u, Q)
x_update, P_update = linear_kf_update(x_pred, P_pred, z, H, R)

print("KF Updated State: ", x_update)
print("KF Updated Covariance: ", P_update)

### 2. Extended Kalman Filter (EKF)
def f_ekf(x, u):
    return np.array([x[0] + x[1] + u[0], x[1] + u[0]])

def h_ekf(x):
    return np.array([x[0]])

def jacobian_f_ekf(x, u):
    return np.array([[1, 1], [0, 1]])

def jacobian_h_ekf(x):
    return np.array([[1, 0]])

def ekf_predict(x, P, u, Q, f, jacobian_f):
    # State prediction
    x = f(x, u)
    J_f = jacobian_f(x, u)
    P = J_f @ P @ J_f.T + Q
    return x, P

def ekf_update(x, P, z, R, h, jacobian_h):
    # Measurement update
    y = z - h(x)
    J_h = jacobian_h(x)
    S = J_h @ P @ J_h.T + R
    K = P @ J_h.T @ np.linalg.inv(S)
    x = x + K @ y
    P = (np.eye(len(x)) - K @ J_h) @ P
    return x, P

# Apply Extended Kalman Filter
x_pred, P_pred = ekf_predict(x, P, u, Q, f_ekf, jacobian_f_ekf)
x_update, P_update = ekf_update(x_pred, P_pred, z, R, h_ekf, jacobian_h_ekf)

print("EKF Updated State: ", x_update)
print("EKF Updated Covariance: ", P_update)

### 3. Unscented Kalman Filter (UKF)
def sigma_points(x, P, kappa=1):
    n = len(x)
    sigma_pts = np.zeros((2 * n + 1, n))
    W = np.zeros(2 * n + 1)
    sigma_pts[0] = x
    W[0] = kappa / (n + kappa)
    U = cholesky((n + kappa) * P)
    
    for i in range(n):
        sigma_pts[i + 1] = x + U[i]
        sigma_pts[n + i + 1] = x - U[i]
        W[i + 1] = 1 / (2 * (n + kappa))
        W[n + i + 1] = 1 / (2 * (n + kappa))
    
    return sigma_pts, W

def ukf_predict(sigma_pts, W, f, Q):
    n = len(sigma_pts[0])
    x_pred = np.sum([W[i] * f(sigma_pts[i]) for i in range(2 * n + 1)], axis=0)
    P_pred = Q + np.sum([W[i] * np.outer(f(sigma_pts[i]) - x_pred, f(sigma_pts[i]) - x_pred) for i in range(2 * n + 1)], axis=0)
    return x_pred, P_pred

def ukf_update(sigma_pts, W, x_pred, P_pred, z, h, R):
    n = len(sigma_pts[0])
    z_pred = np.sum([W[i] * h(sigma_pts[i]) for i in range(2 * n + 1)], axis=0)
    P_zz = R + np.sum([W[i] * np.outer(h(sigma_pts[i]) - z_pred, h(sigma_pts[i]) - z_pred) for i in range(2 * n + 1)], axis=0)
    P_xz = np.sum([W[i] * np.outer(sigma_pts[i] - x_pred, h(sigma_pts[i]) - z_pred) for i in range(2 * n + 1)], axis=0)
    
    K = P_xz @ np.linalg.inv(P_zz)
    x_update = x_pred + K @ (z - z_pred)
    P_update = P_pred - K @ P_zz @ K.T
    return x_update, P_update

# Generate sigma points and apply Unscented Kalman Filter
sigma_pts, W = sigma_points(x, P)
x_pred, P_pred = ukf_predict(sigma_pts, W, lambda s: f_ekf(s, u), Q)  # bind the control input u
# Redraw sigma points around the predicted distribution for the measurement update
sigma_pts_pred, W = sigma_points(x_pred, P_pred)
x_update, P_update = ukf_update(sigma_pts_pred, W, x_pred, P_pred, z, h_ekf, R)

print("UKF Updated State: ", x_update)
print("UKF Updated Covariance: ", P_update)

### 4. Particle Filter (PF)
def particle_filter_predict(particles, weights, f, u, Q):
    n_particles = len(particles)
    particles_pred = np.zeros_like(particles)
    
    for i in range(n_particles):
        particles_pred[i] = f(particles[i], u) + np.random.multivariate_normal([0, 0], Q)
    
    return particles_pred

def particle_filter_update(particles, weights, z, h, R):
    weights = np.array([np.exp(-0.5 * np.linalg.norm(z - h(p))**2 / R[0, 0]) for p in particles])
    weights /= np.sum(weights)
    return weights

def resample(particles, weights):
    indices = np.random.choice(len(particles), size=len(particles), p=weights)
    return particles[indices]

# Initialize particles and weights
n_particles = 100
particles = np.random.randn(n_particles, 2)
weights = np.ones(n_particles) / n_particles
Q = np.eye(2) * 0.1  # Process noise covariance
R = np.eye(1) * 0.1  # Measurement noise covariance
z = np.array([1])  # Measurement

# Particle Filter predict step
particles_pred = particle_filter_predict(particles, weights, f_ekf, u, Q)

# Particle Filter update step
weights = particle_filter_update(particles_pred, weights, z, h_ekf, R)

# Resampling step
particles_resampled = resample(particles_pred, weights)

print("Resampled Particles: ", particles_resampled)

P164


import numpy as np
import matplotlib.pyplot as plt

# Function for Sigma Point Generation using U Transformation
def sigma_points(x, P, kappa):
    """ 
    Japanese: シグマポイントを生成するための関数
    English: Function to generate sigma points using U transformation
    """
    n = len(x)  # Dimension of the state
    sigma_points = np.zeros((2 * n + 1, n))  # Initialize sigma points
    sigma_points[0] = x  # The first sigma point is the mean

    # Compute the square root of the covariance matrix
    sqrt_P = np.linalg.cholesky((n + kappa) * P)

    # Generate the remaining sigma points from the columns of the lower-triangular factor
    # (np.linalg.cholesky returns L with L @ L.T = (n + kappa) * P)
    for i in range(n):
        sigma_points[i + 1] = x + sqrt_P[:, i]
        sigma_points[i + n + 1] = x - sqrt_P[:, i]

    return sigma_points

# Example system dynamics (linearized for simplicity)
def f(x):
    """ 
    Japanese: システムの状態更新関数
    English: System state update function 
    """
    return np.array([x[0] + x[1], x[1]])

def h(x):
    """ 
    Japanese: 観測関数
    English: Observation function
    """
    return np.array([x[0]])

# Example parameters
n = 2  # State dimension
x = np.array([1.0, 0.5])  # State mean
P = np.array([[1.0, 0.5], [0.5, 2.0]])  # State covariance matrix
kappa = 3 - n  # Scaling parameter

# Generate sigma points
sigma_pts = sigma_points(x, P, kappa)

# Plot Sigma Points
plt.figure()
plt.scatter(sigma_pts[:, 0], sigma_pts[:, 1], color='red', label='Sigma Points')
plt.scatter(x[0], x[1], color='blue', label='Mean')
plt.title('Sigma Points and Mean')
plt.xlabel('State Dimension 1')
plt.ylabel('State Dimension 2')
plt.legend()
plt.grid(True)
plt.show()

# Prediction step
predicted_sigma_pts = np.array([f(s) for s in sigma_pts])

# Compute predicted mean and covariance
w0 = kappa / (n + kappa)  # Weight for the mean
wi = 1 / (2 * (n + kappa))  # Weights for the other sigma points

# Predicted mean
predicted_mean = w0 * predicted_sigma_pts[0] + sum(wi * predicted_sigma_pts[i] for i in range(1, 2 * n + 1))

# Predicted covariance
predicted_cov = w0 * np.outer(predicted_sigma_pts[0] - predicted_mean, predicted_sigma_pts[0] - predicted_mean)
for i in range(1, 2 * n + 1):
    predicted_cov += wi * np.outer(predicted_sigma_pts[i] - predicted_mean, predicted_sigma_pts[i] - predicted_mean)

# Print results
print(f"Predicted mean: {predicted_mean}")
print(f"Predicted covariance:\n {predicted_cov}")

P167


import numpy as np

# Define the initial state (x_0)
x = np.array([[0], [0]])  # Initial state vector

# Define the state transition matrix (A), control matrix (B), and observation matrix (C)
A = np.array([[1, 1], [0, 1]])  # State transition model
B = np.array([[0.5], [1]])      # Control input model
C = np.array([[1, 0]])          # Observation model

# Define the process noise covariance (Q), observation noise covariance (R), and initial estimation error covariance (P)
Q = np.array([[0.1, 0], [0, 0.1]])  # Process noise covariance
R = np.array([[0.1]])               # Observation noise covariance
P = np.array([[1, 0], [0, 1]])      # Initial estimate error covariance

# Define control input (u) and measurements (y)
u = np.array([[2]])  # Control input
y = np.array([[1.1]])  # Measurement

# Kalman filter steps

# Prediction step
x_pred = A @ x + B @ u  # Predicted state estimate
P_pred = A @ P @ A.T + Q  # Predicted estimate covariance

# Update step (Correction step)
K = P_pred @ C.T @ np.linalg.inv(C @ P_pred @ C.T + R)  # Kalman gain
x = x_pred + K @ (y - C @ x_pred)  # Updated state estimate
P = (np.eye(2) - K @ C) @ P_pred  # Updated estimate covariance

# Print results
print("Updated state estimate: \n", x)
print("Updated estimate covariance: \n", P)

P172


import numpy as np

# Unscented Kalman Filter

def sigma_points(mu, cov, kappa):
    """
    Calculate the sigma points based on the mean (mu) and covariance (cov).
    """
    n = len(mu)
    sigma_points = np.zeros((2 * n + 1, n))
    weights = np.zeros(2 * n + 1)
    
    # Scaling parameter
    lambda_ = kappa + n
    
    # First sigma point is the mean
    sigma_points[0] = mu
    weights[0] = lambda_ / (n + lambda_)
    
    # Remaining sigma points
    sqrt_cov = np.linalg.cholesky((n + lambda_) * cov)
    
    for i in range(n):
        sigma_points[i + 1] = mu + sqrt_cov[:, i]
        sigma_points[n + i + 1] = mu - sqrt_cov[:, i]
        weights[i + 1] = 1 / (2 * (n + lambda_))
        weights[n + i + 1] = 1 / (2 * (n + lambda_))
    
    return sigma_points, weights

def unscented_transform(sigma_points, weights):
    """
    Perform the unscented transform to recover the mean and covariance.
    """
    mean = np.dot(weights, sigma_points)
    covariance = np.zeros((len(mean), len(mean)))
    
    for i in range(len(sigma_points)):
        diff = sigma_points[i] - mean
        covariance += weights[i] * np.outer(diff, diff)
    
    return mean, covariance

def predict_sigma_points(sigma_points, f):
    """
    Predict the sigma points based on the motion model.
    """
    return np.array([f(sigma) for sigma in sigma_points])

def unscented_kalman_filter(mu, cov, f, h, z, R, Q, kappa):
    """
    Unscented Kalman Filter for nonlinear state estimation.
    
    Parameters:
    mu   -- Prior mean
    cov  -- Prior covariance
    f    -- State transition function
    h    -- Measurement function
    z    -- Measurement
    R    -- Measurement noise covariance
    Q    -- Process noise covariance
    kappa -- Scaling parameter
    """
    # Generate sigma points (bound to a new name so the sigma_points function is not shadowed)
    sig_pts, weights = sigma_points(mu, cov, kappa)
    
    # Predict sigma points
    predicted_sigma_points = predict_sigma_points(sig_pts, f)
    
    # Perform unscented transform to recover predicted state
    mu_bar, cov_bar = unscented_transform(predicted_sigma_points, weights)
    cov_bar += Q  # Add process noise
    
    # Predict measurement
    predicted_measurements = np.array([h(sigma) for sigma in predicted_sigma_points])
    z_pred, S = unscented_transform(predicted_measurements, weights)
    S += R  # Add measurement noise
    
    # Cross-covariance
    P_xz = np.zeros((len(mu), len(z)))
    for i in range(len(sig_pts)):
        P_xz += weights[i] * np.outer(predicted_sigma_points[i] - mu_bar, predicted_measurements[i] - z_pred)
    
    # Kalman gain
    K = np.dot(P_xz, np.linalg.inv(S))
    
    # Update state mean and covariance
    mu_new = mu_bar + np.dot(K, (z - z_pred))
    cov_new = cov_bar - np.dot(np.dot(K, S), K.T)
    
    return mu_new, cov_new

# Example usage of UKF

def state_transition_function(x):
    # Example state transition function
    return x

def measurement_function(x):
    # Example measurement function
    return x

# Initial state
mu = np.array([0, 0])
cov = np.eye(2) * 0.1
R = np.eye(2) * 0.1  # Measurement noise
Q = np.eye(2) * 0.01  # Process noise
z = np.array([1.0, 1.0])  # Measurement
kappa = 0

# Run UKF
mu_new, cov_new = unscented_kalman_filter(mu, cov, state_transition_function, measurement_function, z, R, Q, kappa)

print("Updated State Mean: ", mu_new)
print("Updated Covariance: ", cov_new)



