System Identification Memo: Python Code

import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from scipy.optimize import curve_fit
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression

# Data generation
np.random.seed(0)
time = np.linspace(0, 10, 100)
u = np.sin(time) + 0.1 * np.random.randn(len(time))
e = 0.5 * np.random.randn(len(time))
true_K = 2.0
true_tau = 3.0
sys_true = signal.lti([true_K], [true_tau, 1])
_, y, _ = signal.lsim(sys_true, u, time)
y += e

# 1. First-Order Transfer Function Identification
def first_order_model(t, K, tau):
    # Simulate K / (tau*s + 1) driven by the global input u
    sys = signal.lti([K], [tau, 1])
    _, y_model, _ = signal.lsim(sys, u, t)
    return y_model

# Fit the first-order model
params, _ = curve_fit(first_order_model, time, y, p0=[1, 1])
K_fit, tau_fit = params

print(f'First-Order Transfer Function - Estimated Gain (K): {K_fit}')
print(f'First-Order Transfer Function - Estimated Time Constant (τ): {tau_fit}')

plt.figure()
plt.plot(time, y, label='Actual Output')
plt.plot(time, first_order_model(time, K_fit, tau_fit), '--', label='Fitted Model')
plt.xlabel('Time')
plt.ylabel('Output')
plt.title('First-Order Transfer Function Identification')
plt.legend()
plt.grid(True)

# 2. ARX Model Identification
p = 2  # Number of output lags
q = 2  # Number of input lags

# Build the lagged regressor matrix: past outputs y[k-p..k-1] and past inputs u[k-q..k-1]
N0 = max(p, q)
X = np.zeros((len(y) - N0, p + q))
y_lagged = y[N0:]
for i in range(p):
    X[:, i] = y[N0 - p + i:len(y) - p + i]
for i in range(q):
    X[:, p + i] = u[N0 - q + i:len(u) - q + i]

# Fit the ARX model
model = LinearRegression().fit(X, y_lagged)
a = model.coef_[:p]
b = model.coef_[p:]

print(f'ARX Model - Coefficients (a): {a}')
print(f'ARX Model - Coefficients (b): {b}')

y_pred_arx = np.dot(X, model.coef_) + model.intercept_

plt.figure()
plt.plot(time[max(p, q):], y_lagged, label='Actual Output')
plt.plot(time[max(p, q):], y_pred_arx, '--', label='Fitted ARX Model')
plt.xlabel('Time')
plt.ylabel('Output')
plt.title('ARX Model Identification')
plt.legend()
plt.grid(True)

# 3. ARMAX Model Identification (ARMA with an exogenous input via statsmodels SARIMAX)
armax_model = sm.tsa.SARIMAX(y, exog=u, order=(p, 0, q))
armax_results = armax_model.fit()

print(armax_results.summary())

plt.figure()
plt.plot(time, y, label='Actual Output')
plt.plot(time, armax_results.fittedvalues, '--', label='Fitted ARMAX Model')
plt.xlabel('Time')
plt.ylabel('Output')
plt.title('ARMAX Model Identification')
plt.legend()
plt.grid(True)

# 4. OE Model Identification
# Note: statsmodels has no dedicated output-error (OE) estimator, so the same
# ARMA-with-exogenous-input structure is reused here as an approximation.
oe_model = sm.tsa.SARIMAX(y, exog=u, order=(2, 0, 2))
oe_results = oe_model.fit()

print(oe_results.summary())

plt.figure()
plt.plot(time, y, label='Actual Output')
plt.plot(time, oe_results.fittedvalues, '--', label='Fitted OE Model')
plt.xlabel('Time')
plt.ylabel('Output')
plt.title('OE Model Identification')
plt.legend()
plt.grid(True)

plt.show()
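
A quick way to compare the four fits above is a common error metric. The following is a minimal sketch, assuming the variables defined above (y, y_lagged, y_pred_arx, armax_results, oe_results) are still in scope; it prints the RMSE of each model.

# Compare the models with a simple RMSE metric (reuses variables from above)
def rmse(y_true, y_hat):
    return np.sqrt(np.mean((y_true - y_hat) ** 2))

print('RMSE (first-order TF):', rmse(y, first_order_model(time, K_fit, tau_fit)))
print('RMSE (ARX):', rmse(y_lagged, y_pred_arx))
print('RMSE (ARMAX):', rmse(y, armax_results.fittedvalues))
print('RMSE (OE via SARIMAX):', rmse(y, oe_results.fittedvalues))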



import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

# Generate synthetic data
np.random.seed(0)
time = np.linspace(0, 10, 1000)
u = np.sin(2 * np.pi * 1 * time) + 0.1 * np.random.randn(len(time))  # Input signal
true_K = 2.0
true_tau = 3.0
sys_true = signal.lti([true_K], [true_tau, 1])
_, y, _ = signal.lsim(sys_true, u, time)
y += 0.5 * np.random.randn(len(time))  # Add noise to output

# Frequency response estimation
# Compute FFT
freqs = np.fft.fftfreq(len(time), time[1] - time[0])
U = np.fft.fft(u)
Y = np.fft.fft(y)

# Remove negative frequencies
pos_freqs = freqs[freqs >= 0]
U_pos = U[freqs >= 0]
Y_pos = Y[freqs >= 0]

# Compute frequency response
H = Y_pos / U_pos

# Plot the frequency response
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(pos_freqs, 20 * np.log10(np.abs(H)))
plt.xlabel('Frequency [Hz]')
plt.ylabel('Magnitude [dB]')
plt.title('Frequency Response')

plt.subplot(2, 1, 2)
plt.plot(pos_freqs, np.angle(H))
plt.xlabel('Frequency [Hz]')
plt.ylabel('Phase [radians]')
plt.title('Phase Response')

plt.tight_layout()
plt.grid(True)
plt.show()
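
Dividing the raw FFTs is sensitive to noise and to frequencies where the input has little energy. As a minimal alternative sketch (reusing u, y, and time from above), the H1 estimator averages cross- and auto-spectra with Welch's method before dividing:

# H1 frequency-response estimate via averaged spectra (reuses u, y, time from above)
fs = 1.0 / (time[1] - time[0])                  # sampling frequency
f, Puu = signal.welch(u, fs=fs, nperseg=256)    # input power spectral density
_, Pyu = signal.csd(u, y, fs=fs, nperseg=256)   # cross-spectral density of input and output
H1 = Pyu / Puu                                  # H1 estimate of the frequency response

plt.figure()
plt.subplot(2, 1, 1)
plt.plot(f, 20 * np.log10(np.abs(H1)))
plt.ylabel('Magnitude [dB]')
plt.title('H1 Frequency Response Estimate (Welch/CSD)')
plt.grid(True)

plt.subplot(2, 1, 2)
plt.plot(f, np.angle(H1))
plt.xlabel('Frequency [Hz]')
plt.ylabel('Phase [radians]')
plt.grid(True)
plt.tight_layout()
plt.show()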




import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

# Generate synthetic data
np.random.seed(0)
time = np.linspace(0, 10, 1000)
dt = time[1] - time[0]
u = np.zeros(len(time))
u[100] = 1.0 / dt  # unit-area pulse so that lsim approximates a unit impulse
true_K = 2.0
true_tau = 3.0
sys_true = signal.lti([true_K], [true_tau, 1])
_, y, _ = signal.lsim(sys_true, u, time)
y += 0.1 * np.random.randn(len(time))  # Add noise to output

# Plot the impulse response
plt.figure()
plt.plot(time, y)
plt.xlabel('Time [s]')
plt.ylabel('Output')
plt.title('Impulse Response')
plt.grid(True)
plt.show()
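
Assuming the unit-area pulse above, the gain and time constant can be recovered from the noisy impulse response by least squares. This is a minimal sketch that fits the analytic first-order impulse response h(t) = (K/tau) * exp(-(t - t0)/tau):

# Fit the analytic first-order impulse response to the measured data (t0 is the pulse time)
from scipy.optimize import curve_fit

t0 = time[100]

def impulse_model(t, K, tau):
    h = np.zeros_like(t)
    mask = t >= t0
    h[mask] = (K / tau) * np.exp(-(t[mask] - t0) / tau)
    return h

(K_est, tau_est), _ = curve_fit(impulse_model, time, y, p0=[1.0, 1.0])
print(f'Impulse-response fit: K = {K_est:.3f}, tau = {tau_est:.3f}')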



import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline

# Generate synthetic data
np.random.seed(0)
time = np.linspace(0, 10, 1000)
u = np.sin(2 * np.pi * 1 * time) + 0.1 * np.random.randn(len(time))  # Input signal
true_K = 2.0
true_tau = 3.0
sys_true = signal.lti([true_K], [true_tau, 1])
_, y, _ = signal.lsim(sys_true, u, time)
y += 0.5 * np.random.randn(len(time))  # Add noise to output

# Prepare data for SVR (a static map from the input u(t) to the output y(t))
X = u.reshape(-1, 1)

# Scale input and output
scaler_X = StandardScaler()
scaler_y = StandardScaler()
X_scaled = scaler_X.fit_transform(X)
y_scaled = scaler_y.fit_transform(y.reshape(-1, 1)).ravel()

# Define and train the SVR model (the data are already standardized above)
svr_model = SVR(kernel='rbf', C=1e3, epsilon=0.1)
svr_model.fit(X_scaled, y_scaled)

# Predict and map back to the original scale
y_pred_scaled = svr_model.predict(X_scaled)
y_pred = scaler_y.inverse_transform(y_pred_scaled.reshape(-1, 1)).ravel()

# Plot the results
plt.figure()
plt.plot(time, y, label='Actual Output')
plt.plot(time, y_pred, '--', label='Predicted Output by SVR')
plt.xlabel('Time')
plt.ylabel('Output')
plt.title('Support Vector Regression System Identification')
plt.legend()
plt.grid(True)
plt.show()
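
The SVR above maps u(t) directly to y(t), so it cannot represent the system's dynamics. A minimal sketch of a NARX-style variant, assuming the same u, y, and imports from the block above, uses lagged inputs and outputs as regressors (the lag count n_lags and the SVR settings here are illustrative choices):

# NARX-style SVR: regress y[t] on u[t-1..t-n_lags] and y[t-1..t-n_lags] (reuses u, y from above)
n_lags = 3
X_lag = np.column_stack(
    [u[n_lags - k:len(u) - k] for k in range(1, n_lags + 1)] +   # u[t-1], ..., u[t-n_lags]
    [y[n_lags - k:len(y) - k] for k in range(1, n_lags + 1)]     # y[t-1], ..., y[t-n_lags]
)
y_target = y[n_lags:]

svr_dyn = make_pipeline(StandardScaler(), SVR(kernel='rbf', C=100.0, epsilon=0.05))
svr_dyn.fit(X_lag, y_target)
y_pred_dyn = svr_dyn.predict(X_lag)

plt.figure()
plt.plot(time[n_lags:], y_target, label='Actual Output')
plt.plot(time[n_lags:], y_pred_dyn, '--', label='SVR with lagged regressors')
plt.xlabel('Time')
plt.ylabel('Output')
plt.title('NARX-style SVR (one-step-ahead prediction)')
plt.legend()
plt.grid(True)
plt.show()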


import numpy as np
import matplotlib.pyplot as plt
from sklearn.kernel_ridge import KernelRidge

# Data generation (first-order lag step-response example)
np.random.seed(0)
X = np.linspace(0, 10, 100).reshape(-1, 1)
tau = 1.0
K = 1.0
y = K * (1 - np.exp(-X / tau)) + np.random.normal(0, 0.1, X.shape)

# Define the kernel ridge regression model
model = KernelRidge(kernel='rbf', alpha=1.0)

# Train the model
model.fit(X, y)

# Predict
y_pred = model.predict(X)

# Plot
plt.figure(figsize=(10, 6))
plt.scatter(X, y, color='blue', label='True data')
plt.plot(X, y_pred, color='red', label='Kernel Ridge Regression')
plt.title('System Identification with Kernel Ridge Regression')
plt.xlabel('Input')
plt.ylabel('Output')
plt.legend()
plt.grid(True)
plt.show()





import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor

# Data generation (first-order lag step-response example)
np.random.seed(0)
X = np.linspace(0, 10, 100).reshape(-1, 1)
tau = 1.0
K = 1.0
y = K * (1 - np.exp(-X / tau)) + np.random.normal(0, 0.1, X.shape)

# Define the k-nearest neighbors regression model
model = KNeighborsRegressor(n_neighbors=5)

# Train the model
model.fit(X, y)

# Predict
y_pred = model.predict(X)

# Plot
plt.figure(figsize=(10, 6))
plt.scatter(X, y, color='blue', label='True data')
plt.plot(X, y_pred, color='red', label='k-Nearest Neighbors Regression')
plt.title('System Identification with k-Nearest Neighbors')
plt.xlabel('Input')
plt.ylabel('Output')
plt.legend()
plt.grid(True)
plt.show()




import numpy as np

# Parameter settings
population_size = 10
num_generations = 100  # number of generations
mutation_rate = 0.1
chromosome_length = 8  # length of the bit string
crossover_rate = 0.7

# Objective function (to be minimized; inverted below to form a fitness)
def fitness(x):
    return (x - 5)**2

# Generate the initial population
def initialize_population():
    return np.random.randint(0, 2, (population_size, chromosome_length))

# Decode a bit string into a decimal value
def decode(chromosome):
    return int(''.join(map(str, chromosome)), 2)

# Compute fitness values (inverse of the objective)
def calculate_fitness(population):
    decoded = np.array([decode(chromosome) for chromosome in population])
    return np.array([1 / (1 + fitness(x)) for x in decoded])

# Selection (roulette-wheel selection)
def select(population, fitness_values):
    probabilities = fitness_values / fitness_values.sum()
    indices = np.random.choice(range(population_size), size=population_size, p=probabilities)
    return population[indices]

# Crossover
def crossover(parent1, parent2):
    point = np.random.randint(1, chromosome_length - 1)
    child1 = np.concatenate([parent1[:point], parent2[point:]])
    child2 = np.concatenate([parent2[:point], parent1[point:]])
    return child1, child2

# Mutation
def mutate(chromosome):
    for i in range(chromosome_length):
        if np.random.rand() < mutation_rate:
            chromosome[i] = 1 - chromosome[i]
    return chromosome

# Main genetic algorithm loop
def genetic_algorithm():
    population = initialize_population()
    for generation in range(num_generations):
        fitness_values = calculate_fitness(population)

        # Report the best individual of the evaluated population
        # (done before selection so the fitness values match the reported individual)
        best_individual = population[np.argmax(fitness_values)]
        best_value = decode(best_individual)
        print(f"Generation {generation + 1}: Best Value = {best_value}, Fitness = {1 / (1 + fitness(best_value))}")

        # Selection
        population = select(population, fitness_values)

        # Crossover and mutation
        new_population = []
        while len(new_population) < population_size:
            parent1, parent2 = population[np.random.choice(range(population_size), size=2, replace=False)]
            if np.random.rand() < crossover_rate:
                child1, child2 = crossover(parent1, parent2)
                new_population.append(mutate(child1))
                new_population.append(mutate(child2))
            else:
                new_population.append(mutate(parent1))
                new_population.append(mutate(parent2))

        population = np.array(new_population)[:population_size]

genetic_algorithm()
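
The same evolutionary idea applies directly to the parameter estimation theme of this memo. The following is a minimal sketch that uses SciPy's differential evolution (rather than the hand-rolled GA above) to estimate K and tau of a first-order system by minimizing the simulation error:

# Evolutionary estimation of first-order parameters (self-contained sketch)
from scipy import signal
from scipy.optimize import differential_evolution

np.random.seed(0)
time = np.linspace(0, 10, 200)
u = np.sin(time) + 0.1 * np.random.randn(len(time))
_, y_true, _ = signal.lsim(signal.lti([2.0], [3.0, 1]), u, time)
y_meas = y_true + 0.1 * np.random.randn(len(time))

def output_error(params):
    K, tau = params
    _, y_sim, _ = signal.lsim(signal.lti([K], [tau, 1]), u, time)
    return np.mean((y_meas - y_sim) ** 2)

result = differential_evolution(output_error, bounds=[(0.1, 10.0), (0.1, 10.0)], seed=0)
print(f'Estimated K = {result.x[0]:.3f}, tau = {result.x[1]:.3f}')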





