Electrical/Electronic Semiconductor Circuits with AI, Machine Learning, and Statistics

Posted at 2025-04-16

This article collects Python code that combines electrical and electronic engineering with machine learning.

# Program name: ohms_law_optimizer_compare.py
# Overview: Learn Ohm's law V = IR with both SGD and Adam, print the loss every epoch, and compare the loss curves and parameter convergence

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import matplotlib.pyplot as plt

# --- データ生成 / Generate synthetic data ---
torch.manual_seed(42)
R_true = 5.0
I = torch.linspace(0, 10, 100).unsqueeze(1)                 # 電流 / Current I
V = R_true * I + torch.randn_like(I) * 1.0                  # 電圧(ノイズ付き)/ Voltage V with noise
dataset = TensorDataset(I, V)
train_loader = DataLoader(dataset, batch_size=16, shuffle=True)

# --- 線形モデル / Define linear regression model ---
class OhmsLawModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        return self.linear(x)

# --- 学習関数(エポック表示あり)/ Training function with verbose ---
def train(model, dataloader, optimizer, criterion, epochs=100, verbose=True):
    loss_list = []
    for epoch in range(epochs):
        model.train()
        epoch_loss = 0
        for I_batch, V_batch in dataloader:
            pred = model(I_batch)
            loss = criterion(pred, V_batch)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        avg_loss = epoch_loss / len(dataloader)
        loss_list.append(avg_loss)
        if verbose:
            print(f"[Epoch {epoch:3d}] Loss: {avg_loss:.4f}")
    return loss_list, model.linear.weight.item(), model.linear.bias.item()

# --- 損失関数 / Loss function ---
criterion = nn.MSELoss()

# --- モデルとオプティマイザの準備(SGD)/ Model and optimizer (SGD) ---
model_sgd = OhmsLawModel()
optimizer_sgd = optim.SGD(model_sgd.parameters(), lr=0.01)
loss_sgd, w_sgd, b_sgd = train(model_sgd, train_loader, optimizer_sgd, criterion, epochs=200, verbose=True)

# --- モデルとオプティマイザの準備(Adam)/ Model and optimizer (Adam) ---
model_adam = OhmsLawModel()
optimizer_adam = optim.Adam(model_adam.parameters(), lr=0.01)
loss_adam, w_adam, b_adam = train(model_adam, train_loader, optimizer_adam, criterion, epochs=200, verbose=True)

# --- 結果表示 / Print final learned parameters ---
print(f"\n[SGD]  Learned: V = {w_sgd:.3f} * I + {b_sgd:.3f}")
print(f"[Adam] Learned: V = {w_adam:.3f} * I + {b_adam:.3f}")

# --- 可視化 / Plot results ---
plt.figure(figsize=(14, 5))

# --- 電流 vs 電圧(学習後モデル)/ I vs V fit ---
plt.subplot(1, 2, 1)
plt.scatter(I.numpy(), V.numpy(), label="Observed V", alpha=0.5)
plt.plot(I.numpy(), model_sgd(I).detach().numpy(), label="SGD", color="blue")
plt.plot(I.numpy(), model_adam(I).detach().numpy(), label="Adam", color="red")
plt.xlabel("Current (I) [A]")
plt.ylabel("Voltage (V) [V]")
plt.title("Ohm's Law Regression Fit")
plt.legend()
plt.grid(True)

# --- 損失推移(収束比較)/ Loss over Epochs ---
plt.subplot(1, 2, 2)
plt.plot(loss_sgd, label="SGD", color="blue")
plt.plot(loss_adam, label="Adam", color="red")
plt.xlabel("Epoch")
plt.ylabel("MSE Loss")
plt.title("Training Loss Curve Comparison")
plt.legend()
plt.grid(True)

plt.tight_layout()
plt.show()
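
As a cross-check on both optimizers, the slope and intercept of V = IR can also be obtained in closed form by ordinary least squares. The sketch below is an addition for verification (it reuses the I and V tensors defined above):

# 最小二乗法による閉形式解 / Closed-form least-squares fit of V = w*I + b for comparison
ones = torch.ones_like(I)
A = torch.cat([I, ones], dim=1)              # 計画行列 / Design matrix [I, 1]
sol = torch.linalg.lstsq(A, V).solution      # 解 [[w], [b]] / Solution [[w], [b]]
print(f"[LS]   Closed form: V = {sol[0].item():.3f} * I + {sol[1].item():.3f}")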

# Program name: logistic_nn_cmos_inverter.py
# Overview: Approximate the output voltage of a CMOS inverter with a logistic function,
#           train a neural network to learn the characteristic and its derivative,
#           and report the gain both as a linear factor and in dB.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# --- 定数設定 / Constants ---
Vdd = 5.0       # 電源電圧 / Supply voltage [V]
V_Tn = 1.0      # NMOSしきい値電圧 / NMOS threshold voltage [V]
V_Tp = -1.0     # PMOSしきい値電圧 / PMOS threshold voltage [V]
gain = 1.0      # ゲイン(ロジスティック関数用)/ Gain used in the logistic function
Vth = 2.5       # CMOSインバータのスレッショルド電圧 / CMOS inverter switching threshold [V]

# --- ゲイン表示(倍とdB)/ Display gain in linear and dB ---
gain_dB = 20 * np.log10(gain) if gain > 0 else float('-inf')
print(f"ゲイン(倍)Gain = {gain:.2f}")
print(f"ゲイン(dB)Gain = {gain_dB:.2f} dB")

# --- ロジスティック関数定義(CMOSインバーター+-1倍の増幅) / Logistic function ---
def logistic_inverter(Vin, Vdd, Vth, gain):
    return Vdd * (1 / (1 + np.exp(-gain * (Vth - Vin))))

# --- 入力電圧と対応する出力を計算 / Generate dataset ---
Vin = np.linspace(0, Vdd, 1000)
Vout = logistic_inverter(Vin, Vdd, Vth, gain)

# --- データの標準化 / Standardize input & output ---
scaler_X = StandardScaler()
scaler_y = StandardScaler()
Vin_scaled = scaler_X.fit_transform(Vin.reshape(-1, 1))
Vout_scaled = scaler_y.fit_transform(Vout.reshape(-1, 1))

# --- ニューラルネットワーク構築 / Neural Network model ---
model = Sequential()
model.add(Dense(64, input_dim=1, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(1))  # 出力層(活性化なし)/ Output layer (no activation)

# --- モデルのコンパイル / Compile model ---
model.compile(optimizer='adam', loss='mean_squared_error')

# --- モデル学習 / Train model ---
model.fit(Vin_scaled, Vout_scaled, epochs=200, batch_size=16, verbose=0)

# --- 予測と逆標準化 / Predict and inverse-transform ---
Vout_pred_scaled = model.predict(Vin_scaled)
Vout_pred = scaler_y.inverse_transform(Vout_pred_scaled)

# --- 微分(数値微分) / Compute derivative ---
Vout_derivative = np.gradient(Vout_pred.ravel(), Vin)

# --- 可視化 / Plot results ---
plt.figure(figsize=(12, 6))

# 入力-出力カーブ / Input-output curve
plt.subplot(1, 2, 1)
plt.plot(Vin, Vout, label='True Logistic Function', color='blue')
plt.plot(Vin, Vout_pred, label='NN Prediction', color='orange', linestyle='--')
plt.title('Logistic Function and Neural Network Regression')
plt.xlabel('Input Voltage (Vin) [V]')
plt.ylabel('Output Voltage (Vout) [V]')
plt.grid(True)
plt.axvline(x=Vdd / 2, color='green', linestyle='--', label='Vin = Vdd/2')
plt.legend()

# 導関数のプロット / Plot of the derivative
plt.subplot(1, 2, 2)
plt.plot(Vin, Vout_derivative, label='dVout/dVin (Derivative)', color='red')
plt.title('Derivative of Logistic Function Approximation')
plt.xlabel('Input Voltage (Vin) [V]')
plt.ylabel('dVout/dVin [V/V]')
plt.grid(True)
plt.axvline(x=Vdd / 2, color='green', linestyle='--', label='Vin = Vdd/2')
plt.legend()

plt.tight_layout()
plt.show()
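
The numerical derivative above can be checked against the closed form: for Vout = Vdd·σ(gain·(Vth − Vin)) with σ the logistic function, the chain rule gives dVout/dVin = −Vdd·gain·σ·(1 − σ). A minimal verification sketch, reusing Vin, Vout_derivative, Vdd, Vth, and gain from the script above:

# 解析微分と数値微分の比較 / Compare the analytic derivative with the numerical one
sig = 1 / (1 + np.exp(-gain * (Vth - Vin)))       # σ(gain·(Vth − Vin))
dVout_true = -Vdd * gain * sig * (1 - sig)        # 連鎖律 / Chain rule
print(f"Analytic slope at Vin = Vth: {-Vdd * gain * 0.25:.3f} V/V")  # σ = 0.5 there

plt.plot(Vin, dVout_true, label='Analytic dVout/dVin')
plt.plot(Vin, Vout_derivative, '--', label='Numerical (NN-based)')
plt.xlabel('Input Voltage (Vin) [V]')
plt.ylabel('dVout/dVin [V/V]')
plt.grid(True)
plt.legend()
plt.show()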

# Program name: logic_nand_nn_full.py
# Overview: Build each basic logic gate from NAND only, then train an NN on XOR and visualize the training

# --- Align the numpy and tensorflow versions (for use on Google Colab etc.) ---
!pip uninstall -y numpy
!pip install numpy==1.23.5 --quiet
!pip install tensorflow --upgrade --quiet

# --- ライブラリのインポート / Import libraries ---
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# --- NANDゲートの定義 / Define NAND gate ---
def NAND(a, b):
    return int(not (a and b))

# --- 各論理ゲートのNANDによる構成 / Logic gates from NAND ---
def NOT(a):
    return NAND(a, a)

def AND(a, b):
    return NOT(NAND(a, b))

def OR(a, b):
    return NAND(NOT(a), NOT(b))

def NOR(a, b):
    return NOT(OR(a, b))

def XOR(a, b):
    t1 = NAND(a, b)
    t2 = NAND(a, t1)
    t3 = NAND(b, t1)
    return NAND(t2, t3)

def XNOR(a, b):
    return NOT(XOR(a, b))

# --- ゲートをテストする関数 / Test logic gate ---
def test_gate(name, func, inputs):
    print(f"\n--- {name} ---")
    for a, b in inputs:
        output = func(a, b)
        print(f"A: {a}, B: {b} => Output: {output}")

# --- 入力定義 / Define input sets ---
inputs_2bit = [(0,0), (0,1), (1,0), (1,1)]
inputs_1bit = [(0,0), (1,1)]  # NOT用

# --- 論理ゲートの動作確認 / Test all gates ---
print(" Logic Gates Using Only NAND:")
test_gate("NAND", NAND, inputs_2bit)
test_gate("NOT (A only)", lambda a, b: NOT(a), inputs_1bit)
test_gate("AND", AND, inputs_2bit)
test_gate("OR", OR, inputs_2bit)
test_gate("NOR", NOR, inputs_2bit)
test_gate("XOR", XOR, inputs_2bit)
test_gate("XNOR", XNOR, inputs_2bit)

# --- XOR用データセット / XOR dataset for NN ---
X = np.array([[0,0],[0,1],[1,0],[1,1]])  # 入力
y = np.array([[0],[1],[1],[0]])          # 出力

# --- パーセプトロン構築 / Build NN model for XOR ---
model = Sequential()
model.add(Dense(8, input_dim=2, activation='relu'))  # 中間層 / Hidden layer (8 units; with only 2 ReLU units XOR training often fails to converge)
model.add(Dense(1, activation='sigmoid'))            # 出力層 / Output layer

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X, y, epochs=1000, verbose=0)  # 学習曲線を記録 / Record training history

# --- 予測結果表示 / Show predictions ---
print("\n XOR learned by Neural Network:")
preds = model.predict(X)
for i in range(len(X)):
    pred = int(preds[i][0] > 0.5)
    print(f"Input: {X[i].tolist()} => Predicted: {pred}, True: {y[i][0]}")

# --- 学習曲線のプロット / Plot training loss ---
plt.figure(figsize=(8, 4))
plt.plot(history.history['loss'], label='Training Loss')
plt.title("XOR Learning Loss by Neural Network")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.grid(True)
plt.legend()
plt.tight_layout()
plt.show()

# Program name: ramp_sample_hold_nn_regression_adc.py
# Overview: Sample-and-hold a ramp wave at the clock period, regress it with a neural network, and apply 8-bit AD conversion

import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# --- パラメータ設定 / Parameters ---
fs = 200              # クロック周波数 / Clock frequency [Hz] (kept small for speed)
T = 1.0               # 波形持続時間 / Waveform duration [s]
samples = int(fs * T) # サンプル数 / Number of samples
t = np.linspace(0, T, samples)
Vmax = 5.0            # ランプ波最大電圧 / Ramp peak voltage [V]
ramp = Vmax * t / T   # ランプ波 / Ramp wave

# --- サンプルホールド / Sample-and-Hold ---
clock_period = 0.1
clock_ticks = np.arange(0, T, clock_period)
hold_indices = (clock_ticks * fs).astype(int)
sampled_t = t[hold_indices]
sampled_v = ramp[hold_indices]

# --- 入力のスケーリング / Standardize time and voltage ---
scaler_x = StandardScaler()
scaler_y = StandardScaler()
X = scaler_x.fit_transform(sampled_t.reshape(-1, 1))
y = scaler_y.fit_transform(sampled_v.reshape(-1, 1))

# --- ニューラルネットワーク構築 / Build NN model ---
model = Sequential([
    Dense(32, input_dim=1, activation='relu'),
    Dense(64, activation='relu'),
    Dense(32, activation='relu'),
    Dense(1)
])
model.compile(optimizer='adam', loss='mse')
model.fit(X, y, epochs=200, batch_size=8, verbose=0)

# --- 予測 / Predict from NN ---
X_pred = scaler_x.transform(sampled_t.reshape(-1, 1))
y_pred_scaled = model.predict(X_pred)
y_pred = scaler_y.inverse_transform(y_pred_scaled).ravel()

# --- AD変換(8bit) / Apply 8-bit ADC ---
adc_bits = 8
adc_levels = 2 ** adc_bits
adc_step = Vmax / adc_levels
adc_output = np.clip(np.round(y_pred / adc_step), 0, adc_levels - 1).astype(int)  # コードを0..255に飽和 / Saturate the code to the 0..255 range

# --- プロット / Plot Results ---
plt.figure(figsize=(12, 6))

# 時系列の回帰結果 / Regression result over time
plt.subplot(1, 2, 1)
plt.plot(t, ramp, label='Original Ramp', color='gray')
plt.plot(sampled_t, sampled_v, 'bo', label='Sample-Hold')
plt.plot(sampled_t, y_pred, 'r--', label='NN Regression')
plt.title("Ramp + NN Regression")
plt.xlabel("Time [s]")
plt.ylabel("Voltage [V]")
plt.grid(True)
plt.legend()

# ADC出力 / ADC output
plt.subplot(1, 2, 2)
plt.step(sampled_t, adc_output, where='post', color='purple', label='8-bit ADC')
plt.title("Quantized ADC Output")
plt.xlabel("Time [s]")
plt.ylabel("Digital Code")
plt.grid(True)
plt.legend()

plt.tight_layout()
plt.show()
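
For reference, the quantization step used above follows directly from the ADC parameters: 1 LSB = Vmax / 2^8 = 5.0 / 256 ≈ 19.5 mV. A standalone sketch of the same arithmetic (the 2.5 V sample value is just an illustrative choice):

# 8bit ADCの量子化ステップの確認 / Quantization arithmetic for the 8-bit ADC above
Vmax_chk, bits = 5.0, 8
lsb = Vmax_chk / 2**bits                  # 1 LSB ≈ 19.53 mV
v_sample = 2.5                            # 例: ホールドされた電圧 / Example held voltage [V]
print(f"1 LSB = {lsb*1e3:.2f} mV, code for {v_sample} V = {round(v_sample / lsb)}")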

# Program name: amplifier_circuits_nn_regression_with_noise.py
# Overview: Add noise to inverting, non-inverting, and buffer amplifier circuits, regress them with an NN, and report the gains in dB

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# --- 回路モデル関数 / Circuit functions ---
def inverting_amplifier(vin, gain=-5):
    return gain * vin

def non_inverting_amplifier(vin, gain=6):
    return gain * vin

def buffer_amplifier(vin):
    return vin

# --- 入力電圧とノイズ設定 / Input and noise ---
np.random.seed(0)
vin = np.linspace(-1, 1, 100)
noise_std = 0.1
noise = np.random.normal(0, noise_std, size=vin.shape)

# --- 各回路の出力(ノイズ付き)/ Noisy outputs ---
vout_inv = inverting_amplifier(vin) + noise
vout_noninv = non_inverting_amplifier(vin) + noise
vout_buf = buffer_amplifier(vin) + noise

# --- 回路データまとめる / Combine all into a dataset ---
vin_all = np.concatenate([vin]*3)
vout_all = np.concatenate([vout_inv, vout_noninv, vout_buf])
circuit_type = np.array([0]*100 + [1]*100 + [2]*100).reshape(-1, 1)  # 0: 反転 / inverting, 1: 非反転 / non-inverting, 2: バッファ / buffer

# --- 特徴量とラベル / Features and labels ---
X = np.column_stack([vin_all, circuit_type])
y = vout_all

# --- 標準化 / Scaling ---
scaler_X = StandardScaler()
scaler_y = StandardScaler()
X_scaled = scaler_X.fit_transform(X)
y_scaled = scaler_y.fit_transform(y.reshape(-1, 1))

# --- ニューラルネットワーク / Neural network ---
model = Sequential([
    Dense(32, input_dim=2, activation='relu'),
    Dense(64, activation='relu'),
    Dense(32, activation='relu'),
    Dense(1)
])
model.compile(optimizer='adam', loss='mse')
model.fit(X_scaled, y_scaled, epochs=300, batch_size=32, verbose=0)

# --- 予測と逆変換 / Predict and inverse scale ---
y_pred_scaled = model.predict(X_scaled)
y_pred = scaler_y.inverse_transform(y_pred_scaled).ravel()

# --- ゲイン推定とdB変換 / Gain estimation and dB display ---
for label, name in zip([0, 1, 2], ['Inverting', 'Non-inverting', 'Buffer']):
    idx = (circuit_type.ravel() == label)
    vin_section = vin_all[idx]
    vout_section = y_pred[idx]
    gain_linear = np.polyfit(vin_section, vout_section, 1)[0]
    gain_db = 20 * np.log10(abs(gain_linear)) if gain_linear != 0 else -np.inf
    print(f"{name} Amplifier Gain: {gain_linear:.3f} ({gain_db:.2f} dB)")

# --- 平均二乗誤差表示 / Show MSE ---
mse = mean_squared_error(y, y_pred)
print(f"\n全体の平均二乗誤差 MSE: {mse:.6f}")

# Program name: mosfet_channel_length_modulation_regression.py
# Overview: Approximate the MOSFET output current with channel-length modulation using a regression model

import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline

# --- MOSFETパラメータ設定 / Set MOSFET parameters ---
mu = 400e-4          # 移動度 [m^2/Vs]
C_ox = 3.45e-3       # 酸化膜容量 [F/m^2]
W = 10e-6            # チャンネル幅 [m]
L = 1e-6             # チャンネル長 [m]
Vth = 1.0            # しきい値電圧 [V]
Vgs = 2.0            # ゲート-ソース電圧 [V]
lambda_ = 0.02       # チャネル長変調係数

# --- Vdsの範囲とIdの計算(飽和領域)/ Compute Id vs Vds ---
Vds = np.linspace(0, 5, 100)
Id = 0.5 * mu * C_ox * (W / L) * (Vgs - Vth)**2 * (1 + lambda_ * Vds)

# --- 回帰モデル(多項式)/ Polynomial regression model ---
degree = 2
model = make_pipeline(PolynomialFeatures(degree), LinearRegression())
model.fit(Vds.reshape(-1, 1), Id)

# --- 予測 / Predict fitted curve ---
Id_pred = model.predict(Vds.reshape(-1, 1))

# --- プロット / Plot ---
plt.figure(figsize=(8, 5))
plt.plot(Vds, Id, label="True $I_D$ (with channel modulation)", color='blue')
plt.plot(Vds, Id_pred, '--', label=f"Polynomial Regression (degree={degree})", color='orange')
plt.xlabel("$V_{DS}$ [V]")
plt.ylabel("$I_D$ [A]")
plt.title("MOSFET Output with Channel Length Modulation")
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()

# --- モデル評価 / Show regression coefficients ---
coefs = model.named_steps['linearregression'].coef_
intercept = model.named_steps['linearregression'].intercept_
print(f"\n回帰式係数 (degree={degree}):")
print(f"y = {intercept:.4e} + " + " + ".join([f"{coefs[i]:.4e}*Vds^{i}" for i in range(1, degree+1)]))

# Program name: control_bode_nn_regression.py
# Overview: Approximate a control-engineering Bode plot (gain and phase) with a machine-learning model

import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# --- システム定義 / Transfer Function (First-order system) ---
K = 2.0    # ゲイン / Gain
tau = 0.5  # 時定数 / Time constant
num = [K]
den = [tau, 1]
system = signal.TransferFunction(num, den)

# --- 周波数範囲 / Frequency range for Bode plot ---
w = np.logspace(-1, 2, 300)  # 周波数(rad/s)
w, mag, phase = signal.bode(system, w)

# --- 特徴量とターゲット準備 / Feature: w, Target: mag & phase ---
X = np.log10(w).reshape(-1, 1)  # 特徴量(log周波数)
y_mag = mag                    # ゲイン[dB]
y_phase = phase                # 位相[deg]

# --- スケーリング(回帰用) / Standardize ---
scaler_X = StandardScaler()
X_scaled = scaler_X.fit_transform(X)

scaler_mag = StandardScaler()
y_mag_scaled = scaler_mag.fit_transform(y_mag.reshape(-1, 1))

scaler_phase = StandardScaler()
y_phase_scaled = scaler_phase.fit_transform(y_phase.reshape(-1, 1))

# --- ニューラルネットワーク構築 / Gain Model ---
def build_model():
    model = Sequential([
        Dense(64, activation='relu', input_dim=1),
        Dense(64, activation='relu'),
        Dense(1)
    ])
    model.compile(optimizer='adam', loss='mse')
    return model

model_mag = build_model()
model_phase = build_model()

# --- 学習 / Train models ---
model_mag.fit(X_scaled, y_mag_scaled, epochs=200, batch_size=16, verbose=0)
model_phase.fit(X_scaled, y_phase_scaled, epochs=200, batch_size=16, verbose=0)

# --- 予測と逆変換 / Predict and inverse transform ---
y_mag_pred = scaler_mag.inverse_transform(model_mag.predict(X_scaled))
y_phase_pred = scaler_phase.inverse_transform(model_phase.predict(X_scaled))

# --- 可視化 / Plot Bode & NN ---
plt.figure(figsize=(12, 6))

# --- ゲイン / Magnitude ---
plt.subplot(2, 1, 1)
plt.semilogx(w, mag, label='True Gain [dB]', color='blue')
plt.semilogx(w, y_mag_pred, '--', label='NN Gain [dB]', color='orange')
plt.ylabel("Gain [dB]")
plt.title("Bode Plot - Gain")
plt.grid(True)
plt.legend()

# --- 位相 / Phase ---
plt.subplot(2, 1, 2)
plt.semilogx(w, phase, label='True Phase [deg]', color='green')
plt.semilogx(w, y_phase_pred, '--', label='NN Phase [deg]', color='red')
plt.xlabel("Frequency [rad/s]")
plt.ylabel("Phase [deg]")
plt.title("Bode Plot - Phase")
plt.grid(True)
plt.legend()

plt.tight_layout()
plt.show()
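
For a first-order lag G(s) = K/(τs + 1), the Bode curves have closed forms: |G(jω)| = K/√(1 + (ωτ)²), i.e. 20·log10 of that in dB, and ∠G(jω) = −arctan(ωτ). A short verification against scipy's output, reusing w, mag, phase, K, and tau from above:

# 一次遅れ系の解析式とsignal.bodeの結果を比較 / Compare closed-form Bode expressions with signal.bode
mag_true = 20 * np.log10(K / np.sqrt(1 + (w * tau)**2))
phase_true = -np.degrees(np.arctan(w * tau))
print(f"Max |gain error|  = {np.max(np.abs(mag_true - mag)):.2e} dB")
print(f"Max |phase error| = {np.max(np.abs(phase_true - phase)):.2e} deg")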

# Re-run: complete code for FFT anomaly detection plus sine-wave plots

import numpy as np
import matplotlib.pyplot as plt
from scipy.fft import fft, fftfreq

# --- パラメータ設定 / Parameters ---
fs = 1000  # サンプリング周波数 / Sampling frequency [Hz]
T = 1      # 信号長 / Signal length [s]
N = fs * T # サンプル数 / Number of samples
t = np.linspace(0, T, N, endpoint=False)

# --- 正常信号: 100Hzサイン波 / Normal signal: 100 Hz sine wave ---
signal_normal = np.sin(2 * np.pi * 100 * t)

# --- 異常信号: 100Hz + 300Hz成分追加 / Abnormal signal: 100 Hz plus a 300 Hz component ---
signal_abnormal = np.sin(2 * np.pi * 100 * t) + 0.5 * np.sin(2 * np.pi * 300 * t)

# --- FFT関数 / FFT helper ---
def compute_fft(signal, fs):
    fft_vals = np.abs(fft(signal)) / len(signal)
    freqs = fftfreq(len(signal), 1/fs)
    half = len(signal) // 2
    return freqs[:half], fft_vals[:half]

# --- FFT計算 / Compute FFTs ---
freq_normal, fft_normal = compute_fft(signal_normal, fs)
freq_abnormal, fft_abnormal = compute_fft(signal_abnormal, fs)

# --- 閾値による異常検知 / Threshold-based anomaly detection ---
threshold_freq = 250  # 周波数閾値 / Frequency threshold [Hz]
threshold_amp = 0.1   # 振幅閾値 / Amplitude threshold
abnormal_detected = np.any((freq_abnormal > threshold_freq) & (fft_abnormal > threshold_amp))

# --- 異常検知結果出力 / Print detection result ---
print("Anomaly detection result:", "Anomaly detected!" if abnormal_detected else "Normal.")

# --- プロット / Plot ---
plt.figure(figsize=(14, 8))

# 時間波形プロット(正常と異常)/ Time-domain waveforms (normal and abnormal)
plt.subplot(2, 2, 1)
plt.plot(t, signal_normal, label='Normal Signal')
plt.title("Normal Time-Domain Signal")
plt.xlabel("Time [s]")
plt.ylabel("Amplitude")
plt.grid(True)

plt.subplot(2, 2, 2)
plt.plot(t, signal_abnormal, label='Abnormal Signal', color='orange')
plt.title("Abnormal Time-Domain Signal")
plt.xlabel("Time [s]")
plt.ylabel("Amplitude")
plt.grid(True)

# FFTプロット(正常と異常)/ FFT plots (normal and abnormal)
plt.subplot(2, 2, 3)
plt.plot(freq_normal, fft_normal, label='Normal FFT')
plt.title("FFT of Normal Signal")
plt.xlabel("Frequency [Hz]")
plt.ylabel("Amplitude")
plt.grid(True)

plt.subplot(2, 2, 4)
plt.plot(freq_abnormal, fft_abnormal, label='Abnormal FFT', color='orange')
plt.axvline(threshold_freq, color='red', linestyle='--', label='Threshold Freq')
plt.axhline(threshold_amp, color='green', linestyle='--', label='Threshold Amp')
plt.title("FFT of Abnormal Signal")
plt.xlabel("Frequency [Hz]")
plt.ylabel("Amplitude")
plt.grid(True)
plt.legend()

plt.tight_layout()
plt.show()
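
Note that compute_fft returns |X[k]|/N without doubling the one-sided spectrum, so the unit-amplitude 100 Hz sine appears with height 0.5 and the 0.5-amplitude 300 Hz component with height 0.25 (which is why the 0.1 threshold catches it). If physical amplitudes are wanted, the non-DC bins can be doubled, as in this added sketch:

# 片側スペクトルの振幅補正(DC以外を2倍)/ One-sided spectrum with amplitude correction
def compute_fft_amplitude(sig_t, fs):
    n = len(sig_t)
    spec = np.abs(fft(sig_t)) / n
    freqs = fftfreq(n, 1 / fs)
    half = n // 2
    amp = spec[:half].copy()
    amp[1:] *= 2                      # 物理振幅を復元 / Recover physical amplitudes
    return freqs[:half], amp

f1, a1 = compute_fft_amplitude(signal_abnormal, fs)
print(f"Peak near 100 Hz: {a1[np.argmin(np.abs(f1 - 100))]:.3f} (expected ~1.0)")
print(f"Peak near 300 Hz: {a1[np.argmin(np.abs(f1 - 300))]:.3f} (expected ~0.5)")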

# Program name: cmos_inverter_sigmoid_activation_detailed.py

import numpy as np
import matplotlib.pyplot as plt

# --- シグモイド関数 / Sigmoid function ---
def sigmoid(x):
    """
    シグモイド活性化関数
    Sigmoid activation function
    入力 x に対して、0~1の範囲に出力を圧縮する関数
    """
    return 1 / (1 + np.exp(-x))

# --- 入力電圧(5つ)/ Input voltages (5 values) ---
V_in = np.array([0.01, 0.03, 0.05, 0.07, 0.09])  # [V]
# ニューロンへの入力を模倣 / Simulate voltage inputs to a neuron

# --- 増幅率(重み)/ Amplification factors (weights) ---
weights = np.array([5.0, 4.0, 3.0, 2.0, 1.0])
# 各入力に対する重み(ゲイン)を指定 / Weights (gain) for each input

# --- 増幅率をデシベルに変換 / Convert gain to dB ---
weights_dB = 20 * np.log10(weights)  # dB = 20 * log10(gain)

# --- dB値の表示 / Display gain in dB ---
print("=== Gain in dB ===")
for i, (w, db) in enumerate(zip(weights, weights_dB)):
    print(f"Weight[{i}] = {w:.2f}{db:.2f} dB")

# --- CMOS特性パラメータ / CMOS characteristic parameters ---
Vdd = 1.0       # 電源電圧 / Power supply voltage [V]
V_Tn = 1.0      # NMOSしきい値電圧 / Threshold voltage for NMOS
V_Tp = -1.0     # PMOSしきい値電圧 / Threshold voltage for PMOS

mu_n = 600e-4   # 電子移動度 / Electron mobility (NMOS)
mu_p = 300e-4   # 正孔移動度 / Hole mobility (PMOS)
C_ox = 3.45e-3  # 酸化膜容量 / Gate oxide capacitance

# --- トランジスタ寸法 / Transistor dimensions ---
W_n, L_n = 10e-6, 2e-6  # NMOSのチャネル幅と長さ / NMOS width and length
W_p, L_p = 20e-6, 2e-6  # PMOSのチャネル幅と長さ / PMOS width and length

# --- βの計算(プロセス定数)/ Calculate beta parameters ---
beta_n = mu_n * C_ox * (W_n / L_n)  # NMOSのβ値 / NMOS beta
beta_p = mu_p * C_ox * (W_p / L_p)  # PMOSのβ値 / PMOS beta

# --- CMOSのしきい値電圧Vthの計算 / Calculate CMOS threshold voltage ---
sqrt_ratio = np.sqrt(beta_n / beta_p)
Vth = (Vdd + V_Tp + sqrt_ratio * V_Tn) / (1 + sqrt_ratio)
# CMOSインバータの対称性に基づいた実効しきい値 / Effective switching threshold

# --- ゲインの計算 / Gain calculation ---
gmp = 1e-3  # PMOSのトランスコンダクタンス / Transconductance
gmn = 1e-3  # NMOSのトランスコンダクタンス
rop = 10e3  # PMOSの出力抵抗 / Output resistance
ron = 10e3  # NMOSの出力抵抗
gain = (gmp + gmn) / (1 / rop + 1 / ron)
# 並列接続の出力抵抗と合成トランスコンダクタンスで増幅率を算出 / Overall voltage gain

# --- CMOS由来のパラメータを表示 / Show CMOS-derived parameters ---
print("\n=== CMOS-Derived Parameters ===")
print(f"Calculated Vth: {Vth:.4f} V")
print(f"Calculated Gain: {gain:.4f}")

# --- バイアスの追加 / Add bias ---
bias = 0.01  # 出力オフセット / Output offset

# --- 加重和の計算 / Weighted sum calculation ---
z = np.dot(V_in, weights) + bias
# 入力と重みの内積にバイアスを加える / Compute dot product + bias

# --- シグモイド活性化関数による出力 / Output via sigmoid activation ---
output = sigmoid(gain * (z - Vth))
# CMOSのVthとゲインに基づいた活性化出力 / Activation based on CMOS Vth

# --- 出力結果の表示 / Display final output ---
print("\n=== Activation Output ===")
print(f"Weighted sum (z): {z:.4f}")
print(f"Sigmoid Output: {output:.4f}")

# --- プロットの準備 / Plot CMOS inverter characteristics ---
Vin = np.linspace(0, Vdd, 200)  # 入力電圧範囲 / Input voltage range
Vout = Vdd * sigmoid(gain * (Vin - Vth))  # 出力電圧 / Output voltage

# --- グラフ表示 / Plot result ---
plt.plot(Vin, Vout, label="CMOS Inverter Output (Sigmoid)")
plt.scatter(z, output * Vdd, color='red', zorder=5, label="Activated Output Point")
plt.axvline(x=Vth, color='gray', linestyle='--', label=f"Vth = {Vth:.2f}")
plt.title("CMOS Inverter Approximation with Sigmoid Function")
plt.xlabel("Input Voltage (Vin or z) [V]")
plt.ylabel("Output Voltage Vout [V]")
plt.grid(True)
plt.legend()
plt.show()




# Program Name: fet_curve_fit_channel_modulation.py
# Creation Date: 20250418
# Overview: Perform parabolic and linear regression to model FET drain characteristics including channel length modulation.
# Usage: Run in Google Colab or local Python with data points manually inserted.

# --- ライブラリのインポート / Import libraries ---
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

# --- 1. データ定義 / Define V_DS - I_D data manually for each V_GS ---
V_DS = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18])  # V_DS values

# 各 V_GS における I_D データ [mA] / Drain current data per V_GS
data = {
    'V_GS = 0V':    [0, 0.8, 1.5, 2.0, 2.4, 2.6, 2.7, 2.75, 2.78, 2.80, 2.81, 2.82, 2.83, 2.84, 2.85],
    'V_GS = -0.4V': [0, 0.5, 1.1, 1.5, 1.8, 1.95, 2.05, 2.10, 2.13, 2.15, 2.16, 2.17, 2.18, 2.19, 2.20],
    'V_GS = -0.8V': [0, 0.3, 0.8, 1.1, 1.4, 1.55, 1.63, 1.67, 1.69, 1.70, 1.71, 1.72, 1.73, 1.735, 1.74],
}

# --- 2. モデル定義 / Define fitting models ---
def parabolic(V, a, b, c):  # 放物線モデル / Parabolic model
    return a * V**2 + b * V + c

def linear(V, m, c):        # 線形モデル / Linear model
    return m * V + c

# --- 3. 各V_GSに対して回帰とプロット / Fit and plot for each V_GS ---
plt.figure(figsize=(10, 6))

for label, I_D in data.items():
    I_D = np.array(I_D)
    
    # 分割点の設定 / Split point at V_DS = 5V
    split_idx = np.argmax(V_DS >= 5)
    
    # --- 放物線フィット / Parabolic fit ---
    popt_parab, _ = curve_fit(parabolic, V_DS[:split_idx], I_D[:split_idx])
    I_fit_parab = parabolic(V_DS[:split_idx], *popt_parab)
    a, b, c = popt_parab

    # --- 線形フィット / Linear fit ---
    popt_line, _ = curve_fit(linear, V_DS[split_idx:], I_D[split_idx:])
    I_fit_line = linear(V_DS[split_idx:], *popt_line)
    m, c_line = popt_line

    # --- 回帰式を表示 / Print fitted equations ---
    print(f"=== {label} ===")
    print(f"Parabolic Fit (V_DS <= 5V): I_D = {a:.4f}*V_DS^2 + {b:.4f}*V_DS + {c:.4f}")
    print(f"Linear Fit    (V_DS >  5V): I_D = {m:.4f}*V_DS + {c_line:.4f}")
    print()

    # --- グラフ描画 / Plot actual data and fits ---
    plt.plot(V_DS, I_D, 'o', label=label)
    plt.plot(V_DS[:split_idx], I_fit_parab, '--', color='gray')
    plt.plot(V_DS[split_idx:], I_fit_line, '--', color='black')

# --- ラベル設定 / Labels in English ---
plt.xlabel('Drain-Source Voltage $V_{DS}$ [V]')
plt.ylabel('Drain Current $I_D$ [mA]')
plt.title('FET $I_D$-$V_{DS}$ Characteristics with Channel-Length Modulation')
plt.grid(True, linestyle=':')
plt.legend()
plt.tight_layout()
plt.show()

Reference:

# --- Program name: pinns_rcl_series_circuit.py ---
# Overview: Physics-Informed Neural Networks (PINNs) for the free oscillation of a series RCL circuit
#      - Generate simulation data with the finite difference method
#      - Build and train the PINNs model
#      - Visualize the results

import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# --- 有限差分法によるRCL回路のシミュレーション / FDM for RCL Circuit ---
def FDM_RCL(L, R, C, q0, i0, t_end, n_steps):
    t = np.linspace(0, t_end, n_steps)
    q_num = np.zeros_like(t)
    i_num = np.zeros_like(t)
    q_num[0] = q0
    i_num[0] = i0
    dt = t[1] - t[0]
    for i in range(n_steps - 1):
        acc = -(R * i_num[i] + q_num[i] / C) / L
        i_num[i+1] = i_num[i] + acc * dt
        q_num[i+1] = q_num[i] + i_num[i+1] * dt

    omega_0 = 1 / np.sqrt(L*C)
    zeta = R / (2 * np.sqrt(L / C))
    omega_d = omega_0 * np.sqrt(1 - zeta**2)
    q_th = np.exp(-zeta * omega_0 * t) * (q0 * np.cos(omega_d * t) + (i0 + zeta * omega_0 * q0) / omega_d * np.sin(omega_d * t))

    return t, q_num, q_th

# --- Early Stopping ---
def early_Stopping(loss_values, patience=10, delta=0):
    # delta=0 では早期終了は発動しない / With the default delta=0 this never triggers, so training runs all epochs
    if len(loss_values) <= patience:
        return False
    minimum = np.min(loss_values[:-patience])
    current = loss_values[-1]
    diff = abs(current - minimum)
    return diff < delta

# --- PINNsモデル定義 / Physics-Informed Neural Network ---
class PhysicsInformedNNs(nn.Module):
    def __init__(self, n_input, n_output, n_neuron, n_layer, epochs, act_fn='tanh'):
        super(PhysicsInformedNNs, self).__init__()
        self.n_input = n_input
        self.n_output = n_output
        self.n_neuron = n_neuron
        self.n_layer = n_layer
        self.epochs = epochs
        self.act_fn = getattr(F, act_fn)

        layers = [nn.Linear(n_input, n_neuron), nn.Tanh()]
        for _ in range(n_layer - 1):
            layers.append(nn.Linear(n_neuron, n_neuron))
            layers.append(nn.Tanh())
        layers.append(nn.Linear(n_neuron, n_output))
        self.layers = nn.Sequential(*layers)

    def forward(self, t):
        return self.layers(t)

    def train_step(self, t_data, q_data, t_pinn, R, C_inv, optimizer, loss_fn):
        optimizer.zero_grad()

        # データドリブンな部分 / Data-driven term
        q_pred = self.forward(t_data)
        loss1 = loss_fn(q_pred, q_data)

        # 物理誤差部分(L=1に正規化)/ Physics residual (L normalized to 1)
        q_pred_pinn = self.forward(t_pinn)
        dq_dt = torch.autograd.grad(q_pred_pinn.sum(), t_pinn, create_graph=True)[0]
        dq_dt2 = torch.autograd.grad(dq_dt.sum(), t_pinn, create_graph=True)[0]
        loss_physics = dq_dt2 + R * dq_dt + C_inv * q_pred_pinn
        loss2 = loss_fn(loss_physics, torch.zeros_like(loss_physics))

        # 合計損失 / Total loss
        loss = loss1 + 3.0e-4 * loss2
        loss.backward()
        optimizer.step()

        return loss, loss1, loss2, q_pred_pinn

    def train(self, t_data, q_data, t_pinn, R, C_inv, optimizer, loss_fn, early_stopping):
        loss_values = []
        q_preds = []
        for i in range(self.epochs):
            loss, loss1, loss2, q_pred = self.train_step(t_data, q_data, t_pinn, R, C_inv, optimizer, loss_fn)
            loss_values.append(loss.item())
            q_preds.append(q_pred)
            if i % 1000 == 0:
                print('Epoch:', i, 'Loss:', loss.item(), 'Data Loss:', loss1.item(), 'Physics Loss:', loss2.item())
            if early_stopping(loss_values):
                break
        return loss_values, q_preds

# --- シミュレーションパラメータと訓練の実行 / Simulation parameters and training ---
# RCL直列回路のパラメータ / Parameters of the RCL series circuit
L = 1.0
R = 0.5
C = 0.25
C_inv = 1.0 / C
q0 = 1.0
i0 = 0.0
t_end = 10.0
n_steps = 1000

# データ生成 / Generate data
t, q_num, q_th = FDM_RCL(L, R, C, q0, i0, t_end, n_steps)
select_data = [0, 100, 300, 600, 900]
t_data = torch.from_numpy(t[select_data]).view(-1,1).to(torch.float32)
q_data = torch.from_numpy(q_num[select_data]).view(-1,1).to(torch.float32)
t_pinn = torch.linspace(0, t_end, 1000, requires_grad=True).view(-1,1)

# モデルパラメータ / Model parameters
n_input = 1
n_output = 1
n_neuron = 32
n_layer = 4
epochs = 20000

# モデル初期化と訓練実行 / Initialize the model and train
model = PhysicsInformedNNs(n_input, n_output, n_neuron, n_layer, epochs)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
loss_fn = nn.MSELoss()

loss_values, q_preds = model.train(t_data, q_data, t_pinn, R, C_inv, optimizer, loss_fn, early_Stopping)

# --- 結果の可視化 / Visualization ---
plt.figure()
plt.plot(np.arange(len(loss_values)), loss_values)
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Training Loss')

plt.figure()
plt.plot(t, q_th, label='Theoretical solution')
plt.plot(t, q_num, label='FDM solution', linestyle='dashed')
plt.plot(t_pinn.detach().numpy(), q_preds[-1].detach().numpy(), label='PINNs prediction', linestyle='dashdot')
plt.scatter(t_data.detach().numpy(), q_data.detach().numpy(), label='Training data', color='red')
plt.xlabel('Time [s]')
plt.ylabel('Charge q(t) [C]')
plt.legend()
plt.title('RCL Series Circuit via PINNs')
plt.grid()
plt.show()
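
With L = 1, R = 0.5, and C = 0.25, the free-oscillation ODE q'' + (R/L)·q' + q/(LC) = 0 has natural frequency ω₀ = 1/√(LC) = 2 rad/s, damping ratio ζ = (R/2)·√(C/L) = 0.125, and damped frequency ω_d = ω₀·√(1 − ζ²) ≈ 1.984 rad/s, matching the analytic solution used inside FDM_RCL. A quick numeric confirmation, reusing L, R, and C from above:

# 回路値から減衰振動パラメータを確認 / Damped-oscillation parameters implied by L, R, C
omega_0 = 1 / np.sqrt(L * C)                 # 固有角周波数 / Natural frequency [rad/s]
zeta = (R / 2) * np.sqrt(C / L)              # 減衰比 / Damping ratio
omega_d = omega_0 * np.sqrt(1 - zeta**2)     # 減衰振動周波数 / Damped frequency [rad/s]
print(f"omega_0 = {omega_0:.3f} rad/s, zeta = {zeta:.3f}, omega_d = {omega_d:.3f} rad/s")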

# プログラム名: hopfield_voltage_gain_simulation.py
# Program Name: hopfield_voltage_gain_simulation.py

import numpy as np
import matplotlib.pyplot as plt

# --- 活性化関数(シグモイド)/ Activation Function (Sigmoid) ---
def sigmoid(x, beta=5):
    # beta: ゲイン(活性化関数の傾き) / Gain (steepness of activation function)
    return 1 / (1 + np.exp(-beta * x))

# --- エネルギー関数 / Energy Function ---
def energy(x, W, theta):
    # エネルギー関数定義 / Define Hopfield energy
    return -0.5 * np.dot(x, np.dot(W, x)) + np.dot(theta, x)

# --- ネットワーク設定 / Network Configuration ---
n = 4  # ニューロン数 / Number of neurons
np.random.seed(0)
W = np.random.randn(n, n)
W = (W + W.T) / 2  # 対称行列にする / Make symmetric
np.fill_diagonal(W, 0)  # 自己結合なし / No self-connections

theta = np.random.randn(n) * 0.1  # 小さなバイアス / Small biases

# --- 初期状態(電圧) / Initial State (Voltage) ---
v = np.random.uniform(-1, 1, size=n)

# --- パラメータ / Parameters ---
beta = 5        # シグモイドのゲイン / Gain of sigmoid
eta = 0.1       # 学習率 / Learning rate
epochs = 50     # 更新回数 / Number of updates

# --- 履歴保存 / Store history ---
v_history = [v.copy()]
e_history = [energy(sigmoid(v, beta), W, theta)]

# --- ネットワーク更新ループ / Network Update Loop ---
for epoch in range(epochs):
    x = sigmoid(v, beta)  # 出力 / Output (firing)
    dv = np.dot(W, x) - theta   # エネルギーの負勾配 / Negative gradient of the energy (sign fixed so the energy actually decreases)
    v += eta * dv               # 電圧更新 / Update voltage
    v_history.append(v.copy())
    e_history.append(energy(sigmoid(v, beta), W, theta))

# --- プロット: エネルギーの収束 / Plot: Energy Convergence ---
plt.figure(figsize=(10, 4))
plt.plot(e_history, marker='o')
plt.xlabel("Iteration")
plt.ylabel("Energy")
plt.title("Hopfield Network Energy Minimization")
plt.grid(True)
plt.show()

# --- プロット: 各ニューロンの電圧変化 / Voltage over Time ---
v_history = np.array(v_history)
plt.figure(figsize=(10, 5))
for i in range(n):
    plt.plot(v_history[:, i], label=f'Neuron {i}')
plt.xlabel("Iteration")
plt.ylabel("Voltage")
plt.title("Neuron Voltages over Time")
plt.legend()
plt.grid(True)
plt.show()


# プログラム名: keras_sin_predict_all_in_one.py
# Program Name: keras_sin_predict_all_in_one.py
# 概要: サイン波電圧を入力 → ニューラルネットワークモデル構築 → 学習 → 学習曲線の可視化 → 予測まで一括実行
# Overview: Input sine wave voltage → Build NN model → Train → Visualize learning → Predict

# --- ライブラリのインポート / Import libraries ---
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense

# === 3-a. サイン波電圧の生成 / Generate sine wave voltage ===
x = np.linspace(-np.pi, np.pi, 100).reshape(-1, 1)  # 入力: サイン波電圧 / Input: sine wave voltage
t = np.sin(x)                                       # 教師信号(理想的な電圧)/ Target signal (ideal voltage)

# --- 入力波形の表示 / Plot input sine wave ---
plt.figure(figsize=(6, 3))
plt.plot(x, t, label="True Sine Voltage")
plt.title("Generated Sine Wave Voltage")
plt.xlabel("Time")  # 任意スケーリングされた時間軸
plt.ylabel("Voltage")
plt.grid(True)
plt.legend()
plt.show()

# === 3-b. ニューラルネットワーク構築 / Build NN model ===
batch_size = 8     # バッチサイズ / Batch size
n_in = 1           # 入力次元(電圧)/ Input dimension (Voltage)
n_mid = 20         # 中間層ユニット数 / Hidden layer units
n_out = 1          # 出力次元(電圧)/ Output dimension (Voltage)

model = Sequential()
model.add(Dense(n_mid, input_shape=(n_in,), activation="sigmoid"))  # 活性化関数: シグモイド
model.add(Dense(n_out, activation="linear"))                        # 線形出力層 / Linear output
model.compile(loss="mean_squared_error", optimizer="sgd")          # 損失関数と最適化 / Loss & Optimizer
model.summary()

# === 3-c. 学習実行 / Train the model ===
history = model.fit(x, t, batch_size=batch_size, epochs=2000, validation_split=0.1, verbose=0)

# === 3-d. 学習曲線の可視化 / Visualize learning curves ===
loss_train = history.history['loss']
loss_val = history.history['val_loss']

plt.figure(figsize=(6, 3))
plt.plot(np.arange(len(loss_train)), loss_train, label="Training Loss")
plt.plot(np.arange(len(loss_val)), loss_val, label="Validation Loss")
plt.title("Learning Curve of Sine Voltage Prediction")
plt.xlabel("Epoch")
plt.ylabel("MSE Loss")
plt.grid(True)
plt.legend()
plt.show()

# === 3-e. サイン波電圧の予測と可視化 / Predict & visualize sine voltage ===
plt.figure(figsize=(6, 3))
plt.plot(x, model.predict(x), label="Predicted Voltage")
plt.plot(x, t, label="True Voltage", linestyle='dashed')
plt.title("Sine Wave Voltage Prediction")
plt.xlabel("Time")
plt.ylabel("Voltage")
plt.grid(True)
plt.legend()
plt.show()

# プログラム名: lstm_predict_dc_plus_sine.py
# Program Name: lstm_predict_dc_plus_sine.py
# 概要: DC成分に微小なサイン波を重ねた信号を使ってLSTMで次の値を予測
# Description: Predict future voltage in a time series with DC + small sine wave using LSTM

import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
from sklearn.preprocessing import MinMaxScaler

# --- データ生成 / Generate time series data ---
t = np.linspace(0, 10, 500)                            # 時間軸 / Time axis
V_dc = 2.0                                             # DC成分 / DC component
V_ac = 0.1 * np.sin(2 * np.pi * 1 * t)                # 微小サイン波 / Small sine wave
V = V_dc + V_ac                                        # 合成信号 / Total signal

# --- スケーリング / Normalize data between 0 and 1 ---
scaler = MinMaxScaler()
V_scaled = scaler.fit_transform(V.reshape(-1, 1))

# --- 時系列データの整形 / Create sequence data for LSTM ---
def create_dataset(data, seq_len):
    X, y = [], []
    for i in range(len(data) - seq_len):
        X.append(data[i:i+seq_len])
        y.append(data[i+seq_len])
    return np.array(X), np.array(y)

seq_len = 20  # 過去20点を使って次を予測 / Use 20 points to predict the next
X, y = create_dataset(V_scaled, seq_len)

# --- LSTM入力の形に変形 / Reshape for LSTM: (samples, timesteps, features) ---
X = X.reshape((X.shape[0], X.shape[1], 1))

# --- LSTMモデル構築 / Build LSTM model ---
model = Sequential([
    LSTM(64, input_shape=(seq_len, 1), return_sequences=False),
    Dense(1)
])
model.compile(optimizer='adam', loss='mse')
model.fit(X, y, epochs=100, batch_size=16, verbose=0)

# --- 予測と逆スケーリング / Predict and inverse transform ---
y_pred = model.predict(X)
y_true = scaler.inverse_transform(y)
y_pred_inv = scaler.inverse_transform(y_pred)

# --- プロット / Plot ---
plt.figure(figsize=(10, 4))
plt.plot(t[seq_len:], y_true, label='True Voltage')
plt.plot(t[seq_len:], y_pred_inv.ravel(), label='LSTM Prediction', linestyle='--')
plt.title('LSTM Prediction of DC + Small Sine Wave')
plt.xlabel('Time [s]')
plt.ylabel('Voltage [V]')
plt.grid(True)
plt.legend()
plt.tight_layout()
plt.show()
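
The model above is only evaluated one step ahead on windows taken from the training series. To forecast beyond the end of the data, the same model can be applied recursively, feeding each prediction back into the input window. A minimal sketch under that assumption, reusing model, V_scaled, scaler, and seq_len (the 50-step horizon is an arbitrary choice):

# 再帰的な多段予測 / Recursive multi-step forecast beyond the series
window = V_scaled[-seq_len:].reshape(1, seq_len, 1)
future_scaled = []
for _ in range(50):                                  # 予測ホライズン / Forecast horizon (arbitrary)
    nxt = model.predict(window, verbose=0)           # 形状 (1, 1) / Shape (1, 1)
    future_scaled.append(nxt[0, 0])
    window = np.concatenate([window[:, 1:, :], nxt.reshape(1, 1, 1)], axis=1)

future = scaler.inverse_transform(np.array(future_scaled).reshape(-1, 1))
print("First 5 forecast values [V]:", future[:5].ravel().round(3))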

# Program name: first_order_mpc_nn_kalman.py
# Overview: MPC control of a first-order lag system, NN one-step regression, and Kalman-filter state estimation

import numpy as np
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPRegressor
import cvxpy as cp

# --- 一次遅れ系のパラメータ / First-order system parameters ---
K = 1.0
tau = 1.0
Ts = 0.1
a = np.exp(-Ts / tau)
b = K * (1 - np.exp(-Ts / tau))

# --- シミュレーション設定 / Simulation parameters ---
N = 100      # 総ステップ数 / Total number of steps
Np = 10      # MPC予測ホライズン / MPC prediction horizon
r = 1.0      # 目標値 / Reference
noise_std = 0.05  # 観測ノイズ / Measurement noise

# --- 初期化 / Initialization ---
y = [0.0]
u = [0.0]
y_meas = [0.0]
y_nn = []
y_kf = []

# --- NNモデル構築 / Build NN model for one-step prediction ---
X_train = []
y_train = []

# --- カルマンフィルタ初期化 / Kalman filter initialization ---
x_kf = 0.0                # 初期状態推定値 / Initial state estimate
P = 1.0                   # 共分散初期値 / Initial covariance
Q = 1e-4                  # プロセスノイズ / Process noise
R = noise_std**2          # 観測ノイズ / Measurement noise

# --- MPC関数定義 / MPC controller ---
def solve_mpc(y_now, ref):
    u_var = cp.Variable(Np)
    cost = 0
    y_pred = y_now
    for i in range(Np):
        y_pred = a * y_pred + b * u_var[i]
        cost += cp.square(ref - y_pred)
    prob = cp.Problem(cp.Minimize(cost), [u_var >= 0, u_var <= 1])
    prob.solve(solver=cp.OSQP)
    return u_var.value[0] if u_var.value is not None else 0.0

# --- メインループ / Simulation loop ---
for k in range(N):
    # --- 制御入力の決定(MPC)/ Calculate control input ---
    u_now = solve_mpc(y[-1], r)
    u.append(u_now)

    # --- システム更新 / System update ---
    y_next = a * y[-1] + b * u_now
    y.append(y_next)

    # --- ノイズ付き観測 / Add measurement noise ---
    y_obs = y_next + np.random.normal(0, noise_std)
    y_meas.append(y_obs)

    # --- データ蓄積(NN学習用) / Collect training data for NN ---
    if k > 0:
        X_train.append([u[k], y_meas[k]])
        y_train.append(y_meas[k+1] if k+1 < len(y_meas) else y_meas[k])

    # --- カルマンフィルタ更新 / Kalman filter ---
    # 予測ステップ / Prediction step
    x_pred = a * x_kf + b * u_now
    P = a * P * a + Q
    # 更新ステップ / Update step
    K_gain = P / (P + R)
    x_kf = x_pred + K_gain * (y_obs - x_pred)
    P = (1 - K_gain) * P
    y_kf.append(x_kf)

# --- NN学習 / Train neural network ---
reg = MLPRegressor(hidden_layer_sizes=(32, 32), max_iter=1000)
reg.fit(X_train, y_train)

# --- NN予測 / Predict with neural network ---
for k in range(N):
    input_vec = np.array([[u[k], y_meas[k]]])
    y_nn.append(reg.predict(input_vec)[0])

# --- プロット / Plot results ---
t = np.arange(N)

plt.figure(figsize=(12, 6))
plt.plot(t, y[1:], label="True Output")
plt.plot(t, y_meas[1:], label="Measured (Noisy)", alpha=0.4)
plt.plot(t, y_nn, label="NN Predicted")
plt.plot(t, y_kf, label="Kalman Filtered")
plt.xlabel("Time step")
plt.ylabel("Output y")
plt.title("First-order System: MPC + NN + Kalman Filter")
plt.grid(True)
plt.legend()
plt.tight_layout()
plt.show()
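
The discrete coefficients used above come from zero-order-hold discretization of the first-order lag G(s) = K/(τs + 1): y[k+1] = a·y[k] + b·u[k] with a = e^(−Ts/τ) and b = K·(1 − a). A quick check that the discrete step response settles at the DC gain K, reusing a, b, and K from above:

# ZOH離散化の確認: ステップ応答の定常値はKに一致 / ZOH check: the discrete step response settles at K
y_step = 0.0
for _ in range(200):              # Ts = 0.1 s のステップを200回 / 200 steps of Ts = 0.1 s >> tau
    y_step = a * y_step + b * 1.0
print(f"Discrete steady state = {y_step:.4f} (DC gain K = {K})")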


# Program Name: semiconductor_property_regression.py
# Creation Date: 20250419
# Overview: Perform regression analysis on semiconductor parameters using dummy data
# Usage: Run with `python semiconductor_property_regression.py`

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression

# --- 1. ダミーデータ作成 / Create Dummy Semiconductor Data ---
np.random.seed(42)
N_d = np.logspace(15, 19, 100)  # ドーピング濃度 / Doping concentration [cm^-3]
mu = 1500 / (1 + (N_d / 1e17)**0.5) + np.random.normal(0, 30, len(N_d))  # 電子移動度 / Electron mobility [cm^2/Vs]
q = 1.6e-19  # 電荷 / Elementary charge [C]
n = N_d      # n型と仮定 / Assume n-type, so n = N_d
sigma = q * n * mu  # 導電率 / Conductivity [S/cm]

# --- 2. データフレームに格納 / Store in DataFrame ---
df = pd.DataFrame({
    'Doping [cm^-3]': N_d,
    'Mobility [cm^2/Vs]': mu,
    'Carrier Density [cm^-3]': n,
    'Conductivity [S/cm]': sigma
})

# --- 3. 回帰:log(N_d) vs μ / Regression: log10(N_d) vs mobility ---
X = np.log10(N_d).reshape(-1, 1)
y = mu
model = LinearRegression()
model.fit(X, y)
mu_pred = model.predict(X)

# --- 4. 可視化 / Visualization ---
plt.figure(figsize=(12, 5))

# Mobility vs Doping
plt.subplot(1, 2, 1)
plt.scatter(N_d, mu, label='Data', color='blue', alpha=0.6)
plt.plot(N_d, mu_pred, label='Linear Fit in log scale', color='red')
plt.xscale('log')
plt.xlabel('Doping Concentration [cm$^{-3}$]')
plt.ylabel('Mobility [cm$^2$/Vs]')
plt.title('Mobility vs Doping Concentration')
plt.legend()
plt.grid(True)

# Conductivity vs Doping
plt.subplot(1, 2, 2)
plt.plot(N_d, sigma, label='Conductivity', color='green')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('Doping Concentration [cm$^{-3}$]')
plt.ylabel('Conductivity [S/cm]')
plt.title('Conductivity vs Doping')
plt.grid(True)
plt.legend()

plt.tight_layout()
plt.show()

# --- 5. 回帰式出力 / Output regression model ---
slope = model.coef_[0]
intercept = model.intercept_
print(f"Linear regression (log scale): μ = {slope:.2f} * log10(N_d) + {intercept:.2f}")

# Program Name: rc_variation_statistics.py
# Creation Date: 20250419
# Overview: Analyze statistical variation of R (resistors) and C (capacitors) using dummy data
# Usage: Run with `python rc_variation_statistics.py`

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# --- 1. ダミーデータ作成 / Create Dummy Data ---
np.random.seed(0)
num_samples = 500

# 抵抗 R: 1kΩ ±5%、コンデンサ C: 100nF ±10% / Resistor R: 1 kΩ ±5%, capacitor C: 100 nF ±10%
R_nominal = 1e3      # 1kΩ
C_nominal = 100e-9   # 100nF

R_values = np.random.normal(loc=R_nominal, scale=R_nominal*0.05, size=num_samples)
C_values = np.random.normal(loc=C_nominal, scale=C_nominal*0.1, size=num_samples)

# --- 2. データフレームにまとめる / Combine into DataFrame ---
df = pd.DataFrame({
    'Resistance [Ω]': R_values,
    'Capacitance [F]': C_values
})

# --- 3. 基本統計量の表示 / Print basic statistics ---
print("=== Basic Statistics ===")
print(df.describe())

# --- 4. 相関係数の表示 / Correlation ---
correlation = df.corr().loc['Resistance [Ω]', 'Capacitance [F]']
print(f"\nCorrelation coefficient (R vs C): {correlation:.4f}")

# --- 5. 可視化 / Visualization ---
plt.figure(figsize=(14, 5))

# ヒストグラム / Histograms
plt.subplot(1, 2, 1)
sns.histplot(df['Resistance [Ω]'], bins=30, color='skyblue', kde=True, label='R')
sns.histplot(df['Capacitance [F]'], bins=30, color='orange', kde=True, label='C')
plt.title("Histogram of R and C")
plt.xlabel("Value")
plt.legend()
plt.grid(True)

# 散布図 / Scatter plot
plt.subplot(1, 2, 2)
plt.scatter(df['Resistance [Ω]'], df['Capacitance [F]'], alpha=0.6)
plt.title("Scatter Plot of R vs C")
plt.xlabel("Resistance [Ω]")
plt.ylabel("Capacitance [F]")
plt.grid(True)

plt.tight_layout()
plt.show()


# Program Name: rc_time_constant_variation.py
# Creation Date: 20250419
# Overview: Analyze how RC variation affects time constant τ and system response using Monte Carlo simulation
# Usage: Run with `python rc_time_constant_variation.py`

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# --- 1. パラメータ設定 / Nominal values and tolerances ---
np.random.seed(42)
num_samples = 1000

R_nom = 1e3       # 1kΩ
C_nom = 100e-9    # 100nF
R_tol = 0.05      # ±5%
C_tol = 0.1       # ±10%

# --- 2. Monte Carlo シミュレーション / Sample R and C with variation ---
R_samples = np.random.normal(loc=R_nom, scale=R_nom * R_tol, size=num_samples)
C_samples = np.random.normal(loc=C_nom, scale=C_nom * C_tol, size=num_samples)
tau_samples = R_samples * C_samples  # τ = R × C
trise_samples = 2.2 * tau_samples    # 立ち上がり時間の目安 / Rise-time estimate (2.2 × τ)

# --- 3. 統計情報出力 / Print summary ---
print("=== τ and t_rise statistics ===")
print(f"Mean τ   = {np.mean(tau_samples)*1e6:.2f} µs")
print(f"Std  τ   = {np.std(tau_samples)*1e6:.2f} µs")
print(f"Mean t_r = {np.mean(trise_samples)*1e6:.2f} µs")
print(f"Max  t_r = {np.max(trise_samples)*1e6:.2f} µs")
print(f"Min  t_r = {np.min(trise_samples)*1e6:.2f} µs")

# --- 4. ヒストグラムと信頼区間 / Histogram with percentile range ---
plt.figure(figsize=(12, 5))

# τ分布 / Distribution of τ
plt.subplot(1, 2, 1)
sns.histplot(tau_samples * 1e6, bins=40, kde=True, color='skyblue')
p1, p99 = np.percentile(tau_samples, [1, 99])
plt.axvline(p1 * 1e6, color='red', linestyle='--', label='1st percentile')
plt.axvline(p99 * 1e6, color='red', linestyle='--', label='99th percentile')
plt.title("Distribution of Time Constant τ")
plt.xlabel("τ [µs]")
plt.ylabel("Count")
plt.legend()
plt.grid(True)

# 立ち上がり時間分布 / Distribution of rise time
plt.subplot(1, 2, 2)
sns.histplot(trise_samples * 1e6, bins=40, kde=True, color='orange')
plt.title("Distribution of Rise Time (2.2 × τ)")
plt.xlabel("Rise Time [µs]")
plt.ylabel("Count")
plt.grid(True)

plt.tight_layout()
plt.show()

# --- 5. 可視化(応答波形) / Example output response ---
t = np.linspace(0, 1e-3, 500)
tau_nom = R_nom * C_nom
v_nom = 1 - np.exp(-t / tau_nom)
v_min = 1 - np.exp(-t / np.max(tau_samples))
v_max = 1 - np.exp(-t / np.min(tau_samples))

plt.figure(figsize=(8, 5))
plt.plot(t * 1e3, v_nom, label='Nominal RC', color='black')
plt.plot(t * 1e3, v_min, '--', label='Fastest (min τ)', color='green')
plt.plot(t * 1e3, v_max, '--', label='Slowest (max τ)', color='red')
plt.title("Step Response with RC Variation")
plt.xlabel("Time [ms]")
plt.ylabel("Voltage [V]")
plt.grid(True)
plt.legend()
plt.tight_layout()
plt.show()
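
The 2.2 factor used for the rise time comes from the 10%–90% rise of a first-order step response v(t) = 1 − e^(−t/τ): v = 0.1 at t = τ·ln(10/9) and v = 0.9 at t = τ·ln(10), so t_rise = τ·ln(9) ≈ 2.197·τ. A one-line numeric confirmation:

# 2.2τ則の由来: t_rise = τ·ln 9 / Where the 2.2*tau rule of thumb comes from
print(f"ln(9) = {np.log(9):.4f}  (the 2.2 in t_rise = 2.2 * tau)")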



# プログラム名: rnn_gru_lstm_voltage_forecast.py
# 概要: ノイズ付き・非定常な電圧波形を RNN / GRU / LSTM で予測し、性能を比較する
# Purpose: Compare RNN, GRU, and LSTM on forecasting noisy, non-stationary voltage signals

import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, GRU, LSTM, Dense

# --- 電圧波形生成(ノイズ付き・周期変動)/ Generate noisy voltage signal with frequency modulation ---
def generate_voltage_wave(timesteps, noise_level=0.1, freq_modulation=False):
    x = np.linspace(0, 10*np.pi, timesteps)
    if freq_modulation:
        voltage = np.sin(x + 0.5 * np.sin(0.1 * x))  # 周波数がゆっくり変動 / frequency modulation
    else:
        voltage = np.sin(x)
    noise = np.random.normal(0, noise_level, size=x.shape)
    return x, voltage + noise

# --- データ整形 / Prepare time series for supervised learning ---
def prepare_sequences(y, n_input=10, predict_steps=10):
    scaler = MinMaxScaler()
    y_scaled = scaler.fit_transform(y.reshape(-1, 1))
    X, Y = [], []
    for i in range(len(y_scaled) - n_input - predict_steps):
        X.append(y_scaled[i:i+n_input])
        Y.append(y_scaled[i+n_input:i+n_input+predict_steps].ravel())  # Dense(predict_steps)の出力形状に合わせて平坦化 / Flatten so targets match the Dense output shape
    return np.array(X), np.array(Y), scaler

# --- モデル構築 / Build model based on type ---
def build_model(model_type, n_input, predict_steps):
    model = Sequential()
    if model_type == 'RNN':
        model.add(SimpleRNN(50, activation='tanh', input_shape=(n_input, 1)))
    elif model_type == 'GRU':
        model.add(GRU(50, activation='tanh', input_shape=(n_input, 1)))
    elif model_type == 'LSTM':
        model.add(LSTM(50, activation='tanh', input_shape=(n_input, 1)))
    model.add(Dense(predict_steps))
    model.compile(optimizer='adam', loss='mse')
    return model

# --- 比較実行 / Execute and compare prediction results ---
def run_comparison(timesteps=200, noise_level=0.1, freq_modulation=False):
    x, voltage = generate_voltage_wave(timesteps, noise_level, freq_modulation)
    n_input, predict_steps = 20, 10
    X, Y, scaler = prepare_sequences(voltage, n_input, predict_steps)

    results = {}
    for model_type in ['RNN', 'GRU', 'LSTM']:
        model = build_model(model_type, n_input, predict_steps)
        model.fit(X, Y, epochs=100, verbose=0)
        x_input = X[-1].reshape(1, n_input, 1)
        y_pred = model.predict(x_input)
        y_pred_rescaled = scaler.inverse_transform(y_pred.reshape(-1, 1))
        results[model_type] = y_pred_rescaled

    # --- プロット:電圧波形予測 / Plot voltage forecast results ---
    plt.figure(figsize=(12, 6))
    plt.plot(range(timesteps), voltage, label="Measured Voltage (Noisy + Modulated)")
    for model_type, y_hat in results.items():
        plt.plot(range(timesteps, timesteps + predict_steps), y_hat, '--', label=f"{model_type} Prediction")
    plt.title("Voltage Signal Forecast: RNN vs GRU vs LSTM")
    plt.xlabel("Time Step")
    plt.ylabel("Voltage (V)")
    plt.legend()
    plt.grid(True)
    plt.show()

# --- 実行:ノイズ付き・非定常な電圧信号予測 / Run ---
run_comparison(timesteps=200, noise_level=0.15, freq_modulation=True)


# プログラム名: mosfet_variation_analysis_with_median.py
# 概要: MOSFETパラメータのばらつきを統計的に分析(中央値含む)し、可視化
# Purpose: Analyze variations in MOSFET parameters including median, and visualize them

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# --- データ生成 / Simulate MOSFET parameter variations ---
np.random.seed(0)
n = 100  # サンプル数 / Number of samples

# 各パラメータにバラツキを与える / Add variation to each parameter
data = {
    'Vth (V)': np.random.normal(loc=0.7, scale=0.05, size=n),
    'W (µm)': np.random.normal(loc=10, scale=1.0, size=n),
    'L (µm)': np.random.normal(loc=0.18, scale=0.01, size=n),
    'mu_n (cm²/V·s)': np.random.normal(loc=450, scale=20, size=n),
}

df = pd.DataFrame(data)

# --- 統計量計算 / Calculate statistics ---
stats = df.describe().T
stats['variance'] = df.var()
stats['std_dev'] = df.std()
stats['median'] = df.median()

# Zスコア計算 / Calculate Z-scores per column
z_scores = pd.DataFrame({
    col: (df[col] - df[col].mean()) / df[col].std()
    for col in df.columns
})

# --- 統計情報表示 / Print statistics including median ---
print(" MOSFET Parameter Statistics (Including Median):\n")
print(stats[['mean', 'median', 'std_dev', 'variance']].round(4))

# --- プロット①: ヒストグラム / Histograms for each parameter ---
df.hist(bins=20, figsize=(12, 6), edgecolor='black')
plt.suptitle("Histogram of MOSFET Parameters")
plt.tight_layout()
plt.show()

# --- プロット②: 箱ひげ図 / Boxplot of parameter variations ---
plt.figure(figsize=(10, 5))
sns.boxplot(data=df)
plt.title("Boxplot of MOSFET Parameter Variations")
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()

# --- プロット③: Zスコアのヒートマップ(オプション)/ Optional z-score heatmap ---
plt.figure(figsize=(10, 6))
sns.heatmap(z_scores.T, cmap="coolwarm", center=0, cbar_kws={"label": "Z-score"})
plt.title("Z-score Heatmap of MOSFET Samples")
plt.xlabel("Sample Index")
plt.ylabel("Parameter")
plt.tight_layout()
plt.show()


import numpy as np
import matplotlib.pyplot as plt
# Program name: mosfet_statistical_variation_plot.py
# --- 定数定義 / Define constants ---
VDS = 1.5       # ドレイン-ソース間電圧 / Drain-to-source voltage [V]
VA = 50         # アーリ電圧 / Early voltage [V]

# --- 平均と標準偏差(ばらつき)/ Define means and stddev for statistical variation ---
mu_nCox_mean = 200e-6  # [A/V^2] 平均 / Mean
mu_nCox_std = 20e-6    # 標準偏差 / Std dev

Vth_mean = 0.6         # [V] 平均しきい値電圧 / Threshold voltage
Vth_std = 0.05

W_L_mean = 10          # 幅/長さ比 / Aspect ratio W/L
W_L_std = 1

VGS_mean = 1.2         # [V] ゲートソース間電圧 / Gate-source voltage
VGS_std = 0.02

# --- モンテカルロシミュレーション数 / Number of Monte Carlo samples ---
N = 100000

# --- パラメータ生成 / Generate random parameters ---
mu_nCox = np.random.normal(mu_nCox_mean, mu_nCox_std, N)
Vth = np.random.normal(Vth_mean, Vth_std, N)
W_L = np.random.normal(W_L_mean, W_L_std, N)
VGS = np.random.normal(VGS_mean, VGS_std, N)

# --- 1. Vov = VGS - Vth / オーバードライブ電圧の計算 ---
Vov = VGS - Vth

# --- 2. IDS(チャネル長変調付き)/ Drain current in saturation with CLM ---
IDS = 0.5 * mu_nCox * W_L * Vov**2 * (1 + VDS / VA)

# --- 3. gm(トランスコンダクタンス)/ Transconductance calculation ---
gm = mu_nCox * W_L * Vov

# --- 4. チャネル長変調係数(λ)と r_o(出力抵抗) ---
lambda_CLM = 1 / VA                # チャネル長変調係数 / Channel length modulation factor
ro = 1 / (lambda_CLM * IDS)       # 出力抵抗 r_o = 1 / (λ × IDS)

# --- 統計出力 / Print statistical summary ---
print("▼ Statistical Summary")
print(f"Mean IDS = {np.mean(IDS)*1e6:.2f} μA, Std = {np.std(IDS)*1e6:.2f} μA")
print(f"Mean gm  = {np.mean(gm)*1e3:.2f} mS, Std = {np.std(gm)*1e3:.2f} mS")
print(f"Mean Vov = {np.mean(Vov):.3f} V, Std = {np.std(Vov):.3f} V")
print(f"Mean r_o = {np.mean(ro)/1e3:.2f} kΩ, Std = {np.std(ro)/1e3:.2f}")

# --- ヒストグラムプロット / Histogram plots ---
plt.figure(figsize=(14, 8))

# IDS
plt.subplot(2, 2, 1)
plt.hist(IDS * 1e6, bins=50, color='skyblue', edgecolor='black')
plt.title("Distribution of $I_{DS}$ (μA)")
plt.xlabel("$I_{DS}$ [μA]")
plt.ylabel("Frequency")

# gm
plt.subplot(2, 2, 2)
plt.hist(gm * 1e3, bins=50, color='salmon', edgecolor='black')
plt.title("Distribution of $g_m$ (mS)")
plt.xlabel("$g_m$ [mS]")
plt.ylabel("Frequency")

# Vov
plt.subplot(2, 2, 3)
plt.hist(Vov, bins=50, color='lightgreen', edgecolor='black')
plt.title("Distribution of $V_{ov} = V_{GS} - V_T$")
plt.xlabel("$V_{ov}$ [V]")
plt.ylabel("Frequency")

# ro
plt.subplot(2, 2, 4)
plt.hist(ro / 1e3, bins=50, color='plum', edgecolor='black')
plt.title("Distribution of $r_o = \\frac{1}{\\lambda I_{DS}}$ (kΩ)")
plt.xlabel("$r_o$ [kΩ]")
plt.ylabel("Frequency")

plt.tight_layout()
plt.show()
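
The simulated spread in IDS can be cross-checked with first-order error propagation: for IDS ∝ μnCox·(W/L)·Vov², the relative variance is approximately (σ_μCox/μCox)² + (σ_WL/(W/L))² + (2·σ_Vov/Vov)², where σ_Vov² = σ_VGS² + σ_Vth². A verification sketch using the means and standard deviations defined above:

# 一次の誤差伝搬とモンテカルロ結果の比較 / First-order error propagation vs Monte Carlo result
sigma_Vov = np.sqrt(Vth_std**2 + VGS_std**2)
Vov_mean = VGS_mean - Vth_mean
rel_std_est = np.sqrt((mu_nCox_std / mu_nCox_mean)**2
                      + (W_L_std / W_L_mean)**2
                      + (2 * sigma_Vov / Vov_mean)**2)
rel_std_mc = np.std(IDS) / np.mean(IDS)
print(f"Relative std of IDS: propagation ≈ {rel_std_est:.3f}, Monte Carlo = {rel_std_mc:.3f}")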