
Machine Learning Notes

Posted at 2024-07-20

import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

# サンプルデータの生成
np.random.seed(0)
X = np.random.rand(100, 1) * 10  # 入力データ
y = 2.5 * X + np.random.randn(100, 1) * 2  # 実際の値(ノイズ付き)

# 線形回帰モデルの学習
model = LinearRegression()
model.fit(X, y)
y_pred = model.predict(X)  # Predicted values from the fitted linear model

# Compute the residuals and mean squared error
error = y - y_pred
mse = mean_squared_error(y, y_pred)

# プロットの作成
plt.figure(figsize=(12, 6))

# 実際のデータと予測データのプロット
plt.subplot(1, 2, 1)
plt.scatter(X, y, label='Actual Data')
plt.plot(X, y_pred, color='red', label='Predicted Data')
plt.xlabel('Input (X)')
plt.ylabel('Output (y)')
plt.title('Actual vs Predicted Data')
plt.legend()
plt.grid(True)

# Plot the residuals
plt.subplot(1, 2, 2)
plt.scatter(X, error, label='Residuals', color='orange')
plt.axhline(y=0, color='black', linestyle='--')
plt.xlabel('Input (X)')
plt.ylabel('Error (y - y_pred)')
plt.title(f'Residuals (MSE: {mse:.2f})')
plt.legend()
plt.grid(True)

plt.tight_layout()
plt.show()




import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

# サンプルデータの生成
np.random.seed(0)
X = np.linspace(0, 10, 100)
# 真の関数 y = a * exp(b * X) + c
a, b, c = 2.5, 1.3, 0.5
y_true = a * np.exp(b * X) + c
# ノイズを加えたデータ
y = y_true + np.random.normal(scale=2, size=X.shape)

# 推定関数の定義
def model_func(X, a, b, c):
    return a * np.exp(b * X) + c

# 最適化を用いて関数のパラメータを推定
popt, pcov = curve_fit(model_func, X, y, p0=(1, 1, 1))
a_est, b_est, c_est = popt

# 推定関数のプロット
y_est = model_func(X, *popt)

# プロットの作成
plt.figure(figsize=(10, 6))

# サンプルデータのプロット
plt.scatter(X, y, label='Noisy Data', color='blue', s=10)

# 真の関数のプロット
plt.plot(X, y_true, label='True Function', color='green')

# 推定関数のプロット
plt.plot(X, y_est, label=f'Estimated Function: a={a_est:.2f}, b={b_est:.2f}, c={c_est:.2f}', color='red')

# プロットの装飾
plt.xlabel('X')
plt.ylabel('y')
plt.title('Function Estimation and Optimization')
plt.legend()
plt.grid(True)
plt.show()

import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import BayesianRidge
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

# サンプルデータの生成
np.random.seed(0)
X = np.linspace(0, 10, 100).reshape(-1, 1)
y_true = np.sin(X).ravel()
y = y_true + np.random.normal(scale=0.5, size=y_true.shape)

# ベイズ線形回帰モデル
bayesian_ridge = BayesianRidge()
bayesian_ridge.fit(X, y)
y_pred_bayes = bayesian_ridge.predict(X)

# ガウス過程回帰モデル
kernel = C(1.0, (1e-3, 1e3)) * RBF(1, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=10)
gp.fit(X, y)
y_pred_gp, sigma = gp.predict(X, return_std=True)

# プロットの作成
plt.figure(figsize=(14, 7))

# サンプルデータのプロット
plt.scatter(X, y, label='Noisy Data', color='blue', s=10)

# 真の関数のプロット
plt.plot(X, y_true, label='True Function', color='green')

# ベイズ線形回帰モデルのプロット
plt.plot(X, y_pred_bayes, label='Bayesian Ridge Regression', color='red')

# ガウス過程回帰モデルのプロット
plt.plot(X, y_pred_gp, label='Gaussian Process Regression', color='orange')
plt.fill_between(X.ravel(), y_pred_gp - 1.96*sigma, y_pred_gp + 1.96*sigma, color='orange', alpha=0.2)

# プロットの装飾
plt.xlabel('X')
plt.ylabel('y')
plt.title('Bayesian Ridge Regression vs Gaussian Process Regression')
plt.legend()
plt.grid(True)
plt.show()




import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal

# サンプルデータの生成
np.random.seed(42)
X = np.linspace(0, 10, 20)
y = 2.5 * X + np.random.normal(0, 1, X.shape[0])

# ベイズ線形回帰の事前分布
alpha = 2.0  # 事前分布の精度(分散の逆数)
beta = 25.0  # 観測ノイズの精度(分散の逆数)

# デザイン行列の作成
Phi = np.vstack([np.ones(X.shape), X]).T

# 事前分布のパラメータ
m0 = np.zeros(2)  # 事前分布の平均
S0 = np.eye(2) / alpha  # 事前分布の共分散行列

# 事後分布の計算
Sn_inv = np.linalg.inv(S0) + beta * Phi.T @ Phi
Sn = np.linalg.inv(Sn_inv)
mn = Sn @ (np.linalg.inv(S0) @ m0 + beta * Phi.T @ y)

# 予測分布の計算
X_new = np.linspace(0, 10, 100)
Phi_new = np.vstack([np.ones(X_new.shape), X_new]).T
y_pred = Phi_new @ mn
y_var = 1 / beta + np.sum(Phi_new @ Sn * Phi_new, axis=1)
y_std = np.sqrt(y_var)

# プロットの作成
plt.figure(figsize=(10, 6))

# 観測データのプロット
plt.scatter(X, y, color='blue', label='Observed data')

# 事前分布のプロット
w0, w1 = np.random.multivariate_normal(m0, S0, 5).T
for i in range(5):
    plt.plot(X_new, w0[i] + w1[i] * X_new, color='green', alpha=0.3, label='Prior samples' if i == 0 else "")

# 事後分布のプロット
plt.plot(X_new, y_pred, color='red', label='Posterior mean')
plt.fill_between(X_new, y_pred - 1.96 * y_std, y_pred + 1.96 * y_std, color='red', alpha=0.2, label='95% confidence interval')

# プロットの装飾
plt.xlabel('Input (X)')
plt.ylabel('Output (y)')
plt.title('Bayesian Linear Regression')
plt.legend()
plt.grid(True)
plt.show()
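For reference, the posterior and predictive quantities computed above are the standard Bayesian linear regression updates, restated here in the code's notation (design matrix Phi, prior mean m0, prior covariance S0, noise precision beta):

S_N^{-1} = S_0^{-1} + \beta \Phi^\top \Phi, \qquad m_N = S_N \left( S_0^{-1} m_0 + \beta \Phi^\top y \right)

\sigma^2(x_\ast) = \frac{1}{\beta} + \phi(x_\ast)^\top S_N \, \phi(x_\ast)

These correspond to Sn, mn, and y_var in the code.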


import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

# サンプルデータの生成
np.random.seed(42)
X = np.linspace(0, 10, 20).reshape(-1, 1)
y = np.sin(X).ravel() + np.random.normal(0, 0.1, X.shape[0])

# カーネル関数の設定
kernel = C(1.0, (1e-3, 1e3)) * RBF(1, (1e-2, 1e2))

# ガウス過程回帰モデルの作成
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=10, alpha=0.1)

# モデルのフィッティング
gp.fit(X, y)

# 新しいデータポイントの作成
X_new = np.linspace(0, 10, 1000).reshape(-1, 1)
y_mean, y_std = gp.predict(X_new, return_std=True)

# プロットの作成
plt.figure(figsize=(14, 7))

# 観測データのプロット
plt.scatter(X, y, c='blue', label='Observed Data')

# ガウス過程の予測のプロット
plt.plot(X_new, y_mean, 'r-', label='Mean Prediction')
plt.fill_between(X_new.ravel(), y_mean - 1.96*y_std, y_mean + 1.96*y_std, alpha=0.2, color='orange', label='95% Confidence Interval')

# プロットの装飾
plt.xlabel('Input (X)')
plt.ylabel('Output (y)')
plt.title('Gaussian Process Regression with RBF Kernel')
plt.legend()
plt.grid(True)
plt.show()



import numpy as np
import matplotlib.pyplot as plt
from scipy.special import expit
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.families import Binomial

# Sample data generation
np.random.seed(42)
X = np.linspace(0, 10, 100)
y = (X > 5).astype(int)  # Binary outcome

# Adding noise
X += np.random.normal(scale=0.5, size=X.shape)
y += np.random.binomial(1, 0.1, size=y.shape)
y = np.clip(y, 0, 1)

# Inverse link function (logistic sigmoid); expit is the inverse of the logit link
def inverse_logit(x):
    return expit(x)

# Parametric model using Generalized Linear Model (GLM)
X_ = np.column_stack((np.ones(X.shape[0]), X))  # Adding intercept
model = GLM(y, X_, family=Binomial())
results = model.fit()

# Predictive distribution
X_new = np.linspace(0, 10, 100)
X_new_ = np.column_stack((np.ones(X_new.shape[0]), X_new))
y_pred = results.predict(X_new_)

# Plotting
plt.figure(figsize=(10, 6))

# Observed data
plt.scatter(X, y, label='Observed data', color='blue')

# Fitted mean response through the inverse link (sigmoid of the linear predictor)
plt.plot(X_new, inverse_logit(results.params[0] + results.params[1] * X_new), label='Inverse link (sigmoid)', color='red')

# Predictive distribution
plt.plot(X_new, y_pred, label='Predictive distribution', color='green')

# Plot decorations
plt.xlabel('Input (X)')
plt.ylabel('Output (y)')
plt.title('Parametric Modeling with Link Function and Predictive Distribution')
plt.legend()
plt.grid(True)
plt.show()



import numpy as np
from scipy.stats import beta

# トンプソン抽出のための初期化
n_arms = 2
alpha = np.ones(n_arms)  # ベータ分布のα
beta_param = np.ones(n_arms)  # ベータ分布のβ
n_trials = 1000  # 試行回数

# True success probabilities of the two arms (Bernoulli rewards, so the Beta updates below are the standard conjugate update)
true_probs = np.array([0.3, 0.6])

def simulate_rewards():
    """Simulate a Bernoulli (0/1) reward for each arm."""
    return np.random.binomial(1, true_probs)

def select_arm():
    """トンプソン抽出に基づいてアームを選択する"""
    samples = [beta.rvs(a, b) for a, b in zip(alpha, beta_param)]
    return np.argmax(samples)

# シミュレーション
for _ in range(n_trials):
    arm = select_arm()
    rewards = simulate_rewards()
    reward = rewards[arm]
    
    # アームの報酬を更新
    alpha[arm] += reward
    beta_param[arm] += 1 - reward

# Results: Beta pseudo-counts of successes (alpha) and failures (beta) per arm
print("Successes per arm (alpha):", alpha)
print("Failures per arm (beta):", beta_param)




import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

# サンプルデータと目的関数
def objective_function(x):
    return np.sin(x)

# データポイントの生成
X_train = np.array([[1.0], [2.0], [3.0], [4.0]])
y_train = objective_function(X_train).ravel()

# ガウス過程のカーネル設定
kernel = C(1.0, (1e-3, 1e3)) * RBF(1.0, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=10)

# モデルの学習
gp.fit(X_train, y_train)

# 推定ポイントの生成
X_test = np.linspace(0, 5, 1000).reshape(-1, 1)
y_pred, sigma = gp.predict(X_test, return_std=True)

# プロット
plt.figure()
plt.plot(X_train, y_train, 'r.', markersize=10, label='Observations')
plt.plot(X_test, y_pred, 'b-', label='Prediction')
plt.fill_between(X_test.ravel(), y_pred - 1.96 * sigma, y_pred + 1.96 * sigma, alpha=0.2, color='blue')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Gaussian Process Regression')
plt.legend()
plt.show()


import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

# 目的関数
def objective_function(x):
    return np.sin(x)

# サンプルデータの生成
X_train = np.array([[1.0], [2.0], [3.0], [4.0]])
y_train = objective_function(X_train).ravel()

# ガウス過程のカーネル設定
kernel = C(1.0, (1e-3, 1e3)) * RBF(1.0, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=10)

# モデルの学習
gp.fit(X_train, y_train)

# 推定ポイントの生成
X_test = np.linspace(0, 5, 1000).reshape(-1, 1)
y_pred, sigma = gp.predict(X_test, return_std=True)

# レベルセットの生成
level = 0.0  # レベルセットの値
level_set = (y_pred >= level).astype(int)

# プロット
plt.figure(figsize=(12, 6))

# 目的関数のプロット
plt.subplot(1, 2, 1)
plt.plot(X_train, y_train, 'r.', markersize=10, label='Observations')
plt.plot(X_test, y_pred, 'b-', label='GP Prediction')
plt.fill_between(X_test.ravel(), y_pred - 1.96 * sigma, y_pred + 1.96 * sigma, alpha=0.2, color='blue')
plt.axhline(y=level, color='k', linestyle='--', label='Level Set')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Gaussian Process Regression')
plt.legend()

# レベルセットのプロット
plt.subplot(1, 2, 2)
plt.plot(X_test, level_set, 'g-', label='Level Set')
plt.xlabel('x')
plt.ylabel('Level Set Indicator')
plt.title('Level Set Estimation')
plt.legend()

plt.tight_layout()
plt.show()



import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report

# データのロードと分割
data = load_iris()
X = data.data
y = data.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# モデルの訓練
model = LogisticRegression(max_iter=200)
model.fit(X_train, y_train)

# 予測
y_pred = model.predict(X_test)

# メトリックの計算
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred, average='weighted')
recall = recall_score(y_test, y_pred, average='weighted')
f1 = f1_score(y_test, y_pred, average='weighted')

# 結果の表示
print(f'Accuracy: {accuracy:.2f}')
print(f'Avg. Precision: {precision:.2f}')
print(f'Avg. Recall: {recall:.2f}')
print(f'Avg. F-Measure: {f1:.2f}')

# 詳細な分類レポート
print('\nClassification Report:')
print(classification_report(y_test, y_pred))

import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve

# 学習曲線の計算
train_sizes, train_scores, test_scores = learning_curve(
    LogisticRegression(max_iter=200), X, y, cv=5, n_jobs=-1,
    train_sizes=np.linspace(0.1, 1.0, 10)
)

# 平均と標準偏差の計算
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)

# プロット
plt.figure()
plt.plot(train_sizes, train_mean, 'o-', color='r', label='Training score')
plt.plot(train_sizes, test_mean, 'o-', color='g', label='Cross-validation score')

# グラフの装飾
plt.title('Learning Curve')
plt.xlabel('Training examples')
plt.ylabel('Score')
plt.legend(loc='best')
plt.grid()
plt.show()





Classification
Regression
Clustering


import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification, make_regression, make_blobs
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.cluster import KMeans
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_squared_error

# 分類データの生成
X_class, y_class = make_classification(n_samples=200, n_features=2, n_classes=2, n_redundant=0, random_state=42)

# 回帰データの生成
X_reg, y_reg = make_regression(n_samples=100, n_features=1, noise=20, random_state=42)

# クラスタリングデータの生成
X_cluster, _ = make_blobs(n_samples=150, centers=3, n_features=2, random_state=42)

# 分類モデルの学習
X_train_class, X_test_class, y_train_class, y_test_class = train_test_split(X_class, y_class, test_size=0.3, random_state=42)
model_class = LogisticRegression()
model_class.fit(X_train_class, y_train_class)
y_pred_class = model_class.predict(X_test_class)
accuracy = accuracy_score(y_test_class, y_pred_class)

# 回帰モデルの学習
X_train_reg, X_test_reg, y_train_reg, y_test_reg = train_test_split(X_reg, y_reg, test_size=0.3, random_state=42)
model_reg = LinearRegression()
model_reg.fit(X_train_reg, y_train_reg)
y_pred_reg = model_reg.predict(X_test_reg)
mse = mean_squared_error(y_test_reg, y_pred_reg)

# クラスタリングモデルの学習
model_cluster = KMeans(n_clusters=3, random_state=42)
y_cluster = model_cluster.fit_predict(X_cluster)

# プロット
fig, axes = plt.subplots(1, 3, figsize=(18, 6))

# 分類プロット
axes[0].scatter(X_class[:, 0], X_class[:, 1], c=y_class, cmap='bwr', edgecolor='k', s=50)
axes[0].set_title(f'Classification (Accuracy: {accuracy:.2f})')
axes[0].set_xlabel('Feature 1')
axes[0].set_ylabel('Feature 2')

# 回帰プロット
axes[1].scatter(X_reg, y_reg, color='blue', edgecolor='k', s=50)
axes[1].plot(X_reg, model_reg.predict(X_reg), color='red', linewidth=2)
axes[1].set_title(f'Regression (MSE: {mse:.2f})')
axes[1].set_xlabel('Feature')
axes[1].set_ylabel('Target')

# クラスタリングプロット
axes[2].scatter(X_cluster[:, 0], X_cluster[:, 1], c=y_cluster, cmap='viridis', edgecolor='k', s=50)
axes[2].set_title('Clustering')
axes[2].set_xlabel('Feature 1')
axes[2].set_ylabel('Feature 2')

plt.tight_layout()
plt.show()


Dimensionality Reduction
Supervised Learning
Unsupervised Learning

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification, make_blobs
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# データ生成
X_class, y_class = make_classification(n_samples=200, n_features=5, n_classes=2, n_redundant=0, random_state=42)
X_blob, _ = make_blobs(n_samples=200, centers=3, n_features=5, random_state=42)

# 次元削減 (PCA)
pca = PCA(n_components=2)
X_class_pca = pca.fit_transform(X_class)
X_blob_pca = PCA(n_components=2).fit_transform(X_blob)  # separate PCA; the blob data is unrelated to X_class

# 教師あり学習 (分類)
X_train_class, X_test_class, y_train_class, y_test_class = train_test_split(X_class_pca, y_class, test_size=0.3, random_state=42)
model_class = LogisticRegression()
model_class.fit(X_train_class, y_train_class)
y_pred_class = model_class.predict(X_test_class)
accuracy = accuracy_score(y_test_class, y_pred_class)

# 教師なし学習 (クラスタリング)
model_cluster = KMeans(n_clusters=3, random_state=42)
y_blob_cluster = model_cluster.fit_predict(X_blob_pca)

# プロット
fig, axes = plt.subplots(1, 3, figsize=(18, 6))

# 次元削減後のデータプロット
axes[0].scatter(X_class_pca[:, 0], X_class_pca[:, 1], c=y_class, cmap='bwr', edgecolor='k', s=50)
axes[0].set_title('PCA - Classification')
axes[0].set_xlabel('Principal Component 1')
axes[0].set_ylabel('Principal Component 2')

# 教師あり学習の結果プロット
axes[1].scatter(X_class_pca[:, 0], X_class_pca[:, 1], c=y_class, cmap='bwr', edgecolor='k', s=50)
axes[1].set_title(f'Classification (Accuracy: {accuracy:.2f})')
axes[1].set_xlabel('Principal Component 1')
axes[1].set_ylabel('Principal Component 2')

# 教師なし学習の結果プロット
axes[2].scatter(X_blob_pca[:, 0], X_blob_pca[:, 1], c=y_blob_cluster, cmap='viridis', edgecolor='k', s=50)
axes[2].set_title('Clustering (KMeans)')
axes[2].set_xlabel('Principal Component 1')
axes[2].set_ylabel('Principal Component 2')

plt.tight_layout()
plt.show()



Reinforcement Learning


import numpy as np
import matplotlib.pyplot as plt

# 環境設定
n_states = 5
n_actions = 2
n_episodes = 1000
alpha = 0.1  # 学習率
gamma = 0.9  # 割引率
epsilon = 0.1  # 探索率

# Qテーブルの初期化
Q = np.zeros((n_states, n_actions))

def select_action(state):
    if np.random.rand() < epsilon:
        return np.random.randint(n_actions)  # ランダムなアクション
    else:
        return np.argmax(Q[state])  # 最大のQ値を持つアクション

# 学習
for episode in range(n_episodes):
    state = np.random.randint(n_states)
    done = False
    while not done:
        action = select_action(state)
        next_state = (state + action) % n_states  # 状態遷移
        reward = 1 if next_state == n_states - 1 else 0  # 報酬設定
        Q[state, action] += alpha * (reward + gamma * np.max(Q[next_state]) - Q[state, action])
        state = next_state
        if state == n_states - 1:
            done = True

# Qテーブルのプロット
plt.imshow(Q, cmap='hot', interpolation='nearest')
plt.colorbar()
plt.title('Q-Table')
plt.xlabel('Actions')
plt.ylabel('States')
plt.show()
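The update inside the episode loop is the standard Q-learning rule, written out here for reference (alpha is the learning rate, gamma the discount factor):

Q(s, a) \leftarrow Q(s, a) + \alpha \left[ r + \gamma \max_{a'} Q(s', a') - Q(s, a) \right]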



import numpy as np
import matplotlib.pyplot as plt

# 行列の定義
A = np.array([[1, 2], [3, 4]])
B = np.array([[5, 6], [7, 8]])

# 行列乗算
C = np.dot(A, B)

# 特異値分解 (SVD)
U, S, Vt = np.linalg.svd(A)
S_matrix = np.zeros_like(A, dtype=float)
np.fill_diagonal(S_matrix, S)

# 固有値と固有ベクトル
eigenvalues, eigenvectors = np.linalg.eig(A)

# 結果の表示
print("Matrix A:")
print(A)
print("\nMatrix B:")
print(B)
print("\nMatrix A multiplied by B:")
print(C)
print("\nSVD of Matrix A:")
print("U:")
print(U)
print("S:")
print(S)
print("Vt:")
print(Vt)
print("\nEigenvalues of Matrix A:")
print(eigenvalues)
print("Eigenvectors of Matrix A:")
print(eigenvectors)

# 可視化
plt.figure(figsize=(12, 6))

# 行列のヒートマップ
plt.subplot(1, 2, 1)
plt.imshow(A, cmap='viridis', interpolation='none')
plt.title('Matrix A')
plt.colorbar()

plt.subplot(1, 2, 2)
plt.plot(eigenvalues, 'o', label='Eigenvalues')
plt.title('Eigenvalues of Matrix A')
plt.xlabel('Index')
plt.ylabel('Value')
plt.grid(True)
plt.legend()

plt.tight_layout()
plt.show()


import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sympy import symbols, Eq, solve

# Define the problem parameters
total_birds = 10
total_legs = 28

# Create the data for regression
x = np.arange(11).reshape(-1, 1)  # Cranes from 0 to 10
y = 2 * x + 4 * (total_birds - x)  # Total legs when x is the number of cranes (turtles = 10 - x)

# Train the linear regression model
model = LinearRegression()
model.fit(x, y)

# Predict values for plotting the regression line
x_range = np.linspace(0, 10, 100).reshape(-1, 1)
y_pred = model.predict(x_range)

# Solve the equations for the exact solution
x_sym, y_sym = symbols('x y')
eq1 = Eq(x_sym + y_sym, total_birds)
eq2 = Eq(2*x_sym + 4*y_sym, total_legs)
solution = solve((eq1, eq2), (x_sym, y_sym))
cranes = solution[x_sym]
turtles = solution[y_sym]

# Create data for plotting
x_values = np.arange(0, total_birds + 1)
y_values = total_birds - x_values
legs_values = 2 * x_values + 4 * y_values

# Plotting
plt.figure(figsize=(10, 6))

# Plot the total number of legs vs. cranes
plt.plot(x_values, legs_values, 'o', label='Legs Count', color='blue')

# Plot the linear regression line
plt.plot(x_range, y_pred, color='red', linestyle='--', label='Regression Line')

# Highlight the solution point
plt.plot(cranes, 2*cranes + 4*turtles, 'ro', label='Solution Point')

# Labels and title
plt.xlabel('Number of Cranes')
plt.ylabel('Total Number of Legs')
plt.title('Cranes and Turtles Problem with Linear Regression')
plt.legend()
plt.grid(True)

plt.show()

print(f"Number of Cranes: {cranes}")
print(f"Number of Turtles: {turtles}")



import numpy as np
import matplotlib.pyplot as plt

# Define the vector
v = np.array([1, -2, 3, -4, 5])

# Calculate norms
l1_norm = np.sum(np.abs(v))
l2_norm = np.sqrt(np.sum(v**2))
l_inf_norm = np.max(np.abs(v))

# Print the norms
print(f"L1 Norm: {l1_norm}")
print(f"L2 Norm: {l2_norm}")
print(f"L∞ Norm: {l_inf_norm}")

# Plot the vector and norms
plt.figure(figsize=(10, 6))
plt.bar(range(len(v)), v, color='blue', alpha=0.7, label='Vector v')
plt.axhline(y=l1_norm, color='r', linestyle='--', label=f'L1 Norm: {l1_norm}')
plt.axhline(y=l2_norm, color='g', linestyle='--', label=f'L2 Norm: {l2_norm}')
plt.axhline(y=l_inf_norm, color='orange', linestyle='--', label=f'L∞ Norm: {l_inf_norm}')

plt.xlabel('Index')
plt.ylabel('Value')
plt.title('Vector v and Norms Plot')
plt.legend()
plt.grid(True)
plt.show()
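The three norms can be checked by hand for v = (1, -2, 3, -4, 5):

\|v\|_1 = 1 + 2 + 3 + 4 + 5 = 15, \qquad \|v\|_2 = \sqrt{1 + 4 + 9 + 16 + 25} = \sqrt{55} \approx 7.42, \qquad \|v\|_\infty = 5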



import numpy as np
import scipy.linalg as linalg

# Define the matrix
A = np.array([[4, 2], [3, 1]])

# LU Decomposition
P, L, U = linalg.lu(A)
print("LU Decomposition:")
print("P (Permutation Matrix):")
print(P)
print("L (Lower Triangular Matrix):")
print(L)
print("U (Upper Triangular Matrix):")
print(U)

# Eigenvalues and Eigenvectors
eigenvalues, eigenvectors = np.linalg.eig(A)
print("\nEigenvalues:")
print(eigenvalues)
print("Eigenvectors:")
print(eigenvectors)

# Singular Value Decomposition (SVD)
U_svd, S, Vt = np.linalg.svd(A)
print("\nSingular Value Decomposition:")
print("U (Left Singular Vectors):")
print(U_svd)
print("S (Singular Values):")
print(S)
print("V^T (Right Singular Vectors Transposed):")
print(Vt)




import numpy as np
import matplotlib.pyplot as plt

# Define a 2D point
point = np.array([1, 2])

# Define the linear transformation matrix A and translation vector b
A = np.array([[2, 0], [0, 1]])
b = np.array([1, -1])

# Perform affine transformation
transformed_point = A @ point + b

print("Original Point:", point)
print("Transformed Point:", transformed_point)

# Plot
plt.figure(figsize=(6, 6))
plt.plot(*point, 'ro', label='Original Point')
plt.plot(*transformed_point, 'bo', label='Transformed Point')
plt.axhline(0, color='black',linewidth=0.5)
plt.axvline(0, color='black',linewidth=0.5)
plt.grid(True)
plt.title('Affine Transformation')
plt.legend()
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
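The transformed point can be verified by hand:

A p + b = \begin{pmatrix} 2 & 0 \\ 0 & 1 \end{pmatrix}\begin{pmatrix} 1 \\ 2 \end{pmatrix} + \begin{pmatrix} 1 \\ -1 \end{pmatrix} = \begin{pmatrix} 3 \\ 1 \end{pmatrix}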





from sklearn.decomposition import PCA
import numpy as np
import matplotlib.pyplot as plt

# Generate sample data
np.random.seed(0)
data = np.random.randn(100, 3)  # 100 points in 3D

# Apply PCA
pca = PCA(n_components=2)  # Project to 2D
projected_data = pca.fit_transform(data)

print("Original Data Shape:", data.shape)
print("Projected Data Shape:", projected_data.shape)

# Plot the original and projected data
plt.figure(figsize=(12, 6))

# Plot original data
ax = plt.subplot(1, 2, 1, projection='3d')
ax.scatter(data[:, 0], data[:, 1], data[:, 2])
ax.set_title('Original Data')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')

# Plot projected data
plt.subplot(1, 2, 2)
plt.scatter(projected_data[:, 0], projected_data[:, 1])
plt.title('Projected Data (2D)')
plt.xlabel('PC1')
plt.ylabel('PC2')

plt.tight_layout()
plt.show()

import numpy as np
import matplotlib.pyplot as plt

# 二次関数の定義とその勾配
def f(x):
    return x**2 + 2*x + 1

def df(x):
    return 2*x + 2

# 勾配降下法のパラメータ
learning_rate = 0.1
n_iterations = 100
x_init = 10  # 初期値

# 勾配降下法の実行
x = x_init
x_history = [x]

for _ in range(n_iterations):
    grad = df(x)
    x = x - learning_rate * grad
    x_history.append(x)

# 結果の表示
x_values = np.linspace(-15, 15, 400)
y_values = f(x_values)

plt.figure(figsize=(8, 6))
plt.plot(x_values, y_values, label='$f(x) = x^2 + 2x + 1$', color='blue')
plt.scatter(x_history, [f(x) for x in x_history], color='red', label='Gradient Descent Steps')
plt.title('Gradient Descent Optimization')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend()
plt.grid(True)
plt.show()

print(f"最小値に近いxの値: {x}")




import numpy as np
import matplotlib.pyplot as plt

# 二次関数の定義とその勾配
def f(x):
    return x**2 + 2*x + 1

def df(x):
    return 2*x + 2

# SGDのパラメータ
learning_rate = 0.1
n_iterations = 100
x_init = 10  # 初期値

# SGDの実行
x = x_init
x_history = [x]

for _ in range(n_iterations):
    grad = df(x)  # full gradient of the objective (no minibatch here, so this reduces to plain gradient descent)
    x = x - learning_rate * grad
    x_history.append(x)

# 結果の表示
x_values = np.linspace(-15, 15, 400)
y_values = f(x_values)

plt.figure(figsize=(8, 6))
plt.plot(x_values, y_values, label='$f(x) = x^2 + 2x + 1$', color='blue')
plt.scatter(x_history, [f(x) for x in x_history], color='red', label='SGD Steps')
plt.title('Stochastic Gradient Descent Optimization')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend()
plt.grid(True)
plt.show()

print(f"最小値に近いxの値: {x}")


import numpy as np
import matplotlib.pyplot as plt

# 二次関数の定義とその勾配
def f(x):
    return x**2 + 2*x + 1

def df(x):
    return 2*x + 2

# Adamのパラメータ
learning_rate = 0.01
beta1 = 0.9
beta2 = 0.999
epsilon = 1e-8
n_iterations = 100
x_init = 10  # 初期値

# Adamの実行
x = x_init
m = 0
v = 0
m_hat = 0
v_hat = 0
t = 0
x_history = [x]

for _ in range(n_iterations):
    t += 1
    grad = df(x)
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * (grad ** 2)
    m_hat = m / (1 - beta1 ** t)
    v_hat = v / (1 - beta2 ** t)
    x = x - learning_rate * m_hat / (np.sqrt(v_hat) + epsilon)
    x_history.append(x)

# 結果の表示
x_values = np.linspace(-15, 15, 400)
y_values = f(x_values)

plt.figure(figsize=(8, 6))
plt.plot(x_values, y_values, label='$f(x) = x^2 + 2x + 1$', color='blue')
plt.scatter(x_history, [f(x) for x in x_history], color='red', label='Adam Steps')
plt.title('Adam Optimization')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend()
plt.grid(True)
plt.show()

print(f"最小値に近いxの値: {x}")




import numpy as np
import matplotlib.pyplot as plt

# 二次関数の定義とその勾配とヘッセ行列
def f(x):
    return x**2 + 2*x + 1

def df(x):
    return 2*x + 2

def d2f(x):
    return 2  # 二階微分(定数)

# ニュートン法のパラメータ
x_init = 10  # 初期値
n_iterations = 10

# ニュートン法の実行
x = x_init
x_history = [x]

for _ in range(n_iterations):
    grad = df(x)
    hessian = d2f(x)
    x = x - grad / hessian
    x_history.append(x)

# 結果の表示
x_values = np.linspace(-15, 15, 400)
y_values = f(x_values)

plt.figure(figsize=(8, 6))
plt.plot(x_values, y_values, label='$f(x) = x^2 + 2x + 1$', color='blue')
plt.scatter(x_history, [f(x) for x in x_history], color='red', label='Newton Steps')
plt.title('Newton\'s Method Optimization')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend()
plt.grid(True)
plt.show()

print(f"最小値に近いxの値: {x}")


import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_curve, auc
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

# データの生成
X, y = make_classification(n_samples=1000, n_features=20, n_classes=2, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# モデルの訓練
model = LogisticRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_prob = model.predict_proba(X_test)[:, 1]  # 陽性クラスの確率

# 評価指標の計算
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)

# 評価指標をリストにまとめる
scores = [accuracy, precision, recall, f1]
labels = ['Accuracy', 'Precision', 'Recall', 'F1 Score']

# バーグラフのプロット
plt.figure(figsize=(12, 5))

# バーグラフ
plt.subplot(1, 2, 1)
bars = plt.bar(labels, scores, color=['blue', 'orange', 'green', 'red'])
plt.ylim(0, 1)  # スコアは0から1の範囲
plt.xlabel('Evaluation Metrics')
plt.ylabel('Score')
plt.title('Classification Model Evaluation Metrics')

# 各バーに値を表示
for bar in bars:
    yval = bar.get_height()
    plt.text(bar.get_x() + bar.get_width()/2, yval + 0.02, round(yval, 2), ha='center', va='bottom')

# ROC曲線とAUCの計算
fpr, tpr, _ = roc_curve(y_test, y_prob)
roc_auc = auc(fpr, tpr)

# ROC曲線のプロット
plt.subplot(1, 2, 2)
plt.plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC)')
plt.legend(loc='lower right')

# グラフの表示
plt.tight_layout()
plt.show()

import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

# サンプルデータの生成
X, y = make_blobs(n_samples=300, centers=4, cluster_std=0.60, random_state=0)

# k-meansクラスタリングモデルの作成
kmeans = KMeans(n_clusters=4)  # クラスタ数は4に設定
kmeans.fit(X)

# クラスタの中心を取得
centers = kmeans.cluster_centers_

# 各データポイントのクラスタラベルを取得
labels = kmeans.labels_

# クラスタリング結果のプロット
plt.scatter(X[:, 0], X[:, 1], c=labels, cmap='viridis')
plt.scatter(centers[:, 0], centers[:, 1], s=300, c='red', marker='X')  # クラスタ中心
plt.title("k-means Clustering")
plt.xlabel("Feature 1")
plt.ylabel("Feature 2")
plt.show()



import numpy as np
import matplotlib.pyplot as plt

# 凸関数:絶対値関数 (微分不可能な点がある)
def f(x):
    return np.abs(x)

# 劣勾配の計算 (x = 0 では複数の劣勾配が存在)
def subgradient(x):
    if x > 0:
        return 1
    elif x < 0:
        return -1
    else:
        return np.random.choice([-1, 1])

# 劣勾配法の実装
def subgradient_descent(initial_x, learning_rate=0.1, iterations=100):
    x = initial_x
    trajectory = [x]
    for i in range(iterations):
        grad = subgradient(x)
        x = x - learning_rate * grad
        trajectory.append(x)
    return trajectory

# 初期値とパラメータ設定
initial_x = 2.0
learning_rate = 0.1
iterations = 50

# 劣勾配法の適用
trajectory = subgradient_descent(initial_x, learning_rate, iterations)

# 結果のプロット
x_vals = np.linspace(-3, 3, 400)
y_vals = f(x_vals)
plt.plot(x_vals, y_vals, label='f(x) = |x|')
plt.plot(trajectory, [f(x) for x in trajectory], 'ro-', label='Subgradient Descent')
plt.title('Subgradient Descent on f(x) = |x|')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend()
plt.show()





import numpy as np
import matplotlib.pyplot as plt

# 凸関数: f(x) = x^2
def f(x):
    return x**2

# 1階微分 (勾配)
def gradient(x):
    return 2 * x

# 2階微分 (ヘッセ行列)
def hessian(x):
    return 2

# ニュートン法の実装
def newton_method(initial_x, iterations=10):
    x = initial_x
    trajectory = [x]
    for i in range(iterations):
        grad = gradient(x)
        hess = hessian(x)
        x = x - grad / hess  # Newton法の更新ステップ
        trajectory.append(x)
    return trajectory

# 初期値とパラメータ設定
initial_x = 5.0
iterations = 10

# ニュートン法の適用
trajectory = newton_method(initial_x, iterations)

# 結果のプロット
x_vals = np.linspace(-6, 6, 400)
y_vals = f(x_vals)
plt.plot(x_vals, y_vals, label='f(x) = x^2')
plt.plot(trajectory, [f(x) for x in trajectory], 'ro-', label='Newton\'s Method')
plt.title('Newton\'s Method on f(x) = x^2')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend()
plt.show()



import numpy as np

# 母集団データの生成
np.random.seed(0)
population = np.random.normal(loc=50, scale=10, size=10000)  # 母平均50, 母標準偏差10の母集団

# サンプル抽出
sample = np.random.choice(population, size=100, replace=False)

# 統計量の計算
sample_mean = np.mean(sample)
sample_var = np.var(sample, ddof=1)  # 標本分散(不偏分散)

# 母数との比較
population_mean = np.mean(population)
population_var = np.var(population)

print(f"母平均: {population_mean:.2f}, 標本平均: {sample_mean:.2f}")
print(f"母分散: {population_var:.2f}, 標本分散: {sample_var:.2f}")


import numpy as np
import matplotlib.pyplot as plt

# 二次関数 (例として f(x) = (x-3)^2 + 2)
def f(x):
    return (x - 3)**2 + 2

# 関数の勾配
def gradient(x):
    return 2 * (x - 3)

# 勾配降下法
def gradient_descent(initial_x, learning_rate=0.1, iterations=20):
    x = initial_x
    trajectory = [x]
    for i in range(iterations):
        grad = gradient(x)
        x = x - learning_rate * grad  # 勾配に沿った更新
        trajectory.append(x)
    return trajectory

# 初期値とパラメータ設定
initial_x = 0.0
learning_rate = 0.1
iterations = 20

# 勾配降下法の適用
trajectory = gradient_descent(initial_x, learning_rate, iterations)

# 結果のプロット
x_vals = np.linspace(-5, 10, 400)
y_vals = f(x_vals)
plt.plot(x_vals, y_vals, label='f(x) = (x-3)^2 + 2')
plt.plot(trajectory, [f(x) for x in trajectory], 'ro-', label='Gradient Descent')
plt.title('Gradient Descent on f(x)')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend()
plt.show()





import numpy as np
import matplotlib.pyplot as plt

# 仮想の訓練データセット
X = np.array([1, 2, 3, 4, 5])
y = np.array([1, 2, 3, 4, 5])

# 線形回帰モデル: y = Wx + b
def predict(W, b, X):
    return W * X + b

# 損失関数 (平均二乗誤差)
def compute_loss(W, b, X, y):
    predictions = predict(W, b, X)
    return np.mean((predictions - y)**2)

# 勾配の計算
def compute_gradients(W, b, X, y):
    predictions = predict(W, b, X)
    dW = np.mean(2 * (predictions - y) * X)
    db = np.mean(2 * (predictions - y))
    return dW, db

# 確率的勾配降下法 (SGD) の実装
def stochastic_gradient_descent(W, b, X, y, learning_rate=0.01, iterations=100):
    W_values = [W]
    b_values = [b]
    for i in range(iterations):
        # ランダムにサンプルを選択
        idx = np.random.randint(0, len(X))
        X_i = X[idx:idx+1]
        y_i = y[idx:idx+1]
        
        # 勾配計算
        dW, db = compute_gradients(W, b, X_i, y_i)
        
        # パラメータ更新
        W -= learning_rate * dW
        b -= learning_rate * db
        
        # パラメータの記録
        W_values.append(W)
        b_values.append(b)
    
    return W, b, W_values, b_values

# 初期値設定
W = 0.0
b = 0.0
learning_rate = 0.01
iterations = 100

# SGDの適用
W, b, W_values, b_values = stochastic_gradient_descent(W, b, X, y, learning_rate, iterations)

# 最終的なWとb
print(f"最終的なW: {W}, b: {b}")

# 損失関数のプロット
loss_values = [compute_loss(W, b, X, y) for W, b in zip(W_values, b_values)]
plt.plot(loss_values)
plt.title('Loss Function (SGD)')
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.show()



import numpy as np
import matplotlib.pyplot as plt

# 目的関数 (二次関数) とその勾配
def f(x):
    return x**2

def grad_f(x):
    return 2 * x

# 減少ステップサイズを使った勾配降下法
def gradient_descent(grad_f, start_x, alpha=0.1, decay_rate=0.99, epsilon=1e-6, max_iter=1000):
    x = start_x
    history = [x]
    
    for i in range(max_iter):
        grad = grad_f(x)
        if np.abs(grad) < epsilon:
            print(f"Converged after {i} iterations")
            break
        x = x - alpha * grad
        alpha *= decay_rate  # ステップサイズを減少
        history.append(x)
    
    return x, history

# 初期値とパラメータ設定
start_x = 10.0  # 初期値
alpha = 0.1     # 初期のステップサイズ
decay_rate = 0.99  # ステップサイズの減少率
max_iter = 100   # 最大イテレーション回数

# 勾配降下法を実行
min_x, history = gradient_descent(grad_f, start_x, alpha, decay_rate)

# 結果のプロット
x_vals = np.linspace(-10, 10, 100)
y_vals = f(x_vals)

plt.plot(x_vals, y_vals, label="f(x) = x^2")
plt.plot(history, [f(x) for x in history], 'ro-', label="Gradient Descent Path")
plt.xlabel("x")
plt.ylabel("f(x)")
plt.legend()
plt.title("Gradient Descent with Decreasing Step Size")
plt.show()





import numpy as np
import matplotlib.pyplot as plt

# 1. シグモイド関数
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# 2. ソフトマックス関数
def softmax(x):
    e_x = np.exp(x - np.max(x))  # For numerical stability
    return e_x / e_x.sum(axis=0)

# 3. 1次元のガウス関数
def gaussian_1d(x, mu=0, sigma=1):
    return (1 / (sigma * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((x - mu) / sigma) ** 2)

# 4. 2次元のガウス関数
def gaussian_2d(x, y, mu_x=0, mu_y=0, sigma_x=1, sigma_y=1):
    return (1 / (2 * np.pi * sigma_x * sigma_y)) * np.exp(-0.5 * (((x - mu_x) / sigma_x) ** 2 + ((y - mu_y) / sigma_y) ** 2))

# x軸の範囲設定
x = np.linspace(-10, 10, 100)

# 1. シグモイド関数プロット
sigmoid_values = sigmoid(x)

# 2. ソフトマックス関数プロット
softmax_values = softmax(x)

# 3. 1次元のガウス関数プロット
gaussian_1d_values = gaussian_1d(x)

# 4. 2次元のガウス関数プロット用データ
x2 = np.linspace(-3, 3, 100)
y2 = np.linspace(-3, 3, 100)
X2, Y2 = np.meshgrid(x2, y2)
gaussian_2d_values = gaussian_2d(X2, Y2)

# プロット
plt.figure(figsize=(15, 10))

# 1. シグモイド関数のグラフ
plt.subplot(2, 2, 1)
plt.plot(x, sigmoid_values, label="Sigmoid Function", color="blue")
plt.title("Sigmoid Function")
plt.xlabel("x")
plt.ylabel("sigmoid(x)")
plt.grid(True)

# 2. ソフトマックス関数のグラフ
plt.subplot(2, 2, 2)
plt.plot(x, softmax_values, label="Softmax Function", color="green")
plt.title("Softmax Function")
plt.xlabel("x")
plt.ylabel("softmax(x)")
plt.grid(True)

# 3. 1次元のガウス関数のグラフ
plt.subplot(2, 2, 3)
plt.plot(x, gaussian_1d_values, label="1D Gaussian Function", color="red")
plt.title("1D Gaussian Function")
plt.xlabel("x")
plt.ylabel("Gaussian(x)")
plt.grid(True)

# 4. 2次元のガウス関数のグラフ
plt.subplot(2, 2, 4)
plt.contourf(X2, Y2, gaussian_2d_values, cmap="viridis")
plt.title("2D Gaussian Function")
plt.xlabel("x")
plt.ylabel("y")
plt.colorbar(label="Gaussian(x, y)")

plt.tight_layout()
plt.show()




import numpy as np
import matplotlib.pyplot as plt

# 直線モデル: y = w1 * x + w0
def linear_regression_1D(X, Y):
    n = len(X)
    X_mean = np.mean(X)
    Y_mean = np.mean(Y)
    
    # 傾きw1と切片w0の解析解
    w1 = np.sum((X - X_mean) * (Y - Y_mean)) / np.sum((X - X_mean) ** 2)
    w0 = Y_mean - w1 * X_mean
    return w1, w0

# 2次元面モデル: z = w1 * x1 + w2 * x2 + w0
def linear_regression_2D(X1, X2, Z):
    A = np.column_stack((X1, X2, np.ones(len(X1))))
    w = np.linalg.lstsq(A, Z, rcond=None)[0]  # 解析解
    return w  # w1, w2, w0

# D次元線形回帰モデル: y = w1 * x1 + w2 * x2 + ... + wD * xD + w0
def linear_regression_D(X, Y):
    X_augmented = np.column_stack((X, np.ones(X.shape[0])))
    w = np.linalg.lstsq(X_augmented, Y, rcond=None)[0]  # 解析解
    return w  # w1, w2, ..., wD, w0

# サンプルデータ生成
# 1次元直線モデルのサンプルデータ
X_1D = np.array([1, 2, 3, 4, 5])
Y_1D = np.array([2, 3, 4, 5, 6])

# 2次元面モデルのサンプルデータ
X1_2D = np.array([1, 2, 3, 4, 5])
X2_2D = np.array([2, 3, 4, 5, 6])
Z_2D = np.array([3, 5, 7, 9, 11])

# D次元線形回帰モデルのサンプルデータ
X_D = np.array([[1, 2], [2, 3], [3, 4], [4, 5], [5, 6]])  # 2次元入力
Y_D = np.array([5, 7, 9, 11, 13])

# 1次元直線モデルの解析解
w1_1D, w0_1D = linear_regression_1D(X_1D, Y_1D)
print(f"1次元直線モデルのパラメータ: w1 = {w1_1D}, w0 = {w0_1D}")

# 2次元面モデルの解析解
w1_2D, w2_2D, w0_2D = linear_regression_2D(X1_2D, X2_2D, Z_2D)
print(f"2次元面モデルのパラメータ: w1 = {w1_2D}, w2 = {w2_2D}, w0 = {w0_2D}")

# D次元線形回帰モデルの解析解
w_D = linear_regression_D(X_D, Y_D)
print(f"D次元線形回帰モデルのパラメータ: {w_D}")

# プロット (1次元直線モデルの結果)
plt.scatter(X_1D, Y_1D, color="blue", label="Data points")
plt.plot(X_1D, w1_1D * X_1D + w0_1D, color="red", label="Fitted line")
plt.xlabel("X")
plt.ylabel("Y")
plt.title("1D Linear Regression")
plt.legend()
plt.show()

import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

# 原点を通らない面モデル: z = w1 * x1 + w2 * x2 + w0
def plane_model(X1, X2, Z):
    A = np.column_stack((X1, X2, np.ones(len(X1))))
    w = np.linalg.lstsq(A, Z, rcond=None)[0]  # 解析解
    return w  # w1, w2, w0

# 線形基底関数モデル: φ(x) = w1 * φ1(x) + w2 * φ2(x) + ... + wN * φN(x)
def linear_basis_function_model(X, Y, degree):
    poly = PolynomialFeatures(degree)
    X_poly = poly.fit_transform(X)
    model = LinearRegression()
    model.fit(X_poly, Y)
    return model, poly

# オーバーフィッティングの問題を視覚化
def overfitting_demo(X, Y, degree_high, degree_low):
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
    
    # 高次モデル(オーバーフィット)
    model_high, poly_high = linear_basis_function_model(X_train, Y_train, degree_high)
    Y_pred_train_high = model_high.predict(poly_high.transform(X_train))
    Y_pred_test_high = model_high.predict(poly_high.transform(X_test))
    
    # 低次モデル(アンダーフィット)
    model_low, poly_low = linear_basis_function_model(X_train, Y_train, degree_low)
    Y_pred_train_low = model_low.predict(poly_low.transform(X_train))
    Y_pred_test_low = model_low.predict(poly_low.transform(X_test))
    
    # モデルの評価
    mse_train_high = mean_squared_error(Y_train, Y_pred_train_high)
    mse_test_high = mean_squared_error(Y_test, Y_pred_test_high)
    mse_train_low = mean_squared_error(Y_train, Y_pred_train_low)
    mse_test_low = mean_squared_error(Y_test, Y_pred_test_low)
    
    print(f"高次モデルのMSE (train): {mse_train_high}, (test): {mse_test_high}")
    print(f"低次モデルのMSE (train): {mse_train_low}, (test): {mse_test_low}")
    
    # プロット
    plt.scatter(X, Y, color="blue", label="Data points")
    
    # 高次モデルのプロット
    X_plot = np.linspace(min(X), max(X), 100).reshape(-1, 1)
    Y_plot_high = model_high.predict(poly_high.transform(X_plot))
    plt.plot(X_plot, Y_plot_high, color="red", label=f"High Degree (Overfit), degree={degree_high}")
    
    # 低次モデルのプロット
    Y_plot_low = model_low.predict(poly_low.transform(X_plot))
    plt.plot(X_plot, Y_plot_low, color="green", label=f"Low Degree (Underfit), degree={degree_low}")
    
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.title("Overfitting vs Underfitting")
    plt.legend()
    plt.show()

# サンプルデータ生成
# 原点を通らない面モデルのサンプルデータ
X1_plane = np.array([1, 2, 3, 4, 5])
X2_plane = np.array([2, 3, 4, 5, 6])
Z_plane = np.array([5, 7, 9, 11, 13])

# 線形基底関数モデル用のサンプルデータ
X_basis = np.array([[1], [2], [3], [4], [5], [6], [7], [8], [9], [10]])
Y_basis = np.array([2.5, 3.6, 4.9, 6.1, 7.2, 8.0, 8.8, 10.1, 11.2, 12.4])

# 原点を通らない面モデルの解析解
w1_plane, w2_plane, w0_plane = plane_model(X1_plane, X2_plane, Z_plane)
print(f"原点を通らない面モデルのパラメータ: w1 = {w1_plane}, w2 = {w2_plane}, w0 = {w0_plane}")

# 線形基底関数モデルの実行(2次元多項式基底関数を使用)
degree_basis = 2
model_basis, poly_basis = linear_basis_function_model(X_basis, Y_basis, degree_basis)
Y_pred_basis = model_basis.predict(poly_basis.transform(X_basis))
print(f"線形基底関数モデルの係数: {model_basis.coef_}")
print(f"線形基底関数モデルの切片: {model_basis.intercept_}")

# オーバーフィッティングの問題の視覚化(低次と高次の比較)
degree_high = 9  # 高次モデル
degree_low = 1   # 低次モデル
overfitting_demo(X_basis, Y_basis, degree_high, degree_low)


import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss, accuracy_score
from sklearn.datasets import make_classification
from mpl_toolkits.mplot3d import Axes3D

# 1次元クラス分類用のデータ生成(修正済み)
def generate_1d_classification_data():
    X, y = make_classification(n_samples=100, n_features=1, n_informative=1, n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=1, random_state=42)
    return X, y

# 2次元クラス分類用のデータ生成(2クラス)
def generate_2d_classification_data():
    X, y = make_classification(n_samples=100, n_features=2, n_informative=2, n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=1, random_state=42)
    return X, y

# 2次元クラス分類用のデータ生成(3クラス)
def generate_2d_classification_data_3classes():
    X, y = make_classification(n_samples=100, n_features=2, n_informative=2, n_redundant=0, n_repeated=0, n_classes=3, n_clusters_per_class=1, random_state=42)
    return X, y

# ロジスティック回帰モデルのトレーニングと評価
def logistic_regression_model(X, y, num_classes=2):
    model = LogisticRegression(multi_class='ovr' if num_classes == 2 else 'multinomial', solver='lbfgs')
    model.fit(X, y)
    return model

# データプロット(1次元クラス分類)
def plot_1d_classification(X, y, model):
    plt.scatter(X, y, color='blue', label='Data points')
    X_plot = np.linspace(X.min(), X.max(), 100).reshape(-1, 1)
    y_prob = model.predict_proba(X_plot)[:, 1]
    plt.plot(X_plot, y_prob, color='red', label='Logistic regression')
    plt.xlabel("X")
    plt.ylabel("Probability")
    plt.title("1D Logistic Regression")
    plt.legend()
    plt.show()

# データプロット(2次元クラス分類)
def plot_2d_classification(X, y, model, num_classes=2):
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap='viridis', label='Data points')
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1))
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, alpha=0.5, cmap='viridis')
    plt.xlabel("X1")
    plt.ylabel("X2")
    plt.title(f"2D Logistic Regression (Classes={num_classes})")
    plt.show()

# ロジスティック回帰の結果を評価
def evaluate_model(model, X, y):
    y_pred = model.predict(X)
    y_prob = model.predict_proba(X)
    loss = log_loss(y, y_prob)
    accuracy = accuracy_score(y, y_pred)
    print(f"Log-Loss: {loss:.4f}, Accuracy: {accuracy:.4f}")

# 1次元クラス分類の実行
X_1d, y_1d = generate_1d_classification_data()
X_train_1d, X_test_1d, y_train_1d, y_test_1d = train_test_split(X_1d, y_1d, test_size=0.2, random_state=42)
model_1d = logistic_regression_model(X_train_1d, y_train_1d, num_classes=2)
evaluate_model(model_1d, X_test_1d, y_test_1d)
plot_1d_classification(X_test_1d, y_test_1d, model_1d)

# 2次元クラス分類の実行(2クラス)
X_2d, y_2d = generate_2d_classification_data()
X_train_2d, X_test_2d, y_train_2d, y_test_2d = train_test_split(X_2d, y_2d, test_size=0.2, random_state=42)
model_2d = logistic_regression_model(X_train_2d, y_train_2d, num_classes=2)
evaluate_model(model_2d, X_test_2d, y_test_2d)
plot_2d_classification(X_test_2d, y_test_2d, model_2d, num_classes=2)

# 2次元クラス分類の実行(3クラス)
X_3d, y_3d = generate_2d_classification_data_3classes()
X_train_3d, X_test_3d, y_train_3d, y_test_3d = train_test_split(X_3d, y_3d, test_size=0.2, random_state=42)
model_3d = logistic_regression_model(X_train_3d, y_train_3d, num_classes=3)
evaluate_model(model_3d, X_test_3d, y_test_3d)
plot_2d_classification(X_test_3d, y_test_3d, model_3d, num_classes=3)


import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification

# 1次元クラス分類用データ生成
def generate_1d_classification_data():
    X, y = make_classification(n_samples=100, n_features=1, n_informative=1, n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=1, random_state=42)
    return X, y

# 2次元クラス分類用データ生成(2クラス)
def generate_2d_classification_data():
    X, y = make_classification(n_samples=100, n_features=2, n_informative=2, n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=1, random_state=42)
    return X, y

# 2次元クラス分類用データ生成(3クラス)
def generate_2d_classification_data_3classes():
    X, y = make_classification(n_samples=100, n_features=2, n_informative=2, n_redundant=0, n_repeated=0, n_classes=3, n_clusters_per_class=1, random_state=42)
    return X, y

# ロジスティック回帰モデルのトレーニングと評価
def logistic_regression_model(X, y, num_classes=2):
    model = LogisticRegression(multi_class='ovr' if num_classes == 2 else 'multinomial', solver='lbfgs')
    model.fit(X, y)
    return model

# データプロット(1次元クラス分類)
def plot_1d_classification(X, y, model):
    plt.scatter(X, y, color='blue', label='Data points')
    X_plot = np.linspace(X.min(), X.max(), 100).reshape(-1, 1)
    y_prob = model.predict_proba(X_plot)[:, 1]
    plt.plot(X_plot, y_prob, color='red', label='Logistic regression')
    plt.xlabel("X")
    plt.ylabel("Probability")
    plt.title("1D Logistic Regression")
    plt.legend()
    plt.show()

# データプロット(2次元クラス分類)
def plot_2d_classification(X, y, model, num_classes=2):
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap='viridis', label='Data points')
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1))
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, alpha=0.5, cmap='viridis')
    plt.xlabel("X1")
    plt.ylabel("X2")
    plt.title(f"2D Logistic Regression (Classes={num_classes})")
    plt.show()

# モデル評価(交差エントロピー誤差と正解率)
def evaluate_model(model, X, y):
    y_pred = model.predict(X)
    y_prob = model.predict_proba(X)
    loss = log_loss(y, y_prob)
    accuracy = accuracy_score(y, y_pred)
    print(f"Log-Loss: {loss:.4f}, Accuracy: {accuracy:.4f}")

# 勾配法(サンプル例: 簡単な勾配法による更新ステップ)
def gradient_descent(X, y, learning_rate=0.01, num_iterations=1000):
    m, n = X.shape
    weights = np.zeros(n)
    bias = 0
    for _ in range(num_iterations):
        linear_model = np.dot(X, weights) + bias
        predictions = 1 / (1 + np.exp(-linear_model))  # シグモイド関数
        dw = (1 / m) * np.dot(X.T, (predictions - y))
        db = (1 / m) * np.sum(predictions - y)
        weights -= learning_rate * dw
        bias -= learning_rate * db
    return weights, bias

# 1次元クラス分類の実行
X_1d, y_1d = generate_1d_classification_data()
X_train_1d, X_test_1d, y_train_1d, y_test_1d = train_test_split(X_1d, y_1d, test_size=0.2, random_state=42)
model_1d = logistic_regression_model(X_train_1d, y_train_1d, num_classes=2)
evaluate_model(model_1d, X_test_1d, y_test_1d)
plot_1d_classification(X_test_1d, y_test_1d, model_1d)

# 2次元クラス分類の実行(2クラス)
X_2d, y_2d = generate_2d_classification_data()
X_train_2d, X_test_2d, y_train_2d, y_test_2d = train_test_split(X_2d, y_2d, test_size=0.2, random_state=42)
model_2d = logistic_regression_model(X_train_2d, y_train_2d, num_classes=2)
evaluate_model(model_2d, X_test_2d, y_test_2d)
plot_2d_classification(X_test_2d, y_test_2d, model_2d, num_classes=2)

# 2次元クラス分類の実行(3クラス)
X_3d, y_3d = generate_2d_classification_data_3classes()
X_train_3d, X_test_3d, y_train_3d, y_test_3d = train_test_split(X_3d, y_3d, test_size=0.2, random_state=42)
model_3d = logistic_regression_model(X_train_3d, y_train_3d, num_classes=3)
evaluate_model(model_3d, X_test_3d, y_test_3d)
plot_2d_classification(X_test_3d, y_test_3d, model_3d, num_classes=3)

# 勾配法の例を実行(1次元データ)
weights, bias = gradient_descent(X_train_1d, y_train_1d)
print(f"Weights: {weights}, Bias: {bias}")



import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD

# ニューロンモデル
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(a):
    # a is already a sigmoid output, so the derivative with respect to the pre-activation is a * (1 - a)
    return a * (1 - a)

# 2層フィードフォワードニューラルネットワークの実装
class TwoLayerNN:
    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        # 初期化
        self.W1 = np.random.randn(self.input_size, self.hidden_size)
        self.b1 = np.zeros((1, self.hidden_size))
        self.W2 = np.random.randn(self.hidden_size, self.output_size)
        self.b2 = np.zeros((1, self.output_size))

    def forward(self, X):
        self.hidden = sigmoid(np.dot(X, self.W1) + self.b1)
        self.output = sigmoid(np.dot(self.hidden, self.W2) + self.b2)
        return self.output

    def backward(self, X, y, learning_rate=0.01):
        m = X.shape[0]
        output_error = self.output - y
        output_delta = output_error * sigmoid_derivative(self.output)

        hidden_error = np.dot(output_delta, self.W2.T)
        hidden_delta = hidden_error * sigmoid_derivative(self.hidden)

        # パラメータの更新
        self.W2 -= learning_rate * np.dot(self.hidden.T, output_delta) / m
        self.b2 -= learning_rate * np.sum(output_delta, axis=0) / m
        self.W1 -= learning_rate * np.dot(X.T, hidden_delta) / m
        self.b1 -= learning_rate * np.sum(hidden_delta, axis=0) / m

    def train(self, X, y, epochs=1000, learning_rate=0.01):
        for epoch in range(epochs):
            self.forward(X)
            self.backward(X, y, learning_rate)

# データの生成と前処理
X, y = make_classification(n_samples=1000, n_features=4, n_informative=2, n_redundant=0, n_repeated=0, n_classes=2, random_state=42)
y = y.reshape(-1, 1)  # ニューラルネットワークのために y を列ベクトルに変換
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# ニューラルネットワークの実行
nn = TwoLayerNN(input_size=4, hidden_size=5, output_size=1)
nn.train(X_train, y_train, epochs=1000, learning_rate=0.01)

# テストデータでの評価
y_pred = nn.forward(X_test)
y_pred = (y_pred > 0.5).astype(int)
accuracy = accuracy_score(y_test, y_pred)
print(f'ニューラルネットワークの精度: {accuracy:.4f}')

# Kerasでニューラルネットワークモデルを構築
def build_keras_model():
    model = Sequential()
    model.add(Dense(5, input_dim=4, activation='sigmoid'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer=SGD(learning_rate=0.01), loss='binary_crossentropy', metrics=['accuracy'])
    return model

# Kerasでのモデルのトレーニングと評価
keras_model = build_keras_model()
keras_model.fit(X_train, y_train, epochs=100, verbose=0)
loss, accuracy = keras_model.evaluate(X_test, y_test, verbose=0)
print(f'Kerasモデルの精度: {accuracy:.4f}')


import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import Adam

# MNISTデータセットのロード
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# データの前処理
X_train = X_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0
X_test = X_test.reshape(-1, 28, 28, 1).astype('float32') / 255.0
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

# 畳み込みニューラルネットワークモデルの構築
model = Sequential()

# 畳み込み層
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# Flatten層と全結合層
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

# モデルのコンパイル
model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])

# モデルのトレーニング
history = model.fit(X_train, y_train, epochs=10, batch_size=64, validation_split=0.2, verbose=1)

# モデルの評価
test_loss, test_accuracy = model.evaluate(X_test, y_test, verbose=0)
print(f'テストデータでの損失: {test_loss:.4f}')
print(f'テストデータでの精度: {test_accuracy:.4f}')

# トレーニングと検証の損失と精度のプロット
plt.figure(figsize=(12, 4))

# 精度のプロット
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

# 損失のプロット
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

plt.show()




import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture

# 2次元入力データの生成
np.random.seed(42)
n_samples = 500
X = np.concatenate([np.random.randn(n_samples // 2, 2) * 0.5 + np.array([1, 1]),
                     np.random.randn(n_samples // 2, 2) * 0.5 + np.array([-1, -1])])

# K-meansクラスタリング
kmeans = KMeans(n_clusters=2, random_state=42)
kmeans_labels = kmeans.fit_predict(X)
kmeans_centers = kmeans.cluster_centers_

# ガウス混合モデルのフィッティング
gmm = GaussianMixture(n_components=2, random_state=42)
gmm_labels = gmm.fit_predict(X)
gmm_means = gmm.means_

# 結果の可視化
fig, axs = plt.subplots(1, 2, figsize=(12, 6))

# K-meansの結果
axs[0].scatter(X[:, 0], X[:, 1], c=kmeans_labels, s=30, cmap='viridis')
axs[0].scatter(kmeans_centers[:, 0], kmeans_centers[:, 1], c='red', s=100, alpha=0.75, marker='X')
axs[0].set_title('K-means Clustering')

# GMMの結果
axs[1].scatter(X[:, 0], X[:, 1], c=gmm_labels, s=30, cmap='viridis')
axs[1].scatter(gmm_means[:, 0], gmm_means[:, 1], c='red', s=100, alpha=0.75, marker='X')
axs[1].set_title('Gaussian Mixture Model')

plt.show()

# EMアルゴリズムの概要
# Step 0: 初期化
print("Step 0: Initialization")
print("Initialize the parameters (means, covariances, and weights)")

# Step 1: E Step
print("\nStep 1: E Step")
print("Calculate the probability of each point belonging to each cluster")

# Step 2: M Step
print("\nStep 2: M Step")
print("Update the parameters (means, covariances, and weights) based on the probabilities")

# 尤度の計算
log_likelihood = gmm.score(X)
print("\nLikelihood (Log-Likelihood) of the data given the model: ", log_likelihood)


import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import BertTokenizer, BertModel
from sklearn.datasets import make_moons

# 1. モンテカルロツリーサーチの概要実装
class MCTSNode:
    def __init__(self, state, parent=None):
        self.state = state
        self.parent = parent
        self.children = []
        self.visits = 0
        self.value = 0
    
    def select_child(self):
        # UCB1 を用いた子ノードの選択
        return max(self.children, key=lambda x: x.value / (x.visits + 1e-5) + np.sqrt(2 * np.log(self.visits) / (x.visits + 1e-5)))

def mcts_simulation():
    root = MCTSNode(state="root")
    # 簡易的なMCTSの実行
    for _ in range(10):
        node = root
        while node.children:
            node = node.select_child()
        node.visits += 1
        node.value += np.random.random()  # ランダムな報酬
    return root

# 2. GAN の概要実装 (ディープラーニングを用いた簡易モデル)
class SimpleGAN(nn.Module):
    def __init__(self):
        super(SimpleGAN, self).__init__()
        self.generator = nn.Sequential(
            nn.Linear(10, 32),
            nn.ReLU(),
            nn.Linear(32, 2)
        )
        self.discriminator = nn.Sequential(
            nn.Linear(2, 32),
            nn.ReLU(),
            nn.Linear(32, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        return self.generator(x)

def gan_example():
    gan = SimpleGAN()
    optimizer = optim.Adam(gan.parameters(), lr=0.001)
    # 簡単なトレーニングループ
    for _ in range(5):
        real_data, _ = make_moons(n_samples=10, noise=0.1)
        fake_data = gan.generator(torch.randn(10, 10).float())
        real_labels = torch.ones(10, 1)
        fake_labels = torch.zeros(10, 1)
        real_loss = torch.mean((gan.discriminator(torch.tensor(real_data).float()) - real_labels) ** 2)
        fake_loss = torch.mean((gan.discriminator(fake_data) - fake_labels) ** 2)
        loss = real_loss + fake_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return fake_data

# 3. BERT の概要実装
def bert_example(text="Hello, how are you?"):
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    model = BertModel.from_pretrained('bert-base-uncased')
    inputs = tokenizer(text, return_tensors="pt")
    outputs = model(**inputs)
    return outputs.last_hidden_state

# 5. 量子アニーリングの概要実装
def quantum_annealing_example():
    # ここでは量子ビットの簡単なエネルギー最適化をシミュレーション
    energy = np.random.random()
    return energy

# 各技術のデモを実行
mcts_result = mcts_simulation()
gan_result = gan_example()
bert_result = bert_example()
# quantum_cryptography_result = quantum_cryptography_example() # コメントアウト
quantum_annealing_result = quantum_annealing_example()

print("MCTS Simulation Result:", mcts_result)
print("GAN Generated Data:", gan_result)
print("BERT Output:", bert_result)
# print("Quantum Cryptography Result:", quantum_cryptography_result) # コメントアウト
print("Quantum Annealing Energy:", quantum_annealing_result)





