Solving 青チャート and 基礎問題精講 Math Problems in Python, with Applications to AI and Machine Learning

Posted at 2025-04-02

Introduction

While studying machine learning and AI, many people eventually feel that they really do need the math and decide to go back to university entrance exam mathematics. Quite a few have probably opened an entrance exam math reference book at some point.

So in this article I do something a little reckless: I solve typical entrance exam math problems in Python, and then use additional code to show how each one relates to, and can be applied in, machine learning and AI.

All of the code is included, so whether you are an exam student or a beginner engineer, feel free to try it out. If anything is unclear, it's perfectly fine to ask a generative AI about it!

Python Setup
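
The article does not pin down a specific environment, so here is a minimal setup sketch, assuming a Google Colab or Jupyter-style notebook (a later section also uses !pip directly in a cell). These are the standard PyPI names of every library used below:

# Assumed environment: Google Colab or Jupyter notebook (adjust for your own setup)
!pip install numpy pandas sympy matplotlib scipy scikit-learn autograd control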

青チャート 1A Example 5 and Neural Networks

# (a + 2) と (a + 2)^2 の展開と代入、ニューラルネットワーク計算への応用
# Expand and substitute (a + 2) and (a + 2)^2, apply to neural network calculation

# SymPyを使って代数処理を行う
# Using SymPy for symbolic math
import sympy as sp

# 変数 a を定義
# Define symbolic variable a
a = sp.Symbol('a')

# Define a + 2 and (a + 2)^2
expr1 = a + 2
expr2 = (a + 2)**2

# 結果の表示(展開形式)
# Show expanded forms
print("展開 (Expansion):")
print("a + 2 =", expr1)
print("(a + 2)^2 =", sp.expand(expr2))

# a = 2 のときの値を代入して評価
# Substitute a = 2 and evaluate
value1 = expr1.subs(a, 2)
value2 = expr2.subs(a, 2)

print("\na = 2 のときの値 (Values when a = 2):")
print("a + 2 =", value1)
print("(a + 2)^2 =", value2)

# --------------------------
# ニューラルネットワークの1層分の計算への応用
# Applying to a neural network layer calculation
# 例: z = w1*x1 + w2*x2 + w3*x3 + b

# 入力値(a+2, (a+2)^2)を使う
# Use input1 = a + 2, input2 = (a + 2)^2
input1 = value1
input2 = value2
input3 = 1  # Placeholder third input
w1, w2, w3 = 0.5, -0.3, 0.8  # 重み (weights)
b = 0.1  # バイアス (bias)

# 総和計算
# Linear combination calculation
z = w1 * input1 + w2 * input2 + w3 * input3 + b

print("\nニューラルネットワークの出力 (Neural Network Output):")
print(f"z = {z}")

基礎問題精講 1A Example 25 and Neural Networks

import numpy as np
import matplotlib.pyplot as plt

# 重みとバイアスを定義(Define weight and bias)
# y = 2x - 1 → weight = 2, bias = -1
weight = 2  # 重み (weight)
bias = -1   # バイアス (bias)

# 特徴量入力(横軸)の範囲を生成(Generate input feature values)
x = np.linspace(-5, 5, 100)  # 100 points from -5 to 5

# 目的関数 y = 重み×x + バイアス を計算(Compute objective function)
y = weight * x + bias

# グラフ描画(Plot the graph)
plt.figure(figsize=(8, 5))
plt.plot(x, y, label='Objective Function: y = 2x - 1')

# 軸ラベル・タイトル・グリッド(Add labels and grid)
plt.xlabel('Feature Input x')       # x-axis: feature input
plt.ylabel('Objective Function y')  # y-axis: objective function
plt.title('Linear Objective Function: y = weight × input + bias')
plt.grid(True)
plt.axhline(0, color='black', linewidth=0.8)
plt.axvline(0, color='black', linewidth=0.8)
plt.legend()
plt.show()
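
The line y = 2x - 1 is exactly the kind of model linear regression learns. As a quick sketch (my addition; the data here is synthetic), fitting scikit-learn's LinearRegression to noisy samples of this line should recover a weight close to 2 and a bias close to -1:

import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
x_train = np.linspace(-5, 5, 50).reshape(-1, 1)
y_train = 2 * x_train.ravel() - 1 + rng.normal(0, 0.5, 50)  # y = 2x - 1 plus noise

model = LinearRegression().fit(x_train, y_train)
print(f"learned weight: {model.coef_[0]:.3f} (true value 2)")
print(f"learned bias:   {model.intercept_:.3f} (true value -1)")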

基礎問題精講 1A Example 27: Quadratic Functions and Machine-Learning-Style Optimization

import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
import random
import autograd.numpy as anp  # Importing autograd's version of numpy
from autograd import grad  # To compute the gradient

# --- 🔧 係数を変数として定義 / Define coefficients as variables ---
a = 2
b = -4
c = 3

# --- ✅ 関数と導関数を定義 / Define quadratic function and its derivative ---
def f(x):
    return a * x**2 + b * x + c  # 二次関数 / Quadratic function

# Automatically compute the derivative using autograd
df_autograd = grad(f)  # Get the gradient of f(x)

# ========================================================
# 📘 1. 数学的手法 / Math-based Methods
# ========================================================
# Vertex form: the minimum of f(x) = ax^2 + bx + c is at x* = -b / (2a)
x_vertex = -b / (2 * a)
y_vertex = f(x_vertex)

# Derivative method: solving f'(x) = 2ax + b = 0 gives the same x*
x_diff = -b / (2 * a)
y_diff = f(x_diff)

# =============================================
# 🤖 2. 数値的最適化 / Optimization Approaches
# =============================================

# 勾配降下法 / Gradient Descent
x_gd = 5.0
lr = 0.1
for i in range(100):
    x_gd -= lr * df_autograd(x_gd)  # Using autograd for derivative
y_gd = f(x_gd)

# Scipy最適化 / Scipy Optimization
res = minimize(lambda x: f(x[0]), x0=[0])
x_scipy = res.x[0]
y_scipy = res.fun

# SGD風 / Stochastic Gradient Descent Style
x_sgd = 5.0
for i in range(100):
    x_sgd -= lr * df_autograd(x_sgd)  # Using autograd for derivative
y_sgd = f(x_sgd)

# モーメンタム付きGD / Momentum-based GD
x_momentum = 5.0
v = 0
beta = 0.9
for i in range(100):
    grad_val = df_autograd(x_momentum)  # Using autograd for derivative
    v = beta * v + (1 - beta) * grad_val
    x_momentum -= lr * v
y_momentum = f(x_momentum)

# =============================================
# 🤖 3. ベイズ最適化 / Bayesian Optimization
# =============================================

# 初期観測点 / Initial observed points
X_obs = np.array([[-2.0], [0.0], [2.0]])
y_obs = np.array([f(x[0]) for x in X_obs])

# カーネルとGPモデルを定義 / Define kernel and GP model
kernel = C(1.0, (1e-3, 1e3)) * RBF(length_scale=1.0)
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=10)

# 探索範囲 / Search space
X = np.atleast_2d(np.linspace(-2, 5, 1000)).T

# ベイズ最適化ループ / Bayesian Optimization loop
for i in range(5):
    gp.fit(X_obs, y_obs)
    y_pred, sigma = gp.predict(X, return_std=True)
    next_x = X[np.argmin(y_pred)]
    next_y = f(next_x[0])
    X_obs = np.vstack((X_obs, [next_x]))
    y_obs = np.append(y_obs, next_y)

x_bayes = X_obs[np.argmin(y_obs)][0]
y_bayes = np.min(y_obs)

# =============================================
# 🤖 4. 遺伝的アルゴリズム / Genetic Algorithm
# =============================================

# パラメータ設定 / GA parameters
population_size = 20
generations = 50
mutation_rate = 0.1
domain = (-2, 5)

# 初期集団 / Initial population
population = np.random.uniform(domain[0], domain[1], population_size)

for _ in range(generations):
    fitness = np.array([f(x) for x in population])
    selected = population[np.argsort(fitness)[:population_size//2]]
    children = []
    for _ in range(population_size // 2):
        p1, p2 = np.random.choice(selected, 2)
        child = (p1 + p2) / 2
        if random.random() < mutation_rate:
            child += np.random.normal(0, 0.1)
        children.append(np.clip(child, domain[0], domain[1]))
    population = np.concatenate((selected, children))

x_ga = population[np.argmin([f(x) for x in population])]
y_ga = f(x_ga)

# ================================
# 📊 結果表示 / Print Results
# ================================
print("📘 [Math] Vertex Method:     x =", x_vertex, ", f(x) =", y_vertex)
print("📘 [Math] Derivative Method: x =", x_diff, ", f(x) =", y_diff)
print("🤖 [GD] Gradient Descent:    x =", x_gd, ", f(x) =", y_gd)
print("🤖 [SGD-style]:              x =", x_sgd, ", f(x) =", y_sgd)
print("🤖 [Momentum]:               x =", x_momentum, ", f(x) =", y_momentum)
print("🧮 [Scipy]:                  x =", x_scipy, ", f(x) =", y_scipy)
print("🔍 [Bayesian]:               x =", x_bayes, ", f(x) =", y_bayes)
print("🧬 [Genetic Algorithm]:      x =", x_ga, ", f(x) =", y_ga)

# ================================
# 🖼 Plot the curve with English labels
# ================================
x_vals = np.linspace(-2, 5, 300)
y_vals = f(x_vals)

plt.figure(figsize=(10, 6))
plt.plot(x_vals, y_vals, label='f(x) = ax² + bx + c', color='black')
plt.plot(x_vertex, y_vertex, 'ro', label='Vertex (Math)')
plt.plot(x_gd, y_gd, 'gx', label='Gradient Descent')
plt.plot(x_sgd, y_sgd, 'm^', label='SGD-style')
plt.plot(x_momentum, y_momentum, 'bo', label='Momentum')
plt.plot(x_scipy, y_scipy, 'k*', label='Scipy Minimize')
plt.plot(x_bayes, y_bayes, 'ys', label='Bayesian Optimization')
plt.plot(x_ga, y_ga, 'cP', label='Genetic Algorithm')

# 英語ラベル / English axis labels and title
plt.xlabel('x (Input Feature)')
plt.ylabel('f(x) (Objective Function)')
plt.title('Minimization of Quadratic Function by Various Methods')
plt.legend()
plt.grid(True)
plt.show()
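
One point worth making explicit (my addition): for f(x) = ax^2 + bx + c, the gradient-descent update is x <- x - lr*(2ax + b) = (1 - 2a*lr)*x - lr*b, so it converges exactly when |1 - 2a*lr| < 1, i.e. lr < 1/a (here 0.5). A short sketch, reusing a and b from the block above:

# Convergence depends on the learning rate: below 1/a it contracts, above it diverges
for lr_test in [0.1, 0.4, 0.6]:
    x_t = 5.0
    for _ in range(100):
        x_t -= lr_test * (2 * a * x_t + b)
    print(f"lr = {lr_test}: x after 100 steps = {x_t:.4g}")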


基礎問題精講 1A Example 145 and the Method of Least Squares

import numpy as np
import matplotlib.pyplot as plt

# データの準備 / Prepare data
# 1回戦と2回戦の得点データ / Data for the 1st and 2nd round scores
x = np.array([33, 40, 44, 38, 29, 43, 33, 40, 38, 30])  # 1回戦 (説明変数 x) / 1st round (Explanatory variable: x)
y = np.array([37, 34, 44, 35, 30, 41, 33, 40, 41, 37])  # 2回戦 (目的変数 y) / 2nd round (Response variable: y)

# 平均値の計算 / Calculate the averages (means)
mean_x = np.mean(x)  # 1回戦の平均 / Average of 1st round
mean_y = np.mean(y)  # 2回戦の平均 / Average of 2nd round

# 分散の計算 / Calculate the variances
var_x = np.var(x)  # 1回戦の分散 / Variance of 1st round
var_y = np.var(y)  # 2回戦の分散 / Variance of 2nd round

# 共分散の計算 / Calculate the covariance (ddof=0 to stay consistent with np.var above)
cov_xy = np.cov(x, y, ddof=0)[0][1]  # 共分散 / Covariance between x and y

# 相関係数の計算 / Calculate the correlation coefficient
corr_xy = cov_xy / (np.sqrt(var_x) * np.sqrt(var_y))  # 相関係数 / Correlation coefficient

# 回帰直線の計算 / Calculate the regression line (y = mx + b)
# 最小二乗法を使用して回帰係数(傾き m と切片 b)を計算 / Use Least Squares Method to calculate regression coefficients (slope m and intercept b)
m = cov_xy / var_x  # 傾き / Slope
b = mean_y - m * mean_x  # 切片 / Intercept

# 回帰直線の予測値の計算 / Calculate the predicted values for the regression line
y_pred = m * x + b  # 予測値 / Predicted values

# 残差の計算 / Calculate the residuals
residuals = y - y_pred  # 観測値と予測値の差 / The difference between observed and predicted values

# 残差平方和の計算 / Calculate the sum of squared residuals (SSR)
SSR = np.sum(residuals**2)  # 残差の二乗和 / Sum of squared residuals

# 決定係数 R² の計算 / Calculate the coefficient of determination (R²)
SS_tot = np.sum((y - mean_y)**2)  # 全変動の平方和 / Total sum of squares
R_squared = 1 - (SSR / SS_tot)  # 決定係数 / R²

# 結果を表示 / Display the results
print(f"1st round average (x): {mean_x}")  # 1回戦の平均 / Average of 1st round
print(f"2nd round average (y): {mean_y}")  # 2回戦の平均 / Average of 2nd round
print(f"1st round variance (s^2_x): {var_x}")  # 1回戦の分散 / Variance of 1st round
print(f"2nd round variance (s^2_y): {var_y}")  # 2回戦の分散 / Variance of 2nd round
print(f"Covariance (S_xy): {cov_xy}")  # 共分散 / Covariance
print(f"Correlation coefficient (r): {corr_xy:.4f}")  # 相関係数 / Correlation coefficient
print(f"Regression slope (m): {m:.4f}")  # 回帰直線の傾き / Slope of regression line
print(f"Regression intercept (b): {b:.4f}")  # 回帰直線の切片 / Intercept of regression line
print(f"Sum of squared residuals (SSR): {SSR:.4f}")  # 残差平方和 / Sum of squared residuals
print(f"Coefficient of determination (R²): {R_squared:.4f}")  # 決定係数 / Coefficient of determination (R²)

# 回帰直線をプロット / Plot the regression line
plt.figure(figsize=(10, 5))

# データ点と回帰直線のプロット / Plot data points and regression line
plt.subplot(1, 2, 1)  # サブプロット (左) / Subplot (left)
plt.scatter(x, y, color='blue', label='Data points')  # データ点 / Data points
plt.plot(x, y_pred, color='red', label='Regression line')  # 回帰直線 / Regression line
plt.xlabel('1st round scores (x)')  # 1回戦の得点 / 1st round scores (x)
plt.ylabel('2nd round scores (y)')  # 2回戦の得点 / 2nd round scores (y)
plt.legend()
plt.title('Regression Line and Data Points')  # 回帰直線とデータ点 / Regression line and data points

# 箱ひげ図のプロット / Plot box plot
plt.subplot(1, 2, 2)  # サブプロット (右) / Subplot (right)
plt.boxplot([x, y], labels=['1st round', '2nd round'])  # 1回戦と2回戦の箱ひげ図 / Box plot for 1st and 2nd round
plt.title('Box Plot of 1st and 2nd Round Scores')  # 1回戦と2回戦の箱ひげ図 / Box plot of 1st and 2nd round scores

# グラフを表示 / Show the plot
plt.tight_layout()  # レイアウト調整 / Adjust layout
plt.show()
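
As a cross-check (my addition): NumPy's polyfit solves the same ordinary least-squares problem, so its degree-1 fit should return the same slope and intercept computed by hand above.

# Cross-check the hand-computed regression against np.polyfit
m_np, b_np = np.polyfit(x, y, 1)
print(f"np.polyfit slope:     {m_np:.4f}")
print(f"np.polyfit intercept: {b_np:.4f}")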

基礎問題精講 2B Example 6: Calculating with Rational Expressions, and Transfer Functions

!pip install control
import numpy as np
import matplotlib.pyplot as plt
import control as ctrl
import sympy as sp

# 1. Comparing 1/(x(x+1)) with (1/x) - (1/(x+1))
# xの範囲を定義 / Define the range for x
x = np.linspace(0.1, 10, 100)  # xの範囲 / Range for x (avoid zero to prevent division by zero)

# 式1: 1/(x(x+1)) の計算 / Calculate 1/(x(x+1))
expr1 = 1 / (x * (x + 1))

# 式2: (1/x) - (1/(x+1)) の計算 / Calculate (1/x) - (1/(x+1))
expr2 = (1 / x) - (1 / (x + 1))

# 比較プロット / Comparison plot
plt.figure(figsize=(8, 6))
plt.plot(x, expr1, label=r'$1/(x(x+1))$', color='blue')
plt.plot(x, expr2, label=r'$(1/x) - (1/(x+1))$', color='red', linestyle='--')
plt.xlabel('x')
plt.ylabel('Value')
plt.title('Comparison of $1/(x(x+1))$ and $(1/x) - (1/(x+1))$')
plt.legend()
plt.grid(True)
plt.show()

# 2. Nyquist and Bode plots of 1/(s(s+1))
# 伝達関数 G(s) = 1 / (s * (s + 1)) の定義 / Define the transfer function G(s) = 1 / (s * (s + 1))
s = ctrl.TransferFunction.s  # s is the Laplace variable
G = 1 / (s * (s + 1))

# ナイキスト線図のプロット / Nyquist plot
plt.figure(figsize=(8, 6))
ctrl.nyquist(G)
plt.title('Nyquist Plot of $G(s) = 1/(s(s+1))$')
plt.grid(True)
plt.show()

# ボード線図のプロット / Bode plot
plt.figure(figsize=(8, 6))
ctrl.bode(G, dB=True, Hz=True, deg=True)
plt.suptitle('Bode Plot of $G(s) = 1/(s(s+1))$')
plt.show()


# 3. Inverse Laplace transform of 1/(s(s+1))
# ラプラス変換の変数sを定義 / Define the Laplace variable s
s = sp.symbols('s')

# 転送関数 G(s) = 1/(s * (s + 1)) を定義 / Define the transfer function G(s) = 1/(s * (s + 1))
G_s = 1 / (s * (s + 1))

# 逆ラプラス変換 / Inverse Laplace Transform
t = sp.symbols('t')
G_t = sp.inverse_laplace_transform(G_s, s, t)

# 結果の表示 / Display the result
print(f"Inverse Laplace Transform of 1/(s(s+1)): {G_t}")


基礎問題精講 2B Basic Example 86: Simultaneous Equations and the Coordinates of the Symmetric Point Q


import numpy as np
import pandas as pd
from numpy.linalg import inv, solve, det, lstsq, norm
from scipy.linalg import lu, qr, svd
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import spsolve
from sklearn.linear_model import Ridge, Lasso, ElasticNet, BayesianRidge, LinearRegression, RANSACRegressor
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import PLSRegression

# Matrix A and vector b for the system 2p - q = 2, p + 2q = 10
A = np.array([[2.0, -1.0],
              [1.0,  2.0]])
b = np.array([2.0, 10.0])
x0 = np.zeros_like(b)
tolerance = 1e-10
max_iter = 1000
omega = 1.25

# Dictionary to store the solutions
solutions = {}

# 1. Substitution Method
q = (2 * 10 - 2) / (4 + 1)
p = 10 - 2 * q
solutions['Substitution Method'] = np.array([p, q])

# 2. Elimination Method: doubling (2p - q = 2) and adding (p + 2q = 10) eliminates q, so 5p = 14
p = 14 / 5
q = 2 * p - 2
solutions['Elimination Method'] = np.array([p, q])

# 3. Coefficient Comparison
solutions['Coefficient Comparison'] = solutions['Substitution Method']

# 4. Inverse Matrix
solutions['Inverse Matrix'] = inv(A) @ b

# 5. Cramer's Rule
det_A = det(A)
A1, A2 = A.copy(), A.copy()
A1[:, 0], A2[:, 1] = b, b
solutions["Cramer's Rule"] = np.array([det(A1)/det_A, det(A2)/det_A])

# 6. Gaussian Elimination
Ab = np.hstack([A, b.reshape(-1,1)]).astype(float)
Ab[1] = Ab[1] - (Ab[1,0]/Ab[0,0]) * Ab[0]
q = Ab[1,2] / Ab[1,1]
p = (Ab[0,2] - Ab[0,1]*q) / Ab[0,0]
solutions['Gaussian Elimination'] = np.array([p, q])

# 7. Gauss–Jordan Elimination
Ab = np.hstack([A, b.reshape(-1,1)]).astype(float)
Ab[0] /= Ab[0,0]
Ab[1] -= Ab[1,0] * Ab[0]
Ab[1] /= Ab[1,1]
Ab[0] -= Ab[0,1] * Ab[1]
solutions['Gauss–Jordan Elimination'] = Ab[:,2]

# 8. LU Decomposition
P, L, U = lu(A)
y = solve(L, P @ b)
x = solve(U, y)
solutions['LU Decomposition'] = x

# 9. QR Decomposition
Q, R = qr(A)
y = Q.T @ b
solutions['QR Decomposition'] = solve(R, y)

# 10. SVD
U_svd, S_svd, VT = svd(A)
S_inv = np.diag(1/S_svd)
solutions['SVD'] = VT.T @ S_inv @ U_svd.T @ b

# 11. Jacobi Method
def jacobi(A, b):
    x = np.zeros_like(b, dtype=float)
    for _ in range(max_iter):
        x_new = np.zeros_like(x)
        x_new[0] = (b[0] + x[1]) / 2
        x_new[1] = (b[1] - x[0]) / 2
        if norm(x_new - x) < tolerance:
            break
        x = x_new
    return x
solutions['Jacobi Method'] = jacobi(A, b)

# 12. Gauss–Seidel Method
def gauss_seidel(A, b):
    x = np.zeros_like(b, dtype=float)
    for _ in range(max_iter):
        x_old = x.copy()
        x[0] = (b[0] + x[1]) / 2
        x[1] = (b[1] - x[0]) / 2
        if norm(x - x_old) < tolerance:
            break
    return x
solutions['Gauss–Seidel Method'] = gauss_seidel(A, b)

# 13. SOR Method
def sor(A, b):
    x = np.zeros_like(b, dtype=float)
    for _ in range(max_iter):
        x_old = x.copy()
        x[0] = (1 - omega) * x[0] + omega * (b[0] + x[1]) / 2
        x[1] = (1 - omega) * x[1] + omega * (b[1] - x[0]) / 2
        if norm(x - x_old) < tolerance:
            break
    return x
solutions['SOR Method'] = sor(A, b)

# 14. SSOR Method
def ssor(A, b):
    x = np.zeros_like(b, dtype=float)
    for _ in range(max_iter):
        x_old = x.copy()
        x[0] = (1 - omega) * x[0] + omega * (b[0] + x[1]) / 2
        x[1] = (1 - omega) * x[1] + omega * (b[1] - x[0]) / 2
        x[1] = (1 - omega) * x[1] + omega * (b[1] - x[0]) / 2
        x[0] = (1 - omega) * x[0] + omega * (b[0] + x[1]) / 2
        if norm(x - x_old) < tolerance:
            break
    return x
solutions['SSOR Method'] = ssor(A, b)

# 15. Conjugate Gradient
def cg(A, b):
    x = np.zeros_like(b)
    r = b - A @ x
    p = r.copy()
    for _ in range(max_iter):
        Ap = A @ p
        alpha = r @ r / (p @ Ap)
        x += alpha * p
        r_new = r - alpha * Ap
        if norm(r_new) < tolerance:
            break
        beta = r_new @ r_new / (r @ r)
        p = r_new + beta * p
        r = r_new
    return x
solutions['Conjugate Gradient'] = cg(A, b)

# 16. Steepest Descent
def steepest(A, b):
    x = np.zeros_like(b)
    for _ in range(max_iter):
        r = b - A @ x
        alpha = (r @ r) / (r @ (A @ r))
        x += alpha * r
        if norm(r) < tolerance:
            break
    return x
solutions['Steepest Descent'] = steepest(A, b)

# 17–20. Proxies for a small system (direct solves stand in for Multigrid/GMRES/BiCG/BiCGSTAB)
solutions['Multigrid (Jacobi Proxy)'] = jacobi(A, b)
solutions['GMRES Proxy'] = solve(A, b)
solutions['BiCG Proxy'] = solve(A, b)
solutions['BiCGSTAB Proxy'] = solve(A, b)

# 21. Least Squares
solutions['Least Squares'] = lstsq(A, b, rcond=None)[0]

# 22. Ridge
ridge = Ridge(alpha=1e-5, fit_intercept=False).fit(A, b)
solutions['Ridge'] = ridge.coef_

# 23. Lasso
lasso = Lasso(alpha=1e-5, fit_intercept=False, max_iter=10000).fit(A, b)
solutions['Lasso'] = lasso.coef_

# 24. Elastic Net
enet = ElasticNet(alpha=1e-5, l1_ratio=0.5, fit_intercept=False, max_iter=10000).fit(A, b)
solutions['Elastic Net'] = enet.coef_

# 25. PCR
Z = PCA(n_components=2).fit_transform(A)
pcr = LinearRegression().fit(Z, b)
solutions['PCR (Pseudo-Inverse)'] = np.linalg.pinv(A) @ b

# 26. PLS
pls = PLSRegression(n_components=2).fit(A, b)
solutions['PLS'] = pls.coef_.flatten()

# 27. Bayesian
bayes = BayesianRidge(fit_intercept=False).fit(A, b)
solutions['Bayesian'] = bayes.coef_

# 28. RANSAC (min_samples=1: each candidate model is fit to a single point)
ransac = RANSACRegressor(estimator=LinearRegression(fit_intercept=False), random_state=0, min_samples=1).fit(A, b)
solutions['RANSAC'] = ransac.estimator_.coef_

# 29. Moore–Penrose Pseudo-Inverse
solutions['Pseudo-Inverse'] = np.linalg.pinv(A) @ b

# 30. Sparse Solver
solutions['Sparse Solver'] = spsolve(csr_matrix(A), b)

# Collect the results into a DataFrame
df = pd.DataFrame(solutions).T
df.columns = ['p', 'q']
df.head(30)
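
Finally, a sanity check I have added: apart from the deliberately regularized variants (Ridge, Lasso, Elastic Net) and the approximate ones (PLS, RANSAC), every method above should land on the same point (p, q) = (2.8, 3.6).

# Compare every stored solution with the direct solve
x_exact = solve(A, b)  # (p, q) = (2.8, 3.6)
for name, sol in solutions.items():
    sol = np.asarray(sol, dtype=float)
    flag = "OK " if np.allclose(sol, x_exact, atol=1e-2) else "off"
    print(f"{flag} {name:28s} {np.round(sol, 4)}")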


基礎問題精講 2B Example 16: Complex Number Calculations


import numpy as np
import matplotlib.pyplot as plt

# 1. z^3 = 1 の複素数解のプロット / Plot the complex solutions for z^3 = 1
# The solutions of z^3 = 1 are 1, ω and ω^2, where ω = e^(2πi/3)
omega = np.exp(2j * np.pi / 3)  # ω = e^(2πi/3)
z1 = 1  # 1
z2 = omega  # ω
z3 = omega**2  # ω^2

# 複素数解のプロット / Plot complex solutions
plt.figure(figsize=(6, 6))
plt.plot([0, z1.real], [0, z1.imag], label='$z_1 = 1$', marker='o', color='blue')
plt.plot([0, z2.real], [0, z2.imag], label=r'$z_2 = \omega$', marker='o', color='red')
plt.plot([0, z3.real], [0, z3.imag], label=r'$z_3 = \omega^2$', marker='o', color='green')

# 単位円を描画 / Draw unit circle
circle = plt.Circle((0, 0), 1, color='black', fill=False, linestyle='--')
plt.gca().add_artist(circle)

# グラフ設定 / Set graph labels and title
plt.xlim(-1.5, 1.5)
plt.ylim(-1.5, 1.5)
plt.axhline(0, color='black',linewidth=1)
plt.axvline(0, color='black',linewidth=1)
plt.gca().set_aspect('equal', adjustable='box')
plt.title('Complex Solutions to $z^3 = 1$')
plt.legend(loc='upper left')
plt.grid(True)
plt.show()

# 2. 三相交流のプロット / Plotting three-phase AC waveforms
# 時間軸の定義 / Define time axis
t = np.linspace(0, 2 * np.pi, 500)  # 1周期を描画 / One cycle

# 三相交流波形の計算 / Calculate three-phase AC waveforms
V_phase_a = np.sin(t)  # 相Aの波形 / Phase A waveform
V_phase_b = np.sin(t - 2 * np.pi / 3)  # 相Bの波形 / Phase B waveform
V_phase_c = np.sin(t + 2 * np.pi / 3)  # 相Cの波形 / Phase C waveform

# 三相交流波形のプロット / Plot three-phase AC waveforms
plt.figure(figsize=(8, 6))
plt.plot(t, V_phase_a, label='Phase A', color='blue')
plt.plot(t, V_phase_b, label='Phase B', color='red', linestyle='--')
plt.plot(t, V_phase_c, label='Phase C', color='green', linestyle='-.')
plt.xlabel('Angle θ [rad]')  # t here runs over one cycle of phase angle, not seconds
plt.ylabel('Voltage [V]')
plt.title('Three-Phase AC Waveforms')
plt.legend()
plt.grid(True)
plt.show()
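
The two plots are more closely related than they might look: the cube roots of unity satisfy 1 + ω + ω² = 0, and the three phase voltages are exactly these roots rotating in time, which is why a balanced three-phase system sums to zero at every instant. A quick check (my addition), reusing the variables above:

# 1 + ω + ω² = 0, so the three phases cancel at every instant
print("1 + ω + ω² =", z1 + z2 + z3)  # ≈ 0 up to floating-point round-off
print("max |V_a + V_b + V_c| =", np.max(np.abs(V_phase_a + V_phase_b + V_phase_c)))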

青チャート 2B Important Example: The Distance Between a Point and a Line, and Machine Learning


import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import NearestNeighbors

# 1. Find the minimum distance between a point P on the line y = x and
#    the point Q where the perpendicular from P meets the line x - 2y - 4 = 0

# P lies on y = x, so it can be written as P(a, a)
def distance_pq(a):
    # Coordinates of P
    p = np.array([a, a])
    
    # Coordinates of Q: the foot of the perpendicular from P(a, a)
    # to the line x - 2y - 4 = 0
    x_q = (6*a + 4) / 5
    y_q = (3*a - 8) / 5
    q = np.array([x_q, y_q])
    
    # Squared distance D^2 between P and Q
    distance_squared = np.sum((p - q) ** 2)
    return distance_squared

# Minimize the squared distance with respect to a
from scipy.optimize import minimize

# Run the optimization (initial value x0 = 0)
result = minimize(lambda a: distance_pq(a), x0=0)
optimal_a = result.x[0]

# Coordinates of the optimal point P
optimal_p = np.array([optimal_a, optimal_a])

# Coordinates of the optimal point Q
optimal_x_q = (6*optimal_a + 4) / 5
optimal_y_q = (3*optimal_a - 8) / 5
optimal_q = np.array([optimal_x_q, optimal_y_q])

# Minimum distance
optimal_distance = np.sqrt(np.sum((optimal_p - optimal_q) ** 2))

# Display the results
print(f"P at the minimum distance: P({optimal_a}, {optimal_a})")
print(f"Q at the minimum distance: Q({optimal_x_q}, {optimal_y_q})")
print(f"Minimum distance: D = {optimal_distance}")

# 2. 機械学習における距離の概念と異常検知
# 正常データと異常データを生成 / Generate normal and anomalous data
normal_data = np.random.normal(loc=0, scale=1, size=(100, 2))  # 正常なデータ / Normal data
anomalous_data = np.random.normal(loc=5, scale=1, size=(10, 2))  # 異常なデータ / Anomalous data

# データを結合 / Combine data
data = np.vstack([normal_data, anomalous_data])

# k-NNモデルの作成 / Create k-NN model
knn = NearestNeighbors(n_neighbors=3)
knn.fit(normal_data)  # 正常なデータを使って学習 / Train on normal data

# 各点との距離を計算 / Calculate distances to the nearest neighbors
distances, _ = knn.kneighbors(data)

# 距離の閾値を設定 / Set a threshold for the distance
threshold = np.percentile(distances, 95)  # Use the 95th percentile as the threshold

# 異常値の検出 / Detect anomalies
anomalies = distances.max(axis=1) > threshold

# プロット / Plot
plt.figure(figsize=(8, 6))
plt.scatter(data[:, 0], data[:, 1], c=anomalies, cmap='coolwarm', label='Data points')
plt.title('Anomaly Detection using k-NN')
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.legend()
plt.show()
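
As a cross-check on the optimizer (my addition): since P - Q = (-(a+4)/5, 2(a+4)/5), the distance is D(a) = |a + 4| / √5, which vanishes at a = -4. The two lines are not parallel and intersect at (-4, -4), so the minimizer should return a ≈ -4 and D ≈ 0.

# Closed-form distance as a function of a, for comparison with the optimizer
a_grid = np.linspace(-10, 2, 200)
d_grid = np.abs(a_grid + 4) / np.sqrt(5)
plt.figure(figsize=(6, 4))
plt.plot(a_grid, d_grid, label=r'$D(a) = |a+4|/\sqrt{5}$')
plt.axvline(-4, color='red', linestyle='--', label='a = -4 (minimum)')
plt.xlabel('a')
plt.ylabel('Distance D')
plt.legend()
plt.grid(True)
plt.show()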

青チャート 2B Example 151: Trigonometric Equations and the FFT

import numpy as np
import matplotlib.pyplot as plt

# --- 定義 / Define Parameters ---
frequency = 440  # 周波数 / Frequency (440 Hz)
sampling_rate = 5000  # サンプリング周波数 / Sampling rate (5kHz)
duration = 1  # 時間 / Duration in seconds

# 時間軸 / Time axis
t = np.linspace(0, duration, int(sampling_rate * duration), endpoint=False)

# 信号 / Signal: sin(2θ) + sin(3θ) + sin(4θ)
signal = np.sin(2 * np.pi * 2 * frequency * t) + np.sin(2 * np.pi * 3 * frequency * t) + np.sin(2 * np.pi * 4 * frequency * t)

# Plot the waveform (first 10 ms only; a full second at 880+ Hz is an unreadable band)
plt.figure(figsize=(10, 6))
plt.plot(t[:50], signal[:50], label=r'$\sin(2\theta)+\sin(3\theta)+\sin(4\theta)$', color='black')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.title('Waveform of sin(2θ) + sin(3θ) + sin(4θ) with frequency 440Hz')
plt.grid(True)
plt.legend()
plt.show()

# --- FFT ---
# FFTの計算 / Compute the FFT of the signal
fft_signal = np.fft.fft(signal)
frequencies = np.fft.fftfreq(len(t), 1/sampling_rate)

# FFT結果の振幅 / Magnitude of the FFT
magnitude = np.abs(fft_signal)

# 正の周波数のみ表示 / Only display positive frequencies
positive_frequencies = frequencies[:len(frequencies)//2]
positive_magnitude = magnitude[:len(magnitude)//2]

# プロット / Plot the FFT result
plt.figure(figsize=(10, 6))
plt.plot(positive_frequencies, positive_magnitude, label='Magnitude of FFT', color='blue')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Magnitude')
plt.title('FFT of sin(2θ) + sin(3θ) + sin(4θ)')
plt.grid(True)
plt.legend()
plt.show()
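
Because the signal lasts exactly 1 s at 5 kHz, each FFT bin is 1 Hz wide and the three components fall on exact bins. The three largest peaks should therefore sit at 880 Hz, 1320 Hz and 1760 Hz (2, 3 and 4 times the 440 Hz base). A short check (my addition):

# Extract the three strongest spectral peaks
peak_idx = np.argsort(positive_magnitude)[-3:]  # indices of the 3 largest bins
peak_freqs = np.sort(positive_frequencies[peak_idx])
print("Detected peak frequencies [Hz]:", peak_freqs)  # expect [880, 1320, 1760]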

青チャート 3 Basic Example 146: Differentiating Composite Functions and Activation Functions

import numpy as np
from sympy import symbols, diff, simplify

# --- 1. 微分計算 / Derivative Calculation ---
# 変数 x を定義 / Define the variable x
x = symbols('x')

# 関数 y = 3(x^2 + 1)^3 を定義 / Define the function y = 3(x^2 + 1)^3
y = 3 * (x**2 + 1)**3

# y を x について微分 / Differentiate y with respect to x
y_prime = diff(y, x)

# 結果を簡約して表示 / Simplify and display the result
y_prime_simplified = simplify(y_prime)
print("導関数 y' =", y_prime_simplified)

# --- 2. 活性化関数とその微分 / Activation Functions and their Derivatives ---
# シグモイド関数とその微分 / Sigmoid and its derivative
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    return sigmoid(x) * (1 - sigmoid(x))

# ReLU関数とその微分 / ReLU and its derivative
def relu(x):
    return np.maximum(0, x)

def relu_derivative(x):
    return (x > 0).astype(float)

# ソフトマックス関数 / Softmax (row-wise: one probability distribution per sample)
def softmax(x):
    exp_x = np.exp(x - np.max(x, axis=-1, keepdims=True))  # subtract the max for numerical stability
    return exp_x / np.sum(exp_x, axis=-1, keepdims=True)

# --- 3. 合成関数の微分 / Derivative of Composed Functions ---
# チェーンルール / Chain rule: d/dx f(g(x)) = f'(g(x)) * g'(x)
def composite_derivative(f_derivative, g, g_derivative, x):
    return f_derivative(g(x)) * g_derivative(x)

# --- 4. バックプロパゲーション / Backpropagation ---
# Implementation of the forward pass and backpropagation
def forward_backward_propagation(X, y, weights1, weights2, learning_rate=0.01):
    # フォワードパス / Forward pass
    z1 = np.dot(X, weights1)
    a1 = sigmoid(z1)  # First-layer output (sigmoid activation)

    z2 = np.dot(a1, weights2)
    a2 = softmax(z2)  # Second-layer output (softmax activation)

    # コスト関数の計算 (クロスエントロピー) / Cost function (cross-entropy loss)
    m = y.shape[0]  # Number of samples
    cost = -np.sum(y * np.log(a2)) / m

    # バックプロパゲーション / Backpropagation
    # 2層目の誤差 / Error for second layer
    dz2 = a2 - y
    dw2 = np.dot(a1.T, dz2) / m
    
    # 1層目の誤差 / Error for first layer
    dz1 = np.dot(dz2, weights2.T) * sigmoid_derivative(z1)
    dw1 = np.dot(X.T, dz1) / m

    # 重みの更新 / Update weights
    weights1 -= learning_rate * dw1
    weights2 -= learning_rate * dw2

    return cost, weights1, weights2

# --- 5. ニューラルネットワークの初期化 / Initialize Neural Network ---
np.random.seed(42)
X = np.random.randn(5, 3)  # Input data (5 samples, 3 features)
y = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0]])  # One-hot encoded targets

# 重みの初期化 / Initialize weights
weights1 = np.random.randn(3, 4)  # First-layer weights (3 features -> 4 units)
weights2 = np.random.randn(4, 3)  # Second-layer weights (4 units -> 3 classes)

# 学習 / Training process
epochs = 1000
for epoch in range(epochs):
    cost, weights1, weights2 = forward_backward_propagation(X, y, weights1, weights2)
    
    if epoch % 100 == 0:
        print(f"Epoch {epoch} | Cost: {cost:.4f}")

# --- 最終的な出力の確認 / Final Output ---
z1 = np.dot(X, weights1)
a1 = sigmoid(z1)
z2 = np.dot(a1, weights2)
a2 = softmax(z2)  # Final output (softmax)
print("Final Output (Softmax):")
print(a2)
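
A standard way to validate both the SymPy result and the hand-written activation derivatives is a finite-difference gradient check (my addition):

# Compare analytic derivatives with central finite differences
def numerical_derivative(func, x0, eps=1e-6):
    return (func(x0 + eps) - func(x0 - eps)) / (2 * eps)

x0 = 0.7
print("sigmoid'(0.7): analytic =", sigmoid_derivative(x0),
      ", numerical =", numerical_derivative(sigmoid, x0))

# And the SymPy derivative of y = 3(x^2 + 1)^3, evaluated at x0
f_np = lambda t: 3 * (t**2 + 1)**3
print("y'(0.7): SymPy =", float(y_prime_simplified.subs(x, x0)),
      ", numerical =", numerical_derivative(f_np, x0))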

Extra: Integral Calculations

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm

# Quantities selected for visualization:
# 1. Expected Value
# 2. Variance
# 3. Entropy
# 4. Kernel Normalization

x_vals = np.linspace(-5, 5, 400)
p = norm.pdf(x_vals, loc=0, scale=1)  # Standard normal PDF
mean = np.sum(x_vals * p) * (x_vals[1] - x_vals[0])  # Expected value (Riemann sum)
variance = np.sum((x_vals - mean)**2 * p) * (x_vals[1] - x_vals[0])  # Variance
entropy = -np.sum(p * np.log(p + 1e-12)) * (x_vals[1] - x_vals[0])  # Differential entropy

# Check RBF kernel normalization: k(x, x') = exp(-(x - x')^2 / (2σ^2))
def rbf_kernel(x, x_prime=0.0, sigma=1.0):
    return np.exp(- (x - x_prime)**2 / (2 * sigma**2))

kernel_vals = rbf_kernel(x_vals)
kernel_area = np.sum(kernel_vals) * (x_vals[1] - x_vals[0])  # Integral (area under the kernel)

# Plot 1: PDF with the expected value and variance
plt.figure()
plt.plot(x_vals, p, label='Standard Normal PDF')
plt.axvline(mean, color='r', linestyle='--', label=f'Mean ≈ {mean:.2f}')
plt.title(f"Expected Value ≈ {mean:.2f}, Variance ≈ {variance:.2f}")
plt.xlabel("x")
plt.ylabel("p(x)")
plt.legend()
plt.grid(True)
plt.show()

# Plot 2: Entropy
plt.figure()
plt.plot(x_vals, -p * np.log(p + 1e-12), label='-p(x) log p(x)')
plt.title(f"Entropy ≈ {entropy:.3f}")
plt.xlabel("x")
plt.ylabel("Entropy Density")
plt.grid(True)
plt.legend()
plt.show()

# Plot 3: RBF kernel normalization
plt.figure()
plt.plot(x_vals, kernel_vals, label=f'RBF Kernel (area ≈ {kernel_area:.3f})')
plt.title("RBF Kernel Normalization")
plt.xlabel("x")
plt.ylabel("k(x, 0)")
plt.legend()
plt.grid(True)
plt.show()
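
Both numerical integrals have closed forms against which they can be checked (my addition): the differential entropy of N(0, 1) is 0.5·ln(2πe) ≈ 1.4189, and the σ = 1 RBF kernel integrates to √(2π) ≈ 2.5066.

# Compare the Riemann-sum integrals with their closed forms
print(f"entropy:     numeric ≈ {entropy:.4f}, closed form ≈ {0.5 * np.log(2 * np.pi * np.e):.4f}")
print(f"kernel area: numeric ≈ {kernel_area:.4f}, closed form ≈ {np.sqrt(2 * np.pi):.4f}")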
