Learning Linear Algebra by Copy-Paste: Python × Pokémon

Posted at 2025-04-17

I have put together a linear algebra learning curriculum that uses Pokémon as its subject matter. Please treat this as an initial draft (a working proposal).


# -*- coding: utf-8 -*-
# プログラム名 / Program Name: pokemon_vector_chapter.py
# 概要: ポケモンの「HPと素早さ」を用いた平面ベクトル、「HP・攻撃・素早さ」で空間ベクトルを学ぶ

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# -------------------------------
# ポケモンデータ(HP, 攻撃, 素早さ)
# Pokémon stats: HP, Attack, Speed
# -------------------------------
pokemon_stats = {
    "Pikachu":     {"HP": 35, "Atk": 55, "Spd": 90},
    "Charizard":   {"HP": 78, "Atk": 84, "Spd": 100},
    "Bulbasaur":   {"HP": 45, "Atk": 49, "Spd": 45},
    "Snorlax":     {"HP": 160, "Atk": 110, "Spd": 30},
    "Dragonite":   {"HP": 91, "Atk": 134, "Spd": 80},
    "Mewtwo":      {"HP": 106, "Atk": 110, "Spd": 130},
}

# 平面ベクトル:HPと素早さ / 2D vectors (HP, Speed)
vec2d = {name: np.array([stats["HP"], stats["Spd"]]) for name, stats in pokemon_stats.items()}

# 空間ベクトル:HP, 攻撃, 素早さ / 3D vectors (HP, Attack, Speed)
vec3d = {name: np.array([stats["HP"], stats["Atk"], stats["Spd"]]) for name, stats in pokemon_stats.items()}

# -------------------------------
# 1節 平面ベクトル / Plane Vectors
# -------------------------------

def plane_vector_operations():
    """1-1 ベクトルの和・差・実数倍 / Vector addition, subtraction, scalar multiple"""
    A, B = vec2d["Pikachu"], vec2d["Charizard"]
    print("[1-1] Pikachu + Charizard =", A + B)
    print("      Pikachu - Charizard =", A - B)
    print("      2 × Pikachu =", 2 * A)

def plane_vector_components():
    """1-2 ベクトルの成分表示 / Vector components"""
    for name, v in vec2d.items():
        print(f"[1-2] {name}: HP={v[0]}, Speed={v[1]}")

def plane_dot_product():
    """1-3 ベクトルの内積 / Dot product"""
    A, B = vec2d["Bulbasaur"], vec2d["Charizard"]
    dot = np.dot(A, B)
    print(f"[1-3] Dot(Bulbasaur, Charizard) = {dot}")

def triangle_area_by_vectors():
    """研究:三角形の面積 / Triangle area via the 2D cross product"""
    A = vec2d["Pikachu"]
    B = vec2d["Bulbasaur"]
    # z-component of the 2D cross product; np.cross on 2-element vectors is deprecated in NumPy 2.0
    cross_z = A[0] * B[1] - A[1] * B[0]
    area = 0.5 * np.abs(cross_z)
    print(f"[研究] Area of triangle formed by Pikachu and Bulbasaur = {area}")

def visualize_inner_product():
    """COLUMN: 内積の図形的意味 / Visual meaning of dot product"""
    A = vec2d["Pikachu"]
    B = vec2d["Charizard"]
    cos_theta = np.dot(A, B) / (np.linalg.norm(A) * np.linalg.norm(B))
    print(f"[COLUMN] Cosine of angle between Pikachu and Charizard = {cos_theta:.3f}")

def plane_vector_application():
    """1-4 ベクトルの応用 / Vector length and angle"""
    A = vec2d["Mewtwo"]
    length = np.linalg.norm(A)
    print(f"[1-4] |Mewtwo| = {length:.2f}")

# -------------------------------
# 2節 空間ベクトル / 3D Vectors
# -------------------------------

def space_vector_operations():
    """2-1 ベクトルの和・差・実数倍 / 3D vector operations"""
    A = vec3d["Dragonite"]
    B = vec3d["Mewtwo"]
    print("[2-1] Dragonite + Mewtwo =", A + B)
    print("      Dragonite - Mewtwo =", A - B)

def space_vector_components():
    """2-2 成分表示 / Display components"""
    for name, v in vec3d.items():
        print(f"[2-2] {name}: HP={v[0]}, Atk={v[1]}, Spd={v[2]}")

def space_dot_product():
    """2-3 内積 / Dot product in 3D"""
    A = vec3d["Dragonite"]
    B = vec3d["Mewtwo"]
    dot = np.dot(A, B)
    print(f"[2-3] Dot(Dragonite, Mewtwo) = {dot}")

def space_cross_product():
    """研究:外積 / Cross product (方向ベクトル)"""
    A = vec3d["Pikachu"]
    B = vec3d["Bulbasaur"]
    cross = np.cross(A, B)
    print(f"[研究] Cross(Pikachu, Bulbasaur) = {cross}")

def point_to_plane_distance():
    """研究:点と平面の距離 / Distance from point to plane"""
    P = vec3d["Snorlax"]
    normal_vec = np.array([1, -1, 1])
    d = -10  # 平面の定数項
    distance = np.abs(np.dot(normal_vec, P) + d) / np.linalg.norm(normal_vec)
    print(f"[研究] Distance from Snorlax to plane = {distance:.2f}")

# -------------------------------
# メイン関数 / Main Entry Point
# -------------------------------

def main():
    print("\n=== Pokémon Vector Chapter ===\n")
    # 平面ベクトル
    plane_vector_operations()
    plane_vector_components()
    plane_dot_product()
    triangle_area_by_vectors()
    visualize_inner_product()
    plane_vector_application()

    print("\n--- 空間ベクトル ---\n")
    space_vector_operations()
    space_vector_components()
    space_dot_product()
    space_cross_product()
    point_to_plane_distance()

if __name__ == "__main__":
    main()

# -*- coding: utf-8 -*-
# プログラム名 / Program Name: pokemon_determinant_chapter.py
# 概要: ポケモンのステータス(HP, 攻撃, 素早さ)を行列として扱い、行列式の性質と応用を学ぶ

import numpy as np
import sympy as sp

# -------------------------------
# ポケモンデータ(HP, 攻撃, 素早さ) / Pokémon Stats
# -------------------------------
pokemon_stats = {
    "Pikachu": [35, 55, 90],
    "Charizard": [78, 84, 100],
    "Bulbasaur": [45, 49, 45],
    "Snorlax": [160, 110, 30],
    "Mewtwo": [106, 110, 130],
    "Dragonite": [91, 134, 80],
}

# データ行列作成(例として2×2, 3×3, 4×4) / Create matrices from Pokémon stats
matrix2x2 = np.array([
    [pokemon_stats["Pikachu"][0], pokemon_stats["Charizard"][0]],  # HP
    [pokemon_stats["Pikachu"][2], pokemon_stats["Charizard"][2]]   # Speed
])

matrix3x3 = np.array([
    pokemon_stats["Pikachu"],
    pokemon_stats["Bulbasaur"],
    pokemon_stats["Charizard"]
])

matrix4x4 = np.array([
    [1, 2, 3, 4],
    [0, 1, 2, 3],
    [5, 1, 2, 0],
    [3, 1, 0, 1]
])

# ------------------------------------------
# 3-1 行列式の定義と性質 / Definition & Property
# ------------------------------------------

def det_2x2():
    """3-1-1 2次の行列式 / 2x2 determinant"""
    det = np.linalg.det(matrix2x2)
    print(f"[3-1-1] det(2x2 Pikachu-Charizard) = {det:.2f}")

def det_nxn():
    """3-1-2 n次の行列式 / General determinant"""
    det = np.linalg.det(matrix3x3)
    print(f"[3-1-2] det(3x3 Pikachu-Bulbasaur-Charizard) = {det:.2f}")

def det_expansion():
    """3-1-3 展開(Laplace展開)"""
    M = sp.Matrix(matrix3x3)
    det = M.det(method='berkowitz')  # or .det()
    print(f"[3-1-3] Determinant expansion (sympy) = {det}")

def symbolic_determinant():
    """3-1-4 文字を含む行列式"""
    a, b, c = sp.symbols('a b c')
    M = sp.Matrix([[a, 1], [2, b]])
    print("[3-1-4] Determinant with symbols:")
    sp.pprint(M.det())

def det_product_property():
    """3-1-5 行列の積の行列式 = 行列式の積"""
    A = np.array([[2, 1], [3, 4]])
    B = np.array([[1, 0], [5, 2]])
    det_A = np.linalg.det(A)
    det_B = np.linalg.det(B)
    det_AB = np.linalg.det(A @ B)
    print(f"[3-1-5] det(A) * det(B) = {det_A:.2f} * {det_B:.2f} = {det_A * det_B:.2f}")
    print(f"         det(AB) = {det_AB:.2f}")

def permutation_definition():
    """研究:順列を用いた定義 / Determinant via the permutation definition"""
    M = sp.Matrix(matrix3x3)
    # sympy has no literal "permutation" method; Bareiss elimination is used here,
    # and an explicit permutation-sum version is sketched in leibniz_determinant() below.
    print("[研究] Determinant (cf. permutation definition):")
    sp.pprint(M.det(method='bareiss'))

def amida_column():
    """COLUMN:阿弥陀くじのような視点で行列式を観察"""
    print("[COLUMN] 行列の各列をたどることで、置換のように結びつけられる視覚的イメージあり(阿弥陀くじ的)")

# ------------------------------------------
# 3-2 行列式の応用 / Applications
# ------------------------------------------

def determinant_inverse():
    """3-2-1 行列式と逆行列"""
    A = np.array([[2, 3], [1, 4]])
    det_A = np.linalg.det(A)
    if det_A != 0:
        inv_A = np.linalg.inv(A)
        print("[3-2-1] Inverse exists. Inverse of A:")
        print(inv_A)
    else:
        print("[3-2-1] Determinant is zero. Inverse does not exist.")

def cramer_rule_solution():
    """研究:クラメルの公式による解 / Cramer's Rule"""
    A = np.array([[1, 2], [3, 4]])
    b = np.array([5, 6])
    detA = np.linalg.det(A)
    if detA != 0:
        A1 = A.copy(); A1[:, 0] = b
        A2 = A.copy(); A2[:, 1] = b
        x1 = np.linalg.det(A1) / detA
        x2 = np.linalg.det(A2) / detA
        print(f"[研究] Cramer's Rule Solution: x1 = {x1:.2f}, x2 = {x2:.2f}")
    else:
        print("[研究] Singular matrix. No unique solution.")

def geometric_meaning():
    """3-2-3 図形的意味:行列式は面積(2次)・体積(3次)を表す"""
    area = np.abs(np.linalg.det(matrix2x2))
    volume = np.abs(np.linalg.det(matrix3x3))
    print(f"[3-2-3] Area of parallelogram (2D) = {area:.2f}")
    print(f"         Volume of parallelepiped (3D) = {volume:.2f}")

# -------------------------------
# メイン関数 / Main
# -------------------------------

def main():
    print("\n=== Pokémon Determinant Chapter ===\n")
    # 3-1 定義と性質
    det_2x2()
    det_nxn()
    det_expansion()
    symbolic_determinant()
    det_product_property()
    permutation_definition()
    amida_column()

    print("\n--- Applications of Determinant ---\n")
    determinant_inverse()
    cramer_rule_solution()
    geometric_meaning()

if __name__ == "__main__":
    main()

# -*- coding: utf-8 -*-
# プログラム名: pokemon_matrix_applications.py
# 概要: ポケモンのベクトル・行列を用いて1次変換と固有値・対角化を学ぶ

import numpy as np
import sympy as sp
import matplotlib.pyplot as plt

# --- ポケモンの位置ベクトル(HP, Speed) / 2D vectors for transformation ---
pokemon_vec2d = {
    "Pikachu": np.array([35, 90]),
    "Charizard": np.array([78, 100]),
    "Bulbasaur": np.array([45, 45]),
}

# -------------------------------
# 1節:1次変換 / Linear Transformations
# -------------------------------

def linear_transform_definition():
    """1-1 1次変換の定義"""
    T = np.array([[2, 0], [0, 1]])  # x方向に2倍の変換
    v = pokemon_vec2d["Pikachu"]
    result = T @ v
    print("[1-1] T(Pikachu) =", result)

def rotation_transformation():
    """1-2 回転を表す1次変換"""
    theta = np.pi / 4  # 45度回転
    R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
    v = pokemon_vec2d["Bulbasaur"]
    result = R @ v
    print("[1-2] Rotation(Bulbasaur) =", result)

def composition_and_inverse():
    """1-3 合成変換と逆変換"""
    S = np.array([[1, 2], [0, 1]])
    T = np.array([[2, 0], [0, 3]])
    ST = S @ T
    inv_ST = np.linalg.inv(ST)
    print("[1-3] S・T 合成行列:\n", ST)
    print("       S・T の逆行列:\n", inv_ST)

def reflection_across_line():
    """研究:原点を通る直線に関する対称移動"""
    # y = x に関する対称移動
    R = np.array([[0, 1], [1, 0]])
    v = pokemon_vec2d["Charizard"]
    print("[研究] Reflect(Charizard) across y = x =", R @ v)

def linearity_of_transformation():
    """1-4 線形性:T(a*v + b*w) = a*T(v) + b*T(w)"""
    v = pokemon_vec2d["Pikachu"]
    w = pokemon_vec2d["Bulbasaur"]
    T = np.array([[1, 1], [0, 1]])
    a, b = 2, -1
    left = T @ (a * v + b * w)
    right = a * (T @ v) + b * (T @ w)
    print("[1-4] Linear check: Left == Right ? ", np.allclose(left, right))

def transformation_and_line():
    """1-5 1次変換と直線:直線が直線に移ることを確認"""
    P1 = np.array([0, 0])
    P2 = np.array([1, 2])
    L = np.vstack([P1, P2])
    T = np.array([[1, 1], [0, 2]])
    L_trans = (T @ L.T).T
    print("[1-5] Line before:", L)
    print("       Line after :", L_trans)

def transformation_and_conic():
    """1-6 1次変換と2次曲線の関係(例: 円→楕円)"""
    circle = np.array([[np.cos(t), np.sin(t)] for t in np.linspace(0, 2*np.pi, 100)])
    A = np.array([[2, 1], [1, 1]])
    ellipse = (A @ circle.T).T
    plt.figure()
    plt.plot(circle[:, 0], circle[:, 1], label='Unit Circle')
    plt.plot(ellipse[:, 0], ellipse[:, 1], label='Transformed Ellipse')
    plt.axis('equal')
    plt.legend()
    plt.title("[1-6] Transformation of Circle to Ellipse")
    plt.show()

# COLUMN:空間図形の応用(補助)
def spatial_column():
    print("[COLUMN] 空間図形にも同様の変換が拡張可能(例:回転体、対称体)")

# 研究:ベクトル空間・部分空間・基底
def vector_space_study():
    """ベクトル空間と基底の例 / Vector space and basis example"""
    basis = np.array([[1, 0], [0, 1]])  # standard basis {e1, e2} as columns
    v = pokemon_vec2d["Pikachu"]        # express Pikachu's (HP, Speed) in that basis
    coeffs = np.linalg.solve(basis.T, v)
    print("[研究] 基底 {e1, e2} による Pikachu の表現 =", coeffs)

# -------------------------------
# 2節:固有値と対角化 / Eigenvalues & Diagonalization
# -------------------------------

def eigenvalues_and_vectors():
    """2-1 固有値・固有ベクトル"""
    A = np.array([[4, 1], [2, 3]])
    eigvals, eigvecs = np.linalg.eig(A)
    print("[2-1] Eigenvalues:", eigvals)
    print("       Eigenvectors:\n", eigvecs)

def diagonalization():
    """2-2 正方行列の対角化 / Diagonalization of a square matrix"""
    A = np.array([[4, 1], [2, 3]])
    eigvals, P = np.linalg.eig(A)   # columns of P are the eigenvectors
    D = np.diag(eigvals)
    P_inv = np.linalg.inv(P)
    print("[2-2] A = PDP⁻¹ reconstruction:\n", P @ D @ P_inv)

def diagonalize_symmetric():
    """2-3 対称行列の対角化"""
    A = np.array([[2, -1], [-1, 2]])
    eigvals, eigvecs = np.linalg.eigh(A)
    print("[2-3] Symmetric Matrix Diagonalization")
    print("       Eigenvalues:", eigvals)
    print("       Eigenvectors:\n", eigvecs)

def application_of_diagonalization():
    """2-4 対角化の応用例(例:べき乗計算の高速化)"""
    A = np.array([[4, 1], [2, 3]])
    eigvals, eigvecs = np.linalg.eig(A)
    D = np.diag(eigvals)
    P = eigvecs
    n = 5
    A_pow_n = P @ np.linalg.matrix_power(D, n) @ np.linalg.inv(P)
    print(f"[2-4] A^{n} via diagonalization =\n", A_pow_n)

# -------------------------------
# メイン関数 / Main
# -------------------------------

def main():
    print("\n=== Pokémon Matrix Applications ===\n")
    # 1節:1次変換
    linear_transform_definition()
    rotation_transformation()
    composition_and_inverse()
    reflection_across_line()
    linearity_of_transformation()
    transformation_and_line()
    transformation_and_conic()
    spatial_column()
    vector_space_study()

    # 2節:固有値と対角化
    eigenvalues_and_vectors()
    diagonalization()
    diagonalize_symmetric()
    application_of_diagonalization()

if __name__ == "__main__":
    main()

# -*- coding: utf-8 -*-
# プログラム名: pokemon_linear_algebra_ml.py
# 概要: ポケモンの種族値を用いて、機械学習における線形代数学の応用を学ぶ

import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from numpy.linalg import svd, norm
import torch

# -------------------------------
# ポケモンデータ(HP, Atk, Spd)
# -------------------------------
pokemon_stats = {
    "Pikachu": [35, 55, 90],
    "Charizard": [78, 84, 100],
    "Bulbasaur": [45, 49, 45],
    "Snorlax": [160, 110, 30],
    "Mewtwo": [106, 110, 130],
    "Dragonite": [91, 134, 80],
}
names = list(pokemon_stats.keys())
X = np.array(list(pokemon_stats.values()))  # 6×3 行列

# -------------------------------
# 1. ベクトル内積 → 類似度(コサイン類似度)
# -------------------------------
def cosine_similarity():
    a, b = X[0], X[1]  # Pikachu vs Charizard
    cos_sim = np.dot(a, b) / (norm(a) * norm(b))
    print(f"[1] Cosine Similarity (Pikachu, Charizard): {cos_sim:.3f}")

# -------------------------------
# 2. 行列の積 → NNの1層変換(仮想)
# -------------------------------
def matrix_multiplication_nn():
    W = np.random.randn(3, 2)  # 重み:3入力→2出力
    out = X @ W  # 種族値×重み = 出力
    print("[2] NN Output Shape:", out.shape)
    print("    First Row Output:", out[0])

# -------------------------------
# 3. 固有値・固有ベクトル → PCA
# -------------------------------
def principal_component_analysis():
    pca = PCA(n_components=2)
    X_pca = pca.fit_transform(X)
    plt.scatter(X_pca[:, 0], X_pca[:, 1])
    for i, name in enumerate(names):
        plt.text(X_pca[i, 0]+1, X_pca[i, 1]+1, name)
    plt.title("[3] PCA of Pokémon Stats")
    plt.xlabel("PC1")
    plt.ylabel("PC2")
    plt.axis("equal")
    plt.grid()
    plt.show()

# -------------------------------
# 4. SVD → 次元削減、推薦
# -------------------------------
def singular_value_decomposition():
    U, S, Vt = svd(X)
    print(f"[4] SVD - Singular Values: {S}")
    rank_2_approx = (U[:, :2] * S[:2]) @ Vt[:2, :]
    print("    Reconstructed Pikachu (approx):", rank_2_approx[0])

# -------------------------------
# 5. ノルムと距離 → 最近傍法
# -------------------------------
def nearest_neighbor():
    target = X[0]  # Pikachu
    distances = np.linalg.norm(X - target, axis=1)
    nearest_index = np.argsort(distances)[1]  # 除く自身
    print(f"[5] Nearest to Pikachu is: {names[nearest_index]} (distance: {distances[nearest_index]:.2f})")

# -------------------------------
# 6. 行列の階数 → 特徴の冗長性確認
# -------------------------------
def rank_check():
    r = np.linalg.matrix_rank(X)
    print(f"[6] Matrix Rank of Stats: {r} (out of {X.shape[1]})")

# -------------------------------
# 7. 逆伝播におけるヤコビ行列(PyTorchで微分)
# -------------------------------
def jacobian_example():
    x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
    W = torch.tensor([[1.0, -1.0, 2.0]])  # requires_grad=False is already the default
    y = W @ x                             # shape (1,), so backward() needs no explicit gradient
    y.backward()
    print(f"[7] Gradient ∂y/∂x (the single row of the Jacobian): {x.grad.numpy()}")

# -------------------------------
# 8. Tensor演算の基礎(PyTorchテンソル)
# -------------------------------
def tensor_operations():
    T = torch.tensor(X, dtype=torch.float32)
    print(f"[8] Tensor shape: {T.shape}")
    mean_stats = torch.mean(T, dim=0)
    print(f"    Mean HP, Atk, Spd: {mean_stats.numpy()}")

# -------------------------------
# メイン関数 / Main
# -------------------------------
def main():
    print("\n=== Pokémon × Linear Algebra for ML ===\n")
    cosine_similarity()
    matrix_multiplication_nn()
    principal_component_analysis()
    singular_value_decomposition()
    nearest_neighbor()
    rank_check()
    jacobian_example()
    tensor_operations()

if __name__ == "__main__":
    main()

import numpy as np

# --- 係数行列 A と定数ベクトル b の定義 ---
# x = HP, y = Atk, z = Spd
A = np.array([
    [1, 1, 1],    # x + y + z = 260
    [1, 1, 0],    # x + y = 140
    [0, -1, 1]    # z - y = 30
])
b = np.array([260, 140, 30])

# --- 解を求める ---
solution = np.linalg.solve(A, b)
hp, atk, spd = solution

# --- 結果表示 ---
print("=== ポケモンの能力値(連立方程式による) ===")
print(f"HP       = {hp:.0f}")
print(f"攻撃(Atk) = {atk:.0f}")
print(f"素早さ(Spd) = {spd:.0f}")

# -*- coding: utf-8 -*-
# プログラム名: pokemon_linear_algebra_ml_full.py
# 概要: ポケモンの種族値で機械学習に必要な線形代数学を統合学習(全8テーマ+回帰分析強化)

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics.pairwise import rbf_kernel
from scipy.linalg import expm, lu, qr
import torch
import tensorly as tl
from tensorly.decomposition import parafac

# -------------------------------
# ポケモン種族値(HP, Atk, Spd)
# -------------------------------
pokemon_stats = {
    "Pikachu":     [35, 55, 90],
    "Charizard":   [78, 84, 100],
    "Bulbasaur":   [45, 49, 45],
    "Snorlax":     [160, 110, 30],
    "Mewtwo":      [106, 110, 130],
    "Dragonite":   [91, 134, 80],
}
names = list(pokemon_stats.keys())
X = np.array(list(pokemon_stats.values()))  # shape: (6, 3)

# -------------------------------
# 1. 射影行列(最小二乗、PCA)
# -------------------------------
def projection_matrix():
    A = X[:, :2]  # HP, Atk の2列 / the HP and Atk columns
    P = A @ np.linalg.inv(A.T @ A) @ A.T  # projection onto the column space of A
    proj = P @ X[:, 2]                    # project the Spd column onto span{HP, Atk}
    print("[1] Spd を HP-Atk 列空間へ射影 / Projection of Spd onto the HP-Atk column space:\n", proj)

# -------------------------------
# 2. 対称行列の対角化(クラスタ分離)
# -------------------------------
def diagonalize_symmetric():
    M = np.cov(X.T)
    eigvals, eigvecs = np.linalg.eigh(M)
    print("[2] 固有値:\n", eigvals)
    print("     固有ベクトル:\n", eigvecs)

# -------------------------------
# 3. Gram行列とRBFカーネル
# -------------------------------
def gram_kernel_matrices():
    G = X @ X.T
    print("[3] Gram行列(線形):\n", G)
    G_rbf = rbf_kernel(X, gamma=0.001)
    sns.heatmap(G_rbf, xticklabels=names, yticklabels=names, annot=True, fmt=".2f")
    plt.title("RBF Kernel (ポケモン類似度)")
    plt.show()

# -------------------------------
# 4. 正定値行列(共分散・GP)
# -------------------------------
def positive_definite_cov():
    cov = np.cov(X.T)
    is_pd = np.all(np.linalg.eigvals(cov) > 0)
    print(f"[4] 共分散行列は正定値か?: {is_pd}")
    print("     共分散行列:\n", cov)

# -------------------------------
# 5. 行列指数関数 exp(A)
# -------------------------------
def matrix_exponential():
    A = np.array([[0, 1], [-1, 0]])
    expA = expm(A)
    print("[5] 行列指数関数 exp(A):\n", expA)

# -------------------------------
# 6. LU分解
# -------------------------------
def lu_decomposition():
    A = np.array([[2, 3], [5, 4]])
    P, L, U = lu(A)
    print("[6] LU分解:")
    print("    L:\n", L)
    print("    U:\n", U)

# -------------------------------
# 7. QR分解(回帰+R²+残差)
# -------------------------------
def qr_decomposition():
    X_hp = np.c_[np.ones((X.shape[0], 1)), X[:, 0]]  # 定数項 + HP
    y_atk = X[:, 1]
    Q, R = qr(X_hp, mode='economic')
    beta = np.linalg.solve(R, Q.T @ y_atk)
    y_pred = X_hp @ beta
    residuals = y_atk - y_pred
    r2 = 1 - np.sum(residuals**2) / np.sum((y_atk - np.mean(y_atk))**2)

    print("[7] QR回帰: Atk = β₀ + β₁·HP")
    print(f"    回帰係数 β = {beta}")
    print(f"    決定係数 R² = {r2:.3f}")
    print(f"    残差 = {residuals}")

    plt.scatter(X[:, 0], y_atk, label='True Atk', color='blue')
    plt.plot(X[:, 0], y_pred, label='Predicted Atk', color='red')
    plt.xlabel("HP")
    plt.ylabel("Atk")
    plt.title("QR Regression: HP → Atk")
    plt.legend()
    plt.grid()
    plt.show()

# -------------------------------
# 8. テンソル分解(CP分解)
# -------------------------------
def tensor_decomposition():
    tensor = np.random.rand(6, 3, 2)  # ポケモン×特徴×状況(例:性格など)
    cp_result = parafac(tensor, rank=2)
    print("[8] CPテンソル分解成功(ランク2)")
    print("     各モードの因子形状:")
    for i, factor in enumerate(cp_result.factors):
        print(f"     モード{i+1}: {factor.shape}")

# -------------------------------
# メイン関数
# -------------------------------
def main():
    print("\n=== Pokémon × 機械学習 × 線形代数学 (Full Version) ===\n")
    projection_matrix()
    diagonalize_symmetric()
    gram_kernel_matrices()
    positive_definite_cov()
    matrix_exponential()
    lu_decomposition()
    qr_decomposition()
    tensor_decomposition()

if __name__ == "__main__":
    main()

from sympy import Matrix, symbols, eye, pprint, solve

# プログラム名: jordan_form_construction_conditions.py
# 内容: ポケモンの種族値から作成した行列に対して、ジョルダン標準形を求める
# Program: Jordan Canonical Form computation using Pokémon stats

# --- ポケモンの種族値行列 / Pokémon base stat matrix (example: Pikachu, Bulbasaur, Charmander) ---
# [HP, Attack, Defense]
A = Matrix([
    [35, 55, 40],    # Pikachu
    [45, 49, 49],    # Bulbasaur
    [39, 52, 43]     # Charmander
])

print("=== ポケモン種族値行列 A ===")
pprint(A)

# --- 固有値と代数的重複度 / Eigenvalues and their multiplicities ---
eigvals = A.eigenvals()
print("\n=== 固有値とその重複度 / Eigenvalues and Multiplicities ===")
for val, mult in eigvals.items():
    print(f"λ = {val}, 重複度 / multiplicity = {mult}")

# --- 固有ベクトルと幾何的重複度 / Eigenvectors and Geometric Multiplicity ---
print("\n=== 固有ベクトル / Eigenvectors ===")
eigvects = A.eigenvects()
for val, mult, vects in eigvects:
    print(f"\nλ = {val}")
    print(f"幾何的重複度 / Geometric multiplicity: {len(vects)}")
    print("固有ベクトル / Eigenvectors:")
    for v in vects:
        pprint(v)

# --- ジョルダン標準形の計算 / Jordan canonical form ---
P, J = A.jordan_form()
print("\n=== ジョルダン標準形 J / Jordan Form ===")
pprint(J)

print("\n=== ジョルダン変換行列 P / Jordan Basis Matrix ===")
pprint(P)

# --- 対角化可能性の判定 / Diagonalizability ---
is_diag = A.is_diagonalizable()
print("\n=== Aは対角化可能か? / Is A Diagonalizable? ===")
print("はい / Yes" if is_diag else "いいえ / No")



# プログラム名: pokemon_matrix_analysis.py
# 内容: ポケモン種族値に対するLU分解・固有値・SVD・PCA・NMFなどの線形代数解析を一括実施

import numpy as np
from scipy.linalg import lu, eig, svd, norm
from sklearn.decomposition import PCA, NMF
from numpy.linalg import eigvals

# --- サンプルポケモン種族値行列 / Pokémon base stats matrix ---
# 各行:ポケモン(例:ピカチュウ、フシギダネ、リザードン)
# 各列:HP, 攻撃, 防御, 特攻, 特防, 素早さ
A = np.array([
    [35, 55, 40, 50, 50, 90],   # Pikachu
    [45, 49, 49, 65, 65, 45],   # Bulbasaur
    [78, 84, 78, 109, 85, 100], # Charizard
    [130, 85, 80, 85, 95, 60],  # Gyarados
])

print("=== ポケモン種族値行列 A ===")
print(A)

# --- 1.4 LU分解 / LU Decomposition ---
P, L, U = lu(A)
print("\n[1.4] LU分解")
print("L =\n", L)
print("U =\n", U)

# --- 1.5 直交行列の作成と部分空間の基底(QR分解で代用) ---
Q, R = np.linalg.qr(A.T)
print("\n[1.5] QR分解(直交基底)")
print("Q(部分空間の基底) =\n", Q)

# --- 1.6 固有値と固有ベクトル / Eigenvalues & Eigenvectors ---
eigvals_, eigvecs = eig(A @ A.T)
print("\n[1.6] 固有値 =\n", eigvals_)
print("固有ベクトル(行列A A^Tの) =\n", eigvecs)

# --- 1.7 正定値対称性チェック(A^T A) ---
ATA = A.T @ A
eig_ATA = eigvals(ATA)
is_pos_def = np.all(eig_ATA > 0)
print("\n[1.7] A^T A の固有値 = ", eig_ATA)
print("→ 正定値?", "Yes" if is_pos_def else "No")

# --- 1.8 特異値分解 / Singular Value Decomposition ---
U_svd, S_svd, Vh_svd = svd(A)
print("\n[1.8] 特異値 = ", S_svd)

# --- 1.9 主成分分析(PCA)による次元削減 ---
pca = PCA(n_components=2)
A_pca = pca.fit_transform(A)
print("\n[1.9] PCAによる低次元表現(2次元) =\n", A_pca)

# --- 1.10 レイリー商の計算(最大固有値に対応) ---
x = np.random.rand(A.shape[1], 1)
x = x / norm(x)
rayleigh = (x.T @ ATA @ x) / (x.T @ x)
print("\n[1.10] レイリー商(A^T A) = ", rayleigh.item())

# --- 1.11 ノルムの計算(行列・ベクトル) ---
print("\n[1.11] 行列ノルム(AのFrobeniusノルム) = ", norm(A, 'fro'))
print("1番目のポケモン(ピカチュウ)の種族値ノルム = ", norm(A[0]))

# --- 1.12 非負値行列分解 / Non-negative Matrix Factorization ---
nmf = NMF(n_components=2, init='random', random_state=0)
W = nmf.fit_transform(A)
H = nmf.components_
print("\n[1.12] NMF: W(特徴行列) =\n", W)
print("NMF: H(基底行列) =\n", H)

# プログラム名: pokemon_stats_learning_embed.py
# 内容: 初代ポケモンの種族値をコード内に定義し、DNNでタイプ分類

import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
import numpy as np

# --- 初代ポケモンデータ定義(例:9体)---
pokemon_data = [
    # Name,         HP, Atk, Def, SpA, SpD, Spe, Type1
    ['Bulbasaur',    45,  49,  49,  65,  65,  45, 'Grass'],
    ['Ivysaur',      60,  62,  63,  80,  80,  60, 'Grass'],
    ['Venusaur',     80,  82,  83, 100, 100,  80, 'Grass'],
    ['Charmander',   39,  52,  43,  60,  50,  65, 'Fire'],
    ['Charmeleon',   58,  64,  58,  80,  65,  80, 'Fire'],
    ['Charizard',    78,  84,  78, 109,  85, 100, 'Fire'],
    ['Squirtle',     44,  48,  65,  50,  64,  43, 'Water'],
    ['Wartortle',    59,  63,  80,  65,  80,  58, 'Water'],
    ['Blastoise',    79,  83, 100,  85, 105,  78, 'Water']
]

# --- NumPy変換 ---
names = [row[0] for row in pokemon_data]
X = np.array([row[1:7] for row in pokemon_data], dtype=np.float32)
y = [row[7] for row in pokemon_data]

# --- ラベルエンコード&標準化 ---
y = LabelEncoder().fit_transform(y)
X = StandardScaler().fit_transform(X)

# --- 学習用データ変換 ---
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)
X_train = torch.tensor(X_train, dtype=torch.float32)
X_test  = torch.tensor(X_test, dtype=torch.float32)
y_train = torch.tensor(y_train, dtype=torch.long)
y_test  = torch.tensor(y_test, dtype=torch.long)

# --- DNNモデル定義 ---
class PokemonNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(6, 16),
            nn.ReLU(),
            nn.Linear(16, 3)  # Grass, Fire, Water
        )
    def forward(self, x):
        return self.fc(x)

# --- 学習 ---
model = PokemonNet()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

for epoch in range(100):
    outputs = model(X_train)
    loss = criterion(outputs, y_train)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if epoch % 20 == 0:
        print(f'Epoch {epoch}, Loss: {loss.item():.4f}')

# --- テスト評価 ---
with torch.no_grad():
    test_outputs = model(X_test)
    predicted = torch.argmax(test_outputs, dim=1)
    accuracy = (predicted == y_test).float().mean()
    print(f'Test Accuracy: {accuracy:.4f}')



# プログラム名: pokemon_matrix_methods_embed.py
# 内容: 初代ポケモン(埋め込みデータ)の行列操作(QR分解・PCA・回帰)

import numpy as np
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt

# --- 種族値データ定義(例:9体) ---
pokemon_data = [
    # Name,         HP, Atk, Def, SpA, SpD, Spe, Type
    ['Bulbasaur',    45, 49, 49, 65, 65, 45, 'Grass'],
    ['Ivysaur',      60, 62, 63, 80, 80, 60, 'Grass'],
    ['Venusaur',     80, 82, 83,100,100, 80, 'Grass'],
    ['Charmander',   39, 52, 43, 60, 50, 65, 'Fire'],
    ['Charmeleon',   58, 64, 58, 80, 65, 80, 'Fire'],
    ['Charizard',    78, 84, 78,109, 85,100, 'Fire'],
    ['Squirtle',     44, 48, 65, 50, 64, 43, 'Water'],
    ['Wartortle',    59, 63, 80, 65, 80, 58, 'Water'],
    ['Blastoise',    79, 83,100, 85,105, 78, 'Water']
]

names = [row[0] for row in pokemon_data]
X = np.array([row[1:7] for row in pokemon_data], dtype=np.float32)

# --- 2.1 QR分解 ---
Q, R = np.linalg.qr(X)
print("QR decomposition completed. Q shape:", Q.shape)

# --- 2.2 最小二乗法(HP ~ Atk + Def) ---
Y = X[:, 0].reshape(-1, 1)  # HP
A = X[:, [1, 2]]            # Attack, Defense
reg = LinearRegression().fit(A, Y)
print("Least Squares R²:", reg.score(A, Y))

# --- 2.3 PCAによる次元削減 ---
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
print("Explained Variance Ratio:", pca.explained_variance_ratio_)

# --- 2.4 可視化 ---
plt.figure(figsize=(6, 5))
plt.scatter(X_pca[:, 0], X_pca[:, 1], c='skyblue', edgecolor='black')
for i, name in enumerate(names):
    plt.text(X_pca[i, 0], X_pca[i, 1], name, fontsize=8)
plt.title("PCA of Gen 1 Pokémon Base Stats")
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.grid(True)
plt.tight_layout()
plt.show()

# Program Name: pokemon_matrix_analysis.py
# Creation Date: 20250423
# Overview: Analyze Pokémon base stats using spectral decomposition, matrix functions, eigenvalues, SVD, PCA, Gram-Schmidt orthogonalization, basis transformation, and projection
# Usage: To run the program, use the command `python pokemon_matrix_analysis.py` in the terminal

# --- ライブラリのインストール(必要な場合) / Install required libraries ---
# !pip install numpy matplotlib scipy

# --- ライブラリのインポート / Import libraries ---
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import expm, inv

# --- 数値の設定 / Define parameters and data in one place ---
stats = np.array([
    [45, 49, 49],   # Bulbasaur
    [60, 62, 63],   # Ivysaur
    [80, 82, 83]    # Venusaur
])
A = np.cov(stats, rowvar=False)

# === 固有値分解 / Eigen decomposition ===
eigvals, eigvecs = np.linalg.eig(A)

# === 正則性チェック / Invertibility ===
is_invertible = np.abs(np.linalg.det(A)) > 1e-9  # tolerance-based check: with only 3 samples the covariance is (numerically) singular
A_inv = inv(A) if is_invertible else None

# === 行列指数関数 / Matrix exponential ===
A_exp = expm(A)

# === 基底チェック / Check if eigenvectors form a basis ===
is_basis = np.linalg.matrix_rank(eigvecs) == eigvecs.shape[1]

# === 射影行列 / Projection matrix ===
v = eigvecs[:, 0].reshape(-1, 1)
P = v @ v.T / (v.T @ v)

# === 特異値分解 / Singular Value Decomposition ===
U, S, Vt = np.linalg.svd(stats)

# === 固有ベクトル基底への変換 / Basis transformation ===
transformed_stats = stats @ eigvecs

# === 主成分分析(PCA) / Principal Component Analysis ===
mean_centered = stats - np.mean(stats, axis=0)         # 平均中心化 / Mean centering
pca_transformed = mean_centered @ eigvecs              # 固有ベクトル基底への投影 / Projection onto eigenbasis

# === グラム・シュミット直交化法 / Gram-Schmidt Orthogonalization ===
def gram_schmidt(X):
    Q = []
    for v in X.T:  # 各列ベクトルに対して / Iterate over column vectors
        for q in Q:
            v = v - np.dot(q, v) * q
        v = v / np.linalg.norm(v)
        Q.append(v)
    return np.column_stack(Q)

orthonormal_basis = gram_schmidt(stats)
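
# Orthonormality check sketch (my addition): if the Gram-Schmidt output really is an
# orthonormal basis, then QᵀQ should equal the identity up to floating-point error.
print("Gram-Schmidt check  QᵀQ ≈ I ?",
      np.allclose(orthonormal_basis.T @ orthonormal_basis, np.eye(orthonormal_basis.shape[1])))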

# --- グラフ描画: 固有値・特異値・PCA成分 --- 
plt.figure(figsize=(12, 4))

# 固有値プロット
plt.subplot(1, 3, 1)
plt.bar(['λ₁', 'λ₂', 'λ₃'], eigvals.real)
plt.title("Eigenvalues")
plt.xlabel("Index")
plt.ylabel("Value")
plt.grid(True)

# 特異値プロット
plt.subplot(1, 3, 2)
plt.bar(['σ₁', 'σ₂', 'σ₃'], S)
plt.title("Singular Values")
plt.xlabel("Index")
plt.ylabel("Value")
plt.grid(True)

# PCA可視化(第1, 第2主成分)
plt.subplot(1, 3, 3)
plt.scatter(pca_transformed[:, 0], pca_transformed[:, 1], c='orange')
plt.title("PCA (1st vs 2nd PC)")
plt.xlabel("Principal Component 1")
plt.ylabel("Principal Component 2")
plt.grid(True)

plt.tight_layout()
plt.show()

# --- 出力 / Output ---
print("=== Spectral Decomposition ===")
print("Eigenvalues:\n", eigvals)
print("Eigenvectors:\n", eigvecs)

print("\n=== Regular Matrix Check ===")
print(f"Determinant: {np.linalg.det(A):.4f}")
print(f"Invertible: {is_invertible}")
if is_invertible:
    print("Inverse Matrix:\n", A_inv)

print("\n=== Matrix Exponential ===")
print("exp(A):\n", A_exp)

print("\n=== Basis Check ===")
print(f"Do eigenvectors form a basis? {is_basis}")

print("\n=== Projection Matrix onto v₁ ===")
print("Projection Matrix P:\n", P)

print("\n=== Singular Value Decomposition ===")
print("U:\n", U)
print("Singular Values:\n", S)
print("V^T:\n", Vt)

print("\n=== Basis Transformation ===")
print("Stats matrix in eigenvector basis:\n", transformed_stats)

print("\n=== Principal Component Analysis (PCA) ===")
print("Mean-centered stats projected onto eigenvectors:\n", pca_transformed)

print("\n=== Gram-Schmidt Orthogonalization ===")
print("Orthonormal basis vectors:\n", orthonormal_basis)

# Program Name: pokemon_regression_nn.py
# Creation Date: 20250423
# Overview: Use a neural network to regress total base stats from individual stats (HP, Atk, Def)
# Usage: Run using `python pokemon_regression_nn.py`

import numpy as np

# --- データ定義 / Pokémon base stats (input) and total stats (target) ---
X = np.array([
    [45, 49, 49],   # Bulbasaur
    [60, 62, 63],   # Ivysaur
    [80, 82, 83],   # Venusaur
    [39, 52, 43],   # Charmander
    [58, 64, 58],   # Charmeleon
    [78, 84, 78],   # Charizard
])
y = np.sum(X, axis=1, keepdims=True)  # 合計種族値を回帰ターゲットに / Regression target: total stats

# --- 正規化 / Normalize input ---
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
X_norm = (X - X_mean) / X_std  # 標準化 / Standardization

# --- パラメータ初期化 / Initialize weights and biases ---
np.random.seed(42)
input_dim = X.shape[1]
hidden_dim = 4
output_dim = 1

W1 = np.random.randn(input_dim, hidden_dim)
b1 = np.random.randn(1, hidden_dim)
W2 = np.random.randn(hidden_dim, output_dim)
b2 = np.random.randn(1, output_dim)

# --- 活性化関数 / Activation ---
def relu(x): return np.maximum(0, x)

# --- 学習パラメータ / Training settings ---
lr = 0.01
epochs = 1000

# --- 学習ループ / Training loop ---
for epoch in range(epochs):
    # 順伝播 / Forward
    Z1 = X_norm @ W1 + b1
    A1 = relu(Z1)
    Z2 = A1 @ W2 + b2
    y_pred = Z2  # 回帰なので出力層は恒等関数 / Linear activation

    # 損失関数: MSE / Mean Squared Error
    loss = np.mean((y - y_pred) ** 2)

    # 逆伝播 / Backward
    dZ2 = 2 * (y_pred - y) / y.shape[0]
    dW2 = A1.T @ dZ2
    db2 = np.sum(dZ2, axis=0, keepdims=True)

    dA1 = dZ2 @ W2.T
    dZ1 = dA1 * (Z1 > 0)  # ReLUの微分
    dW1 = X_norm.T @ dZ1
    db1 = np.sum(dZ1, axis=0, keepdims=True)

    # パラメータ更新 / Update parameters
    W1 -= lr * dW1
    b1 -= lr * db1
    W2 -= lr * dW2
    b2 -= lr * db2

    if epoch % 100 == 0:
        print(f"Epoch {epoch:4d}: Loss = {loss:.4f}")

# --- 予測結果 / Final predictions ---
print("\n=== Final Prediction ===")
for i in range(len(X)):
    print(f"Input: {X[i]} | True Total: {int(y[i][0])} | Predicted: {float(y_pred[i]):.2f}")

# Program Name: pokemon_weight_mask_attention.py
# Creation Date: 20250423
# Overview: Apply weighted scoring, feature masking, and attention-like element-wise scaling to Pokémon stats
# Usage: Run using `python pokemon_weight_mask_attention.py`

import numpy as np
import matplotlib.pyplot as plt

# --- ポケモン種族値(例) / Example base stats: HP, Atk, Def, SpA, SpD, Spe ---
stats = np.array([
    [45, 49, 49, 65, 65, 45],   # Bulbasaur
    [60, 62, 63, 80, 80, 60],   # Ivysaur
    [80, 82, 83, 100, 100, 80], # Venusaur
])

pokemon_names = ["Bulbasaur", "Ivysaur", "Venusaur"]
features = ["HP", "Atk", "Def", "SpA", "SpD", "Spe"]

# === 1. 重み付きスコア計算 / Weighted total score ===
# 各ステータスに重要度を付与(例:素早さと特攻を重視)
weights = np.array([0.1, 0.15, 0.15, 0.25, 0.25, 0.1])  # 合計=1

# 各ポケモンにスコアを付ける / Weighted sum per Pokémon
weighted_scores = stats @ weights

# === 2. 特徴量マスク / Feature masking ===
# 例:防御系(Def, SpD)だけを残す → マスクで選択
mask = np.array([0, 0, 1, 0, 1, 0])  # 1を残す、0を無視
masked_stats = np.multiply(stats, mask)

# === 3. Attention風アダマール積 / Attention-like scaling ===
# 各特徴に対して学習された「注目度」スコア(例)
attention_weights = np.array([1.0, 0.8, 1.2, 1.5, 1.3, 0.7])
attention_scaled = np.multiply(stats, attention_weights)
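
# Softmax-normalization sketch (my addition): real attention weights are usually
# normalized to sum to 1; a softmax over the hypothetical scores above shows how that
# would redistribute emphasis across the six stats.
exp_w = np.exp(attention_weights - np.max(attention_weights))  # subtract max for numerical stability
softmax_weights = exp_w / np.sum(exp_w)
print("Softmax-normalized attention weights:", np.round(softmax_weights, 3))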

# --- 結果表示 / Output ---
print("=== Weighted Score ===")
for name, score in zip(pokemon_names, weighted_scores):
    print(f"{name}: {score:.2f}")

print("\n=== Feature Masking (Def & SpD) ===")
print(masked_stats)

print("\n=== Attention-like Scaled Stats ===")
print(attention_scaled)

# --- 可視化(棒グラフ) / Visualization ---
plt.figure(figsize=(10, 4))
x = np.arange(len(pokemon_names))
plt.bar(x, weighted_scores, color='mediumseagreen')
plt.xticks(x, pokemon_names)
plt.title("Weighted Score of Pokémon")
plt.ylabel("Score")
plt.grid(True)
plt.tight_layout()
plt.show()
# Program Name: pokemon_cosine_similarity.py
# Creation Date: 20250423
# Overview: Compute cosine similarity between Pokémon base stats
# Usage: Run using `python pokemon_cosine_similarity.py`

import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity

# --- 種族値データ(例) / Base stats for HP, Atk, Def, SpA, SpD, Spe ---
pokemon_names = ["Bulbasaur", "Ivysaur", "Venusaur", "Charmander", "Charmeleon", "Charizard"]

stats = np.array([
    [45, 49, 49, 65, 65, 45],    # Bulbasaur
    [60, 62, 63, 80, 80, 60],    # Ivysaur
    [80, 82, 83, 100, 100, 80],  # Venusaur
    [39, 52, 43, 60, 50, 65],    # Charmander
    [58, 64, 58, 80, 65, 80],    # Charmeleon
    [78, 84, 78, 109, 85, 100],  # Charizard
])

# --- コサイン類似度の計算 / Compute cosine similarity matrix ---
similarity_matrix = cosine_similarity(stats)

# --- データフレーム化 / Display as DataFrame for readability ---
df_similarity = pd.DataFrame(similarity_matrix, index=pokemon_names, columns=pokemon_names)

# --- 表示 / Output ---
print("=== Cosine Similarity Matrix ===")
print(df_similarity.round(3))

# --- 可視化(ヒートマップ)/ Visualization (heatmap) ---
import seaborn as sns
import matplotlib.pyplot as plt

plt.figure(figsize=(8, 6))
sns.heatmap(df_similarity, annot=True, cmap='YlGnBu', fmt=".2f")
plt.title("Cosine Similarity of Pokémon Base Stats")
plt.tight_layout()
plt.show()

# Program Name: pokemon_linear_algebra_full.py
# Creation Date: 20250423
# Overview: Analyze Pokémon base stats using inner product, cosine similarity, linear independence, eigen-decomposition, and diagonalization
# Usage: Run with `python pokemon_linear_algebra_full.py`

import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
from numpy.linalg import norm, eig, matrix_rank, inv
import seaborn as sns
import matplotlib.pyplot as plt

# --- 種族値とラベル / Pokémon base stats and labels ---
pokemon_names = ["Bulbasaur", "Ivysaur", "Venusaur", "Charmander", "Charmeleon", "Charizard"]
stat_labels = ["HP", "Atk", "Def", "SpA", "SpD", "Spe"]
stats = np.array([
    [45, 49, 49, 65, 65, 45],
    [60, 62, 63, 80, 80, 60],
    [80, 82, 83, 100, 100, 80],
    [39, 52, 43, 60, 50, 65],
    [58, 64, 58, 80, 65, 80],
    [78, 84, 78, 109, 85, 100]
])

# --- データフレーム表示 / Display stats table ---
df_stats = pd.DataFrame(stats, index=pokemon_names, columns=stat_labels)
print("=== Pokémon Base Stats ===")
print(df_stats)

# --- 内積行列 / Inner product matrix ---
inner_product = stats @ stats.T
df_inner = pd.DataFrame(inner_product, index=pokemon_names, columns=pokemon_names)
print("\n=== Inner Product Matrix ===")
print(df_inner)

# --- コサイン類似度行列 / Cosine similarity matrix ---
cos_sim = cosine_similarity(stats)
df_cos = pd.DataFrame(cos_sim, index=pokemon_names, columns=pokemon_names)
print("\n=== Cosine Similarity Matrix ===")
print(df_cos.round(3))

# --- 線形独立性判定 / Linear independence ---
rank = matrix_rank(stats)
independence = (rank == stats.shape[0])
print(f"\n=== Linear Independence ===\nRank = {rank}, Linearly Independent: {independence}")

# --- 共分散行列と固有値分解 / Covariance and eigendecomposition ---
cov_matrix = np.cov(stats.T)
eigvals, eigvecs = eig(cov_matrix)
print("\n=== Eigenvalues ===")
print(eigvals)
print("\n=== Eigenvectors (columns) ===")
print(np.round(eigvecs, 3))

# --- 対角化と再構成チェック / Diagonalization A ≈ PDP⁻¹ ---
D = np.diag(eigvals)
P = eigvecs
A_reconstructed = P @ D @ inv(P)
print("\n=== Diagonalized Matrix Reconstruction (approx.) ===")
print(np.round(A_reconstructed, 2))
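
# Verification sketch (my addition): the reconstruction should match the covariance
# matrix itself, confirming cov_matrix = P D P⁻¹ up to numerical error.
print("Reconstruction matches covariance matrix?", np.allclose(A_reconstructed, cov_matrix))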

# --- 類似度ヒートマップ / Cosine similarity heatmap ---
plt.figure(figsize=(7, 6))
sns.heatmap(df_cos, annot=True, fmt=".2f", cmap="YlGnBu")
plt.title("Cosine Similarity of Pokémon Stats")
plt.tight_layout()
plt.show()
