
High School Math and Machine Learning

Posted at 2024-08-14
import numpy as np
import matplotlib.pyplot as plt

# Assign values to A and B
A = 10
B = 24

# Define the range of x values
x_values = np.linspace(-10, 20, 400)

# Compute corresponding y values for each equation
# From equation 1: y = A - x
y1 = A - x_values

# From equation 2: y = (B - 2x) / 4
y2 = (B - 2 * x_values) / 4

# Plot both lines
plt.plot(x_values, y1, label=f'x + y = {A}')
plt.plot(x_values, y2, label=f'2x + 4y = {B}')

# Solve for the intersection point
# Coefficient matrix
coefficients = np.array([[1, 1],
                         [2, 4]])

# Constants vector
constants = np.array([A, B])

# Use numpy's linear algebra solver
solution = np.linalg.solve(coefficients, constants)
x_intersect, y_intersect = solution

# Plot the intersection point
plt.plot(x_intersect, y_intersect, 'ro', label='Intersection Point')

# Calculate and plot the slope indicators
# Slope of the first line: -1
plt.arrow(0, A, 1, -1, head_width=0.5, head_length=1, fc='blue', ec='blue', label='Slope -1')

# Slope of the second line: -1/2
plt.arrow(0, B/4, 2, -1, head_width=0.5, head_length=1, fc='green', ec='green', label='Slope -1/2')

# Configure the plot
plt.xlabel('x')
plt.ylabel('y')
plt.title('Plot of Two Lines with Slopes Indicated')
plt.legend()
plt.grid(True)
plt.axis('equal')  # To ensure equal scaling

# Display the plot
plt.show()

# Print the solution
print(f'The solution to the system is x = {x_intersect}, y = {y_intersect}')
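
As a sanity check, the solution can be substituted back into the system, and the inverse-matrix route should give the same answer. A minimal sketch, assuming coefficients, constants, and solution from the block above are still in scope:

# Substituting the solution back in should reproduce the constants vector
assert np.allclose(coefficients @ solution, constants)

# For a small invertible system, the inverse-matrix route agrees with solve()
solution_inv = np.linalg.inv(coefficients) @ constants
print(solution_inv)  # expected: [8. 2.]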

import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.neural_network import MLPRegressor
from scipy.optimize import minimize
from sklearn.preprocessing import StandardScaler

# Define the coefficients and constants of the system
A = 10
B = 24

# Create the dataset
X = np.array([[1, 1], [2, 4]])  # Coefficients
y = np.array([A, B])  # Constants

# Standardize the data (used by the neural network below)
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Linear Regression: with no intercept and the raw coefficient matrix,
# coef_ is the least-squares solution of the system itself
lin_reg = LinearRegression(fit_intercept=False)
lin_reg.fit(X, y)
lr_solution = lin_reg.coef_

# Neural Network (illustrative only): an MLP's weights are internal
# parameters, not the solution of the system; the first two first-layer
# weights are extracted here purely so they can be shown on the plot
mlp = MLPRegressor(hidden_layer_sizes=(10, 10), max_iter=20000, solver='adam', activation='relu')
mlp.fit(X_scaled, y)
nn_solution = mlp.coefs_[0].flatten()[:2]

# Optimization: minimize the squared residual ||Xw - y||^2 (here with BFGS)
def loss_function(w):
    return np.sum((np.dot(X, w) - y) ** 2)

initial_guess = np.zeros(X.shape[1])
opt_result = minimize(loss_function, initial_guess, method='BFGS')
opt_solution = opt_result.x

# Display the solutions
print(f"Linear Regression Solution: x = {lr_solution[0]}, y = {lr_solution[1]}")
print(f"Neural Network Solution: x = {nn_solution[0]}, y = {nn_solution[1]}")
print(f"Optimization Solution: x = {opt_solution[0]}, y = {opt_solution[1]}")

# Plotting the original lines for reference
x_values = np.linspace(-10, 20, 400)
y1 = A - x_values
y2 = (B - 2 * x_values) / 4

plt.plot(x_values, y1, label=f'x + y = {A}')
plt.plot(x_values, y2, label=f'2x + 4y = {B}')

# Plot the solutions
plt.plot(lr_solution[0], lr_solution[1], 'bo', label='Linear Regression Solution')
plt.plot(nn_solution[0], nn_solution[1], 'go', label='Neural Network Solution')
plt.plot(opt_solution[0], opt_solution[1], 'ro', label='Optimization Solution')

# Configure the plot
plt.xlabel('x')
plt.ylabel('y')
plt.title('Solutions using Linear Regression, Neural Network, and Optimization')
plt.legend()
plt.grid(True)
plt.show()
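
Since np.linalg.solve gives the exact answer for this 2x2 system, it is natural to measure how far each approach lands from it. A minimal sketch, assuming X, y, lr_solution, nn_solution, and opt_solution from the block above:

exact = np.linalg.solve(X, y)  # exact solution of the 2x2 system
print(f"Exact solution: x = {exact[0]}, y = {exact[1]}")
for name, sol in [("Linear Regression", lr_solution),
                  ("Neural Network", nn_solution),
                  ("Optimization", opt_solution)]:
    err = np.linalg.norm(np.asarray(sol) - exact)
    print(f"{name}: distance from exact solution = {err:.4f}")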


import numpy as np
import matplotlib.pyplot as plt

# Generate data
np.random.seed(0)  # Seed for reproducibility
time = np.linspace(0, 10, 50)  # 50 points from 0 to 10 seconds
velocity = 2  # Velocity (m/s)
distance = velocity * time + np.random.normal(scale=0.5, size=time.shape)  # Distance traveled with added noise

# Least-squares line fit
# Build the design matrix A (time column and a constant column)
A = np.vstack([time, np.ones(len(time))]).T

# Compute the parameters by least squares
m, b = np.linalg.lstsq(A, distance, rcond=None)[0]

# Evaluate the fitted line
fit_line = m * time + b

# Plot
plt.figure(figsize=(8, 6))
plt.scatter(time, distance, color='red', label='Noisy Data Points')  # Data points
plt.plot(time, fit_line, color='blue', label=f'Fitted Line: y = {m:.2f}x + {b:.2f}')  # Fitted line
plt.xlabel('Time (seconds)')
plt.ylabel('Distance (meters)')
plt.title('Distance vs. Time with Least Squares Fit')
plt.legend()
plt.grid(True)
plt.show()
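
np.polyfit solves the same least-squares problem, so it offers a one-line cross-check of the slope and intercept. A minimal sketch, assuming time, distance, m, and b from the block above:

m_poly, b_poly = np.polyfit(time, distance, 1)  # degree-1 (linear) fit
print(f"lstsq:   m = {m:.4f}, b = {b:.4f}")
print(f"polyfit: m = {m_poly:.4f}, b = {b_poly:.4f}")  # should agree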

import numpy as np
import matplotlib.pyplot as plt

# Generate data
np.random.seed(0)  # Seed for reproducibility
time = np.linspace(0, 10, 50)  # 50 points from 0 to 10 seconds
g = 9.8  # Gravitational acceleration (m/s^2)
v0 = 0  # Initial velocity (m/s)
y0 = 0  # Initial position (m)

# Generate free-fall data (with added noise)
distance = -0.5 * g * time**2 + v0 * time + y0 + np.random.normal(scale=0.5, size=time.shape)

# Arrange the data in matrix form
X = np.vstack([time**2, time, np.ones(len(time))]).T  # Quadratic, linear, and constant terms

# Compute the parameters by least squares
coefficients = np.linalg.lstsq(X, distance, rcond=None)[0]
a, b, c = coefficients

# Evaluate the fitted quadratic
fit_line = a * time**2 + b * time + c

# Plot
plt.figure(figsize=(8, 6))
plt.scatter(time, distance, color='red', label='Noisy Data Points')  # Data points
plt.plot(time, fit_line, color='blue', label=f'Fitted Quadratic: y = {a:.2f}t^2 + {b:.2f}t + {c:.2f}')  # Fitted quadratic
plt.xlabel('Time (seconds)')
plt.ylabel('Distance (meters)')
plt.title('Distance vs. Time with Quadratic Fit')
plt.legend()
plt.grid(True)
plt.show()
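
Because the data were generated from y = -(1/2)gt^2 with g = 9.8 m/s^2, the fitted quadratic coefficient should land near -4.9, and the linear and constant terms near zero. A quick check, assuming a, b, and c from the block above:

print(f"Fitted a = {a:.3f} (expected about -4.9 = -g/2)")
print(f"Fitted b = {b:.3f}, c = {c:.3f} (both generated as 0)")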


import numpy as np
import matplotlib.pyplot as plt

# Initial conditions
x0 = 0    # Initial position (m)
v0 = 5    # Initial velocity (m/s)
a = 2     # Acceleration (m/s^2)

# Time range
t = np.linspace(0, 10, 100)  # Generate 100 points from 0 to 10 seconds

# Calculate position
x = x0 + v0 * t + 0.5 * a * t**2

# Plot
plt.figure(figsize=(8, 6))
plt.plot(t, x, label='Position x(t)')
plt.xlabel('Time (seconds)')
plt.ylabel('Position (meters)')
plt.title('Position vs Time for Uniformly Accelerated Motion')
plt.legend()
plt.grid(True)
plt.show()
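
Differentiating x(t) = x0 + v0*t + (1/2)a*t^2 gives the velocity v(t) = v0 + a*t; np.gradient recovers this numerically, and for a quadratic its second-order differences are exact up to rounding. A minimal sketch, assuming t, x, v0, and a from the block above:

v_numeric = np.gradient(x, t)  # numerical derivative dx/dt
v_exact = v0 + a * t           # analytic velocity
print(f"Max deviation: {np.max(np.abs(v_numeric - v_exact)):.2e}")  # near machine precision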


import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize

# Objective function
def objective(vars):
    x, y = vars
    return -(x + y)  # Negate so that minimizing maximizes x + y

# Constraint
def constraint(vars):
    x, y = vars
    return 1 - (x**2 + y**2)  # Encodes x^2 + y^2 <= 1

# Initial guess
initial_guess = [0, 0]

# Set up the constraint
con = {'type': 'ineq', 'fun': constraint}

# Run the optimization
result = minimize(objective, initial_guess, constraints=con)

# Extract the results
x_opt, y_opt = result.x
f_opt = -result.fun

# Plot the results
theta = np.linspace(0, 2*np.pi, 100)
x_circle = np.cos(theta)
y_circle = np.sin(theta)

plt.figure(figsize=(8, 6))
plt.plot(x_circle, y_circle, label='Constraint: x^2 + y^2 = 1')
plt.scatter(x_opt, y_opt, color='red', label=f'Optimal point at ({x_opt:.2f}, {y_opt:.2f}), f(x, y)={f_opt:.2f}')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Optimization with Constraint')
plt.legend()
plt.grid(True)
plt.axis('equal')
plt.show()
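
By symmetry (or Lagrange multipliers), the maximum of x + y on the unit disk is attained at (1/sqrt(2), 1/sqrt(2)) with value sqrt(2) ≈ 1.414. A quick comparison, assuming x_opt, y_opt, and f_opt from the block above:

x_star = 1 / np.sqrt(2)
print(f"Analytic optimum: ({x_star:.4f}, {x_star:.4f}), f = {np.sqrt(2):.4f}")
print(f"Numeric optimum:  ({x_opt:.4f}, {y_opt:.4f}), f = {f_opt:.4f}")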

import numpy as np
import matplotlib.pyplot as plt

# Define the quadratic function
def f(x):
    a = 1
    b = -4
    c = 4
    return a * x**2 + b * x + c

# Define the cost function for gradient descent
def cost_function(x):
    return x**2 - 4*x + 4

# Gradient of the cost function
def gradient(x):
    return 2*x - 4

# Gradient Descent Algorithm
def gradient_descent(starting_point, learning_rate, num_iterations):
    x = starting_point
    history = [x]
    for _ in range(num_iterations):
        x = x - learning_rate * gradient(x)
        history.append(x)
    return x, history

# Parameters for the quadratic function
a = 1
b = -4
c = 4

# Completing the square
vertex_x = -b / (2 * a)
vertex_y = f(vertex_x)

# Parameters for gradient descent
starting_point = 0.0
learning_rate = 0.1
num_iterations = 50

# Run Gradient Descent
optimal_x, history = gradient_descent(starting_point, learning_rate, num_iterations)
optimal_cost = cost_function(optimal_x)

# Create x values for plotting
x = np.linspace(-1, 5, 100)
y = f(x)

# Plot the quadratic function, its minimum, and gradient descent path
plt.figure(figsize=(12, 6))

# Plot the quadratic function
plt.plot(x, y, label='f(x) = x^2 - 4x + 4', color='blue')

# Plot the minimum point from completing the square
plt.scatter(vertex_x, vertex_y, color='red', zorder=5, label=f'Minimum at x={vertex_x:.2f}, f(x)={vertex_y:.2f}')

# Plot the gradient descent path
history_x = np.array(history)
history_y = cost_function(history_x)
plt.scatter(history_x, history_y, color='green', zorder=5, label='Gradient Descent Path')

plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('Quadratic Function, Minimum Value, and Gradient Descent Path')
plt.legend()
plt.grid(True)
plt.show()
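
For f(x) = x^2 - 4x + 4, the update x_{k+1} = x_k - lr * (2*x_k - 4) shrinks the error from the minimum at x = 2 by a factor of |1 - 2*lr| per step, so any learning rate in (0, 1) converges. A minimal sketch comparing a few rates, assuming gradient_descent from the block above:

for lr in [0.1, 0.5, 0.9]:
    x_final, _ = gradient_descent(starting_point=0.0, learning_rate=lr, num_iterations=50)
    print(f"learning_rate={lr}: final x = {x_final:.6f} (minimum at x = 2)")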

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize

# Objective function
def objective(vars):
    x, y = vars
    return x**2 + y**2

# Constraint function
def constraint(vars):
    x, y = vars
    return x + 2*y - 5

# Initial guess
initial_guess = [0, 0]

# Define constraints and bounds
con = {'type': 'eq', 'fun': constraint}

# Perform the optimization
result = minimize(objective, initial_guess, constraints=con)

# Extract the optimal values
x_opt, y_opt = result.x
min_value = result.fun

print(f"Optimal values: x = {x_opt:.2f}, y = {y_opt:.2f}")
print(f"Minimum value of x^2 + y^2: {min_value:.2f}")

# Create a grid of points
x = np.linspace(-1, 6, 400)
y = np.linspace(-1, 3, 400)
X, Y = np.meshgrid(x, y)
Z = X**2 + Y**2

# Plot the contours of x^2 + y^2
plt.figure(figsize=(8, 6))
cp = plt.contour(X, Y, Z, levels=20, cmap='viridis')
plt.colorbar(cp, label='Objective Function: $x^2 + y^2$')

# Plot the constraint line x + 2y = 5
y_line = (5 - x) / 2
plt.plot(x, y_line, 'r--', label='$x + 2y = 5$')

# Plot the optimal point
plt.plot(x_opt, y_opt, 'bo', label='Optimal Point')

plt.xlabel('$x$')
plt.ylabel('$y$')
plt.title('Contour plot of $x^2 + y^2$ and constraint $x + 2y = 5$')
plt.legend()
plt.grid(True)
plt.show()

# 3D plot
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')

# Plot the surface
ax.plot_surface(X, Y, Z, cmap='viridis', alpha=0.8)

# Plot the constraint line lifted onto the surface in 3D
x_line = np.linspace(-1, 6, 400)
y_line = (5 - x_line) / 2
z_line = x_line**2 + y_line**2
ax.plot(x_line, y_line, z_line, 'r--', linewidth=2, label='$x + 2y = 5$')

# Plot the optimal point
ax.scatter(x_opt, y_opt, min_value, color='b', s=100, label='Optimal Point')

ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_zlabel('$x^2 + y^2$')
ax.set_title('3D plot of $x^2 + y^2$ and constraint $x + 2y = 5$')
ax.legend()

plt.show()
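
The Lagrange conditions for this problem read 2x = λ and 2y = 2λ, so y = 2x; substituting into x + 2y = 5 gives x = 1, y = 2 and a minimum value of 5, which the numerical result should reproduce. A quick check, assuming x_opt, y_opt, and min_value from above:

print("Analytic: x = 1, y = 2, min = 5")
print(f"Numeric:  x = {x_opt:.4f}, y = {y_opt:.4f}, min = {min_value:.4f}")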


import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize

# Objective function
def objective(vars):
    x, y = vars
    return x**2 + y**2

# Constraint function
def constraint(vars):
    x, y = vars
    return x + 2*y - 5

# Initial guess
initial_guess = [0, 0]

# Define constraints and bounds
con = {'type': 'eq', 'fun': constraint}

# Perform the optimization using SLSQP method
result = minimize(objective, initial_guess, constraints=con, method='SLSQP')

# Extract the optimal values
x_opt, y_opt = result.x
min_value = result.fun

print(f"Optimal values: x = {x_opt:.2f}, y = {y_opt:.2f}")
print(f"Minimum value of x^2 + y^2: {min_value:.2f}")

# Create a grid of points
x = np.linspace(-1, 6, 400)
y = np.linspace(-1, 3, 400)
X, Y = np.meshgrid(x, y)
Z = X**2 + Y**2

# Plot the contours of x^2 + y^2
plt.figure(figsize=(8, 6))
cp = plt.contour(X, Y, Z, levels=20, cmap='viridis')
plt.colorbar(cp, label='Objective Function: $x^2 + y^2$')

# Plot the constraint line x + 2y = 5
y_line = (5 - x) / 2
plt.plot(x, y_line, 'r--', label='$x + 2y = 5$')

# Plot the optimal point
plt.plot(x_opt, y_opt, 'bo', label='Optimal Point')

plt.xlabel('$x$')
plt.ylabel('$y$')
plt.title('Contour plot of $x^2 + y^2$ and constraint $x + 2y = 5$')
plt.legend()
plt.grid(True)
plt.show()

import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize

# Objective function
def objective(vars):
    x, y = vars
    return x**2 + y**2

# Constraint function
def constraint(vars):
    x, y = vars
    return x + 2*y - 5

# Initial guess
initial_guess = [0, 0]

# Define constraints and bounds
con = {'type': 'eq', 'fun': constraint}

# Perform the optimization using improved settings
result = minimize(objective, initial_guess, constraints=con, method='SLSQP', options={'disp': True, 'maxiter': 1000})

# Extract the optimal values
x_opt, y_opt = result.x
min_value = result.fun

print(f"Optimal values with improved settings: x = {x_opt:.2f}, y = {y_opt:.2f}")
print(f"Minimum value of x^2 + y^2 with improved settings: {min_value:.2f}")

# Create a grid of points
x = np.linspace(-1, 6, 400)
y = np.linspace(-1, 3, 400)
X, Y = np.meshgrid(x, y)
Z = X**2 + Y**2

# Plot the contours of x^2 + y^2
plt.figure(figsize=(8, 6))
cp = plt.contour(X, Y, Z, levels=20, cmap='viridis')
plt.colorbar(cp, label='Objective Function: $x^2 + y^2$')

# Plot the constraint line x + 2y = 5
y_line = (5 - x) / 2
plt.plot(x, y_line, 'r--', label='$x + 2y = 5$')

# Plot the optimal point with improved settings
plt.plot(x_opt, y_opt, 'bo', label='Optimal Point with Improved Settings')

plt.xlabel('$x$')
plt.ylabel('$y$')
plt.title('Contour plot of $x^2 + y^2$ and constraint $x + 2y = 5$ with Improved Settings')
plt.legend()
plt.grid(True)
plt.show()


import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.cluster import KMeans
from sklearn.datasets import make_classification
from sklearn.preprocessing import StandardScaler

# Distance from a point to the line a*x + b*y + c = 0
def point_line_distance(x0, y0, a, b, c):
    return np.abs(a * x0 + b * y0 + c) / np.sqrt(a**2 + b**2)

# Generate data
X, y = make_classification(n_samples=100, n_features=2, n_informative=2, n_redundant=0, random_state=42)

# Standardize the data
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Classify with a linear SVM
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X_scaled, y)

# Coefficients of the decision boundary a*x + b*y + c = 0
w = clf.coef_[0]
intercept = clf.intercept_[0]
a, b, c = w[0], w[1], intercept

# Distance from each point to the decision boundary
distances = np.array([point_line_distance(px, py, a, b, c) for px, py in X_scaled])

# K-means clustering
kmeans = KMeans(n_clusters=2, random_state=42, n_init=10)
kmeans.fit(X_scaled)
labels = kmeans.labels_

# Plot
plt.figure(figsize=(12, 6))

# SVM decision boundary
xx, yy = np.meshgrid(np.linspace(X_scaled[:, 0].min(), X_scaled[:, 0].max(), 500),
                     np.linspace(X_scaled[:, 1].min(), X_scaled[:, 1].max(), 500))
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)

plt.contour(xx, yy, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--'])

# K-means clustering results
plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=labels, s=50, cmap='viridis')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], c='red', s=200, alpha=0.75, marker='X')

plt.title('SVM and K-means Clustering')
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.show()
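
For a linear SVM the margin width is 2/||w||, and the support vectors sit at (or, with soft-margin slack, inside) half that distance from the boundary. A minimal sketch, assuming w and distances from the block above:

margin = 2 / np.linalg.norm(w)
print(f"Margin width: {margin:.4f}")
print(f"Smallest point-to-boundary distance: {distances.min():.4f}")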


import numpy as np
import matplotlib.pyplot as plt

# Sample data
x = np.array([1, 2, 3, 4, 5])
y = np.array([2, 4, 5, 4, 5])

# Calculate means
x_mean = np.mean(x)
y_mean = np.mean(y)

# Calculate covariance and variance
S_xy = np.mean((x - x_mean) * (y - y_mean))
S_x2 = np.mean((x - x_mean) ** 2)

# Calculate coefficients a and intercept b
a = S_xy / S_x2
b = y_mean - a * x_mean

# Linear function
def linear_function(x):
    return a * x + b

# Calculate residuals
predicted_y = linear_function(x)
residuals = y - predicted_y

# Plot data points, the fitted line, and residuals
plt.figure(figsize=(10, 6))

# Plot data points
plt.scatter(x, y, color='blue', label='Data Points', zorder=5)

# Plot fitted line
plt.plot(x, predicted_y, color='red', label='Fitted Line', linewidth=2, zorder=4)

# Plot residuals
for i in range(len(x)):
    plt.plot([x[i], x[i]], [y[i], predicted_y[i]], color='green', linestyle='--', zorder=3)

# Graph details
plt.xlabel('x')
plt.ylabel('y')
plt.title('Least Squares Fitting with Residuals')
plt.legend()
plt.grid(True)
plt.show()

# Display results
print(f"Mean of x: {x_mean}")
print(f"Mean of y: {y_mean}")
print(f"Covariance S_xy: {S_xy}")
print(f"Variance S_x2: {S_x2}")
print(f"Coefficient a: {a}")
print(f"Intercept b: {b}")
print(f"Residuals: {residuals}")


from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.metrics import classification_report

# Generate synthetic dataset
X, y = make_classification(n_samples=1000, n_features=10, n_classes=2, random_state=42)

# Split dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Standardize features
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Train SVM model
model = SVC(kernel='linear')
model.fit(X_train_scaled, y_train)

# Predict and evaluate
y_pred = model.predict(X_test_scaled)
print(classification_report(y_test, y_pred))
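
A confusion matrix complements the classification report by showing the error types directly. A minimal addition, assuming y_test and y_pred from the block above:

from sklearn.metrics import confusion_matrix

cm = confusion_matrix(y_test, y_pred)
print(cm)  # rows: true class, columns: predicted class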

import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

# Generate data
np.random.seed(0)
n_samples = 1000
X = np.linspace(0, 4 * np.pi, n_samples)
y = np.sin(X) > 0  # Label the positive half of the sine wave as class 1, the negative half as class 0

# Use the sine and cosine values as the two features
X_data = np.vstack((np.sin(X), np.cos(X))).T
y_data = y.astype(int)

# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size=0.3, random_state=0)

# Train a logistic regression model
model = LogisticRegression()
model.fit(X_train, y_train)

# Predict on the test set
y_pred = model.predict(X_test)

# Report the accuracy
accuracy = accuracy_score(y_test, y_pred)
print(f'Accuracy: {accuracy:.2f}')

# Plot the data and the decision boundary
plt.figure(figsize=(10, 6))

# Set the plotting region
x_min, x_max = X_data[:, 0].min() - 1, X_data[:, 0].max() + 1
y_min, y_max = X_data[:, 1].min() - 1, X_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100), np.linspace(y_min, y_max, 100))
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)

# Plot the decision boundary
plt.contourf(xx, yy, Z, alpha=0.3, cmap='coolwarm')
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, edgecolor='k', cmap='coolwarm', marker='o')
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, edgecolor='k', cmap='coolwarm', marker='x')
plt.title('Logistic Regression Decision Boundary')
plt.xlabel('Sin(x)')
plt.ylabel('Cos(x)')
plt.colorbar()
plt.show()
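
Since class 1 is exactly sin(x) > 0, the learned boundary should be close to the sin-axis of the (sin x, cos x) feature plane: a large weight on the first feature and a near-zero weight and intercept elsewhere. A quick inspection, assuming model from the block above:

print(f"Coefficients: {model.coef_[0]}")    # expect |weight on sin| >> |weight on cos|
print(f"Intercept: {model.intercept_[0]}")  # expect close to 0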


import numpy as np
import matplotlib.pyplot as plt
from numpy.fft import fft, fftfreq

# Sampling parameters
sampling_rate = 1000  # Sampling rate (Hz)
T = 1.0 / sampling_rate  # Sampling interval (s)
L = 1.0  # Signal length (s)
N = int(L * sampling_rate)  # Number of samples

# Time axis
t = np.linspace(0.0, L, N, endpoint=False)

# Generate a growing exponential signal
A = 1.0  # Amplitude
B = 5.0  # Growth rate
signal = A * np.exp(B * t)

# Compute the FFT
yf = fft(signal)
xf = fftfreq(N, T)[:N//2]

# Plot
plt.figure(figsize=(12, 6))

# Plot the growing exponential signal
plt.subplot(1, 2, 1)
plt.plot(t, signal)
plt.title('Exponential Signal')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')

# Plot the FFT result
plt.subplot(1, 2, 2)
plt.plot(xf, np.abs(yf[:N//2]), label='Magnitude')
plt.title('FFT of Exponential Signal')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Magnitude')
plt.legend()

plt.tight_layout()
plt.show()
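
A growing exponential has no dominant frequency, so its spectrum decays smoothly; by contrast, a pure sine produces a single sharp peak at its frequency. A minimal sketch reusing t, xf, and N from above, with an assumed test frequency of 50 Hz:

sine = np.sin(2 * np.pi * 50 * t)  # 50 Hz sine with the same sampling setup
yf_sine = fft(sine)
peak_freq = xf[np.argmax(np.abs(yf_sine[:N//2]))]
print(f"Peak frequency: {peak_freq:.1f} Hz")  # expect 50.0 Hz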





