
Calculus for Engineering

Posted at 2024-08-05
import numpy as np
import matplotlib.pyplot as plt

# Set up the time axis
n = np.linspace(-1, 1, 1000)

# Approximate the impulse (delta) function with a narrow Gaussian.
# sigma must exceed the grid spacing (about 0.002 here), otherwise
# every sample point misses the spike entirely.
sigma = 0.01
impulse_approx = (1/(sigma * np.sqrt(2 * np.pi))) * np.exp(-n**2 / (2 * sigma**2))

# Plot
plt.plot(n, impulse_approx)
plt.title('Approximate Impulse Function (Gaussian)')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.grid(True)
plt.show()
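As a quick sanity check (a minimal sketch), the area under the approximation should be close to 1, matching the defining property of the delta function:

# Riemann-sum check: the approximation should integrate to ~1
area = np.sum(impulse_approx) * (n[1] - n[0])
print(f"Area under the approximation: {area:.4f}")  # expected: ~1.0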
import numpy as np
import matplotlib.pyplot as plt

# Constants
V_max = 1.0  # Maximum reaction velocity
K_m = 0.5    # Michaelis constant

# Range of substrate concentrations
S = np.linspace(0, 10, 400)

# Compute the reaction velocity
v = (V_max * S) / (K_m + S)

# Plot
plt.figure()
plt.plot(S, v, label='Michaelis-Menten Kinetics')
plt.xlabel('[S] (Substrate Concentration)')
plt.ylabel('v (Reaction Velocity)')
plt.title('Michaelis-Menten Kinetics')
plt.legend()
plt.grid(True)
plt.show()


from scipy.integrate import odeint

# Parameters
r = 0.5   # Growth rate
K = 100.0 # Carrying capacity

# Logistic growth differential equation
def logistic_growth(B, t, r, K):
    dBdt = r * B * (1 - B / K)
    return dBdt

# Initial condition
B0 = 1.0

# Time range
t = np.linspace(0, 50, 400)

# Run the numerical integration
B = odeint(logistic_growth, B0, t, args=(r, K))

# Plot
plt.figure()
plt.plot(t, B, label='Logistic Growth')
plt.xlabel('Time')
plt.ylabel('Biomass')
plt.title('Logistic Growth Over Time')
plt.legend()
plt.grid(True)
plt.show()

from scipy.optimize import minimize

# Growth-rate model (assumed concave in both temperature and pH)
def growth_rate(T, pH):
    return -0.5 * T**2 + 3*T - 0.1 * (pH - 7)**2 + 2*pH

# Objective function (minimize the negative growth rate to maximize it)
def objective_function(vars):
    T, pH = vars
    return -growth_rate(T, pH)

# Initial guess (e.g. 25 degrees and pH 7)
initial_guess = [25, 7]

# Bound constraints (e.g. temperature in [0, 40], pH in [4, 10])
bounds = [(0, 40), (4, 10)]

# Run the optimization
result = minimize(objective_function, initial_guess, bounds=bounds)

# Display the results
optimal_T, optimal_pH = result.x
optimal_growth_rate = -result.fun

print(f"Optimal temperature (T): {optimal_T}")
print(f"Optimal pH: {optimal_pH}")
print(f"Maximum growth rate: {optimal_growth_rate}")

# Plot (use a separate name for the grid so it does not shadow the function)
T = np.linspace(0, 40, 400)
pH = np.linspace(4, 10, 400)
T, pH = np.meshgrid(T, pH)
growth_rate_grid = growth_rate(T, pH)

plt.figure()
cp = plt.contourf(T, pH, growth_rate_grid, levels=50, cmap='viridis')
plt.colorbar(cp, label='Growth Rate')
plt.scatter(optimal_T, optimal_pH, color='red', zorder=5, label='Optimal Point')
plt.xlabel('Temperature (T)')
plt.ylabel('pH')
plt.title('Cell Culture Optimization')
plt.legend()
plt.grid(True)
plt.show()


import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt

# Rate constant
k = 0.1

# Rate equation for the reaction
def reaction_rate(A, t, k):
    dA_dt = -k * A
    return dA_dt

# Initial condition
A0 = 1.0

# Time range
t = np.linspace(0, 50, 400)

# Run the numerical integration
A = odeint(reaction_rate, A0, t, args=(k,))

# Plot
plt.figure()
plt.plot(t, A, label='Concentration of A')
plt.xlabel('Time')
plt.ylabel('[A]')
plt.title('Chemical Reaction Progress')
plt.legend()
plt.grid(True)
plt.show()
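This first-order rate law has the closed-form solution A(t) = A0·exp(-kt), so the numerical result can be checked directly:

# Compare with the exact solution of dA/dt = -kA
A_exact = A0 * np.exp(-k * t)
print(f"Max deviation from the exact solution: {np.max(np.abs(A[:, 0] - A_exact)):.2e}")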


from scipy.optimize import minimize

# Model of the chemical potential: f(x, y) = x^2 + y^2 - 2xy = (x - y)^2
def chemical_potential(vars):
    x, y = vars
    return x**2 + y**2 - 2*x*y

# Initial guess
initial_guess = [1, 1]

# Run the optimization
result = minimize(chemical_potential, initial_guess)

# Display the results
optimal_x, optimal_y = result.x
optimal_value = chemical_potential(result.x)

print(f"Optimal x: {optimal_x}")
print(f"Optimal y: {optimal_y}")
print(f"Minimum chemical potential: {optimal_value}")

# Plot
x = np.linspace(-5, 5, 400)
y = np.linspace(-5, 5, 400)
X, Y = np.meshgrid(x, y)
Z = X**2 + Y**2 - 2*X*Y

plt.figure()
cp = plt.contourf(X, Y, Z, levels=50, cmap='viridis')
plt.colorbar(cp, label='Chemical Potential')
plt.scatter(optimal_x, optimal_y, color='red', zorder=5, label='Optimal Point')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Chemical Potential Optimization')
plt.legend()
plt.grid(True)
plt.show()


import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import cumulative_trapezoid  # replaces the removed cumtrapz

# Time range
t = np.linspace(0, 10, 1000)

# Inflow rate (e.g. a sinusoidal profile)
r = 10 * np.sin(0.5 * t) + 20

# Cumulative traffic flow (integral of the inflow rate)
flow = cumulative_trapezoid(r, t, initial=0)

# プロット
plt.figure(figsize=(12, 6))

plt.subplot(1, 2, 1)
plt.plot(t, r, label='Traffic Inflow Rate')
plt.xlabel('Time')
plt.ylabel('Inflow Rate')
plt.title('Traffic Inflow Rate vs Time')
plt.grid(True)

plt.subplot(1, 2, 2)
plt.plot(t, flow, label='Cumulative Traffic Flow', color='orange')
plt.xlabel('Time')
plt.ylabel('Cumulative Flow')
plt.title('Cumulative Traffic Flow vs Time')
plt.grid(True)

plt.tight_layout()
plt.show()


from scipy.integrate import quad

# Function to integrate
def f(x):
    return np.sin(x)  # Example: y = sin(x)

# Integration range
a, b = 0, np.pi  # Example: from 0 to pi

# Moment about the x-axis
def moment_x_integrand(x):
    return f(x)**2

moment_x, _ = quad(moment_x_integrand, a, b)
print(f"Moment about the x-axis: {moment_x}")

# Moment about the y-axis
def moment_y_integrand(x):
    return x * f(x)

moment_y, _ = quad(moment_y_integrand, a, b)
print(f"Moment about the y-axis: {moment_y}")


import numpy as np

# Objective function
def f(x):
    return (x - 3)**2

# Gradient (derivative) of the objective
def grad_f(x):
    return 2 * (x - 3)

# Steepest-descent implementation
def gradient_descent(learning_rate, num_iterations, initial_x):
    x = initial_x
    x_history = [x]
    
    for _ in range(num_iterations):
        grad = grad_f(x)
        x = x - learning_rate * grad
        x_history.append(x)
    
    return x, x_history

# Parameter settings
learning_rate = 0.1
num_iterations = 100
initial_x = 0

# Run steepest descent
optimal_x, x_history = gradient_descent(learning_rate, num_iterations, initial_x)
print(f"Optimal solution: {optimal_x}")


import numpy as np

# Second derivative of the objective (the Hessian; a 1x1 matrix here)
def hessian_f():
    return np.array([[2]])

# Compute the Hessian
H = hessian_f()
print(f"Hessian matrix: \n{H}")

import numpy as np
import matplotlib.pyplot as plt

# Objective function and its gradient
def f(x):
    return (x - 3)**2

def grad_f(x):
    return 2 * (x - 3)

# Gradient descent implementation
def gradient_descent(learning_rate, num_iterations, initial_x):
    x = initial_x
    x_history = [x]
    
    for _ in range(num_iterations):
        grad = grad_f(x)
        x = x - learning_rate * grad
        x_history.append(x)
    
    return x, x_history

# Parameters
num_iterations = 50
initial_x = 0

# Running gradient descent with different learning rates
learning_rates = [0.01, 0.1, 0.5, 1.0]
plt.figure(figsize=(10, 6))

for lr in learning_rates:
    _, x_history = gradient_descent(lr, num_iterations, initial_x)
    plt.plot(x_history, label=f'Learning rate={lr}')

plt.xlabel('Iteration')
plt.ylabel('Value of x')
plt.title('Convergence of Gradient Descent (Different Learning Rates)')
plt.legend()
plt.grid(True)
plt.show()

# Running gradient descent with different learning rates and measuring
# the first iteration at which |x - 3| < 0.01
num_iterations = 100
convergence_times = []

for lr in learning_rates:
    _, x_history = gradient_descent(lr, num_iterations, initial_x)
    within_tol = [i for i, x in enumerate(x_history) if abs(x - 3) < 0.01]
    convergence_time = within_tol[0] if within_tol else num_iterations
    convergence_times.append(convergence_time)

plt.figure(figsize=(10, 6))
plt.plot(learning_rates, convergence_times, marker='o')
plt.xscale('log')
plt.xlabel('Learning Rate')
plt.ylabel('Number of Iterations to Converge')
plt.title('Learning Rate vs. Number of Iterations to Converge')
plt.grid(True)
plt.show()


import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
from sympy import symbols, exp, diff

# 1. Calculation of Expectation and Variance

# Normal distribution parameters
mu, sigma = 0, 1  # Mean and standard deviation
normal_dist = stats.norm(mu, sigma)
mean_normal = normal_dist.mean()
variance_normal = normal_dist.var()
print(f"Expectation of Normal Distribution: {mean_normal}")
print(f"Variance of Normal Distribution: {variance_normal}")

# Poisson distribution parameters
lambda_poisson = 5  # Mean (and variance)
poisson_dist = stats.poisson(lambda_poisson)
mean_poisson = poisson_dist.mean()
variance_poisson = poisson_dist.var()
print(f"Expectation of Poisson Distribution: {mean_poisson}")
print(f"Variance of Poisson Distribution: {variance_poisson}")

# 2. Moment-Generating Function (MGF) Calculation and Plotting

# MGF of Normal Distribution
def mgf_normal(t, mu=0, sigma=1):
    return np.exp(mu * t + 0.5 * sigma**2 * t**2)

# MGF of Poisson Distribution
def mgf_poisson(t, lambd=5):
    return np.exp(lambd * (np.exp(t) - 1))

# Calculate MGF values
t_values = np.linspace(-1, 1, 100)
mgf_normal_values = mgf_normal(t_values)
mgf_poisson_values = mgf_poisson(t_values)

# Plotting MGF
plt.figure(figsize=(12, 6))

plt.subplot(1, 2, 1)
plt.plot(t_values, mgf_normal_values, label='Normal Distribution MGF')
plt.title('Moment-Generating Function of Normal Distribution')
plt.xlabel('t')
plt.ylabel('MGF')
plt.grid(True)

plt.subplot(1, 2, 2)
plt.plot(t_values, mgf_poisson_values, label='Poisson Distribution MGF', color='orange')
plt.title('Moment-Generating Function of Poisson Distribution')
plt.xlabel('t')
plt.ylabel('MGF')
plt.grid(True)

plt.tight_layout()
plt.show()

# 3. MGF Application for Moments Calculation

# Symbolic variable
t = symbols('t')

# MGF of Normal Distribution
mgf_normal_sym = exp(mu * t + 0.5 * sigma**2 * t**2)

# Calculate 4th moment
n = 4
moment_n_normal = diff(mgf_normal_sym, t, n).subs(t, 0)
print(f"4th Moment of Normal Distribution: {moment_n_normal}")

# MGF of Poisson Distribution
mgf_poisson_sym = exp(lambda_poisson * (exp(t) - 1))

# Calculate 4th moment
moment_n_poisson = diff(mgf_poisson_sym, t, n).subs(t, 0)
print(f"4th Moment of Poisson Distribution: {moment_n_poisson}")


import numpy as np

# Activation function and its derivative
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    return sigmoid(x) * (1 - sigmoid(x))

# Network configuration
input_size = 2
hidden_size = 2
output_size = 1

np.random.seed(0)
W1 = np.random.randn(input_size, hidden_size)
W2 = np.random.randn(hidden_size, output_size)
b1 = np.random.randn(hidden_size)
b2 = np.random.randn(output_size)

# Forward pass
def forward(X):
    z1 = np.dot(X, W1) + b1
    a1 = sigmoid(z1)
    z2 = np.dot(a1, W2) + b2
    a2 = sigmoid(z2)
    return a2, a1, z1

# Backward pass
def backward(X, y, a2, a1, z1):
    m = y.shape[0]
    dz2 = a2 - y
    dW2 = (1/m) * np.dot(a1.T, dz2)
    db2 = (1/m) * np.sum(dz2, axis=0)
    dz1 = np.dot(dz2, W2.T) * sigmoid_derivative(z1)
    dW1 = (1/m) * np.dot(X.T, dz1)
    db1 = (1/m) * np.sum(dz1, axis=0)
    return dW1, db1, dW2, db2

# Parameter update
def update_parameters(dW1, db1, dW2, db2, learning_rate):
    global W1, b1, W2, b2
    W1 -= learning_rate * dW1
    b1 -= learning_rate * db1
    W2 -= learning_rate * dW2
    b2 -= learning_rate * db2

# Training data (XOR)
X_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_train = np.array([[0], [1], [1], [0]])

epochs = 10000
learning_rate = 0.1

for epoch in range(epochs):
    a2, a1, z1 = forward(X_train)
    dW1, db1, dW2, db2 = backward(X_train, y_train, a2, a1, z1)
    update_parameters(dW1, db1, dW2, db2, learning_rate)

print(f"最終重み: \nW1: {W1}\nW2: {W2}")


import numpy as np

# Generate data
np.random.seed(0)
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.randn(100, 1)

# Linear regression model
def predict(X, theta):
    return X.dot(theta)

def compute_cost(X, y, theta):
    m = len(y)
    return (1/(2*m)) * np.sum(np.square(predict(X, theta) - y))

def compute_gradient(X, y, theta):
    m = len(y)
    return (1/m) * X.T.dot(predict(X, theta) - y)

def gradient_descent(X, y, theta, learning_rate, num_iterations):
    cost_history = np.zeros(num_iterations)
    for i in range(num_iterations):
        gradient = compute_gradient(X, y, theta)
        theta = theta - learning_rate * gradient
        cost_history[i] = compute_cost(X, y, theta)
    return theta, cost_history

# Initial parameters
X_b = np.c_[np.ones((100, 1)), X]  # Add the bias (intercept) column
theta_initial = np.random.randn(2, 1)
learning_rate = 0.1
num_iterations = 1000

# Steepest descent
theta_optimal, cost_history = gradient_descent(X_b, y, theta_initial, learning_rate, num_iterations)
print(f"Optimal parameters: {theta_optimal}")

import sympy as sp

# Define symbolic variables
x1, x2 = sp.symbols('x1 x2')

# Define a vector-valued function
f1 = x1**2 + x2**2
f2 = x1 * x2

# Build the Jacobian matrix
f = sp.Matrix([f1, f2])
x = sp.Matrix([x1, x2])
J = f.jacobian(x)

print("Jacobian matrix:")
sp.pprint(J)
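The symbolic matrix can be evaluated at a concrete point with subs, e.g. at (x1, x2) = (1, 2):

# Evaluate the Jacobian at (x1, x2) = (1, 2)
J_at_point = J.subs({x1: 1, x2: 2})
sp.pprint(J_at_point)  # [[2, 4], [2, 1]]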

import sympy as sp

# Define symbolic variables
x1, x2 = sp.symbols('x1 x2')

# Define a scalar-valued function
f = x1**2 + 2*x1*x2 + 3*x2**2

# Build the Hessian matrix
H = sp.hessian(f, (x1, x2))

print("Hessian matrix:")
sp.pprint(H)
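Checking the eigenvalues tells us whether f is convex; here both are positive (4 ± 2√2), so f is strictly convex with a unique global minimum:

# Positive eigenvalues => positive-definite Hessian => strictly convex f
print(H.eigenvals())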

import numpy as np
import matplotlib.pyplot as plt

# Parameter settings
lambda_ = 0.1  # Growth rate
sigma = 0.2    # Strength of noise
X0 = 10        # Initial cell count
T = 100        # Total simulation time
dt = 0.1       # Time step
N = int(T / dt) # Number of time steps

# Time array
t = np.linspace(0, T, N+1)

# Initialize cell count
X = np.zeros(N+1)
X[0] = X0

# Generate Brownian motion
dW = np.sqrt(dt) * np.random.randn(N)

# Perform simulation
for i in range(N):
    X[i+1] = X[i] + lambda_ * X[i] * (1 - X[i]) * dt + sigma * X[i] * dW[i]

# Plot results
plt.plot(t, X, label='Cell Count')
plt.xlabel('Time')
plt.ylabel('Cell Count')
plt.title('Simulation of Cell Division Process')
plt.legend()
plt.grid(True)
plt.show()
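The update loop above is the Euler-Maruyama scheme. Since each run draws different Brownian increments, plotting several sample paths shows the run-to-run variability; a minimal sketch reusing the same parameters:

# Several Euler-Maruyama sample paths to show the stochastic spread
for seed in range(5):
    rng = np.random.default_rng(seed)
    Xs = np.zeros(N+1)
    Xs[0] = X0
    dWs = np.sqrt(dt) * rng.standard_normal(N)
    for i in range(N):
        Xs[i+1] = Xs[i] + lambda_ * Xs[i] * (1 - Xs[i]) * dt + sigma * Xs[i] * dWs[i]
    plt.plot(t, Xs, alpha=0.7)
plt.xlabel('Time')
plt.ylabel('Cell Count')
plt.title('Sample Paths of the SDE')
plt.show()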


import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression

# Sample data
X = np.random.rand(100, 1) * 10
y = 2.5 * X + np.random.randn(100, 1) * 2

# Fit the linear regression model
model = LinearRegression()
model.fit(X, y)

# Plot the regression line
plt.scatter(X, y, color='blue', label='Data')
plt.plot(X, model.predict(X), color='red', label='Fitted Line')
plt.xlabel('X')
plt.ylabel('y')
plt.title('Linear Regression')
plt.legend()
plt.show()
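The fitted slope and intercept should come out close to the generating values (2.5 and 0):

# Inspect the fitted parameters
print(f"Slope: {model.coef_[0][0]:.3f}, Intercept: {model.intercept_[0]:.3f}")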



import numpy as np
import matplotlib.pyplot as plt

# Convex function f(x) = x^2
def f(x):
    return x**2

# Gradient (derivative) df/dx = 2x
def gradient(x):
    return 2*x

# Gradient descent
def gradient_descent(starting_point, learning_rate, num_iterations):
    x = starting_point
    history = [x]
    for _ in range(num_iterations):
        grad = gradient(x)
        x = x - learning_rate * grad
        history.append(x)
    return x, history

# Initial settings
starting_point = 10.0
learning_rate = 0.1
num_iterations = 50

# Run gradient descent
minimum, history = gradient_descent(starting_point, learning_rate, num_iterations)

# Plot the results
x_values = np.linspace(-12, 12, 400)
y_values = f(x_values)

plt.figure(figsize=(10, 6))
plt.plot(x_values, y_values, label='$f(x) = x^2$', color='blue')
plt.scatter(history, [f(x) for x in history], color='red', label='Iterations')
plt.plot(history, [f(x) for x in history], color='red', linestyle='--', alpha=0.5)
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('Gradient Descent Optimization')
plt.legend()
plt.grid()
plt.show()

print(f"最小値は x = {minimum:.4f} です。")
import torch

# Define variables with gradient tracking enabled
x = torch.tensor(1.0, requires_grad=True)
y = torch.tensor(2.0, requires_grad=True)

# Define the function
z = x**2 + y**2

# Compute the gradients
z.backward()

print("Gradient w.r.t x:", x.grad.item())
print("Gradient w.r.t y:", y.grad.item())



import numpy as np
from scipy.optimize import minimize

# Objective function
def f(x):
    return x[0]**2 + x[1]**2

# Gradient
def gradient_f(x):
    return np.array([2 * x[0], 2 * x[1]])

# Hessian matrix
def hessian_f(x):
    return np.array([[2, 0], [0, 2]])

# Newton's method (Newton-CG uses the analytic gradient and Hessian;
# the default BFGS would silently ignore the hess argument)
result = minimize(f, x0=np.array([1.0, 2.0]), method='Newton-CG', jac=gradient_f, hess=hessian_f)
print("Optimal point:", result.x)
print("Function value at optimal point:", result.fun)


import matplotlib.pyplot as plt

# Define the objective function
def f(x, y):
    return x**2 + y**2

# Initial point of the optimization
start = np.array([1.0, 2.0])

# Evaluate the objective on a grid
x = np.linspace(-3, 3, 100)
y = np.linspace(-3, 3, 100)
X, Y = np.meshgrid(x, y)
Z = f(X, Y)

# Plot
plt.figure(figsize=(8, 6))
plt.contour(X, Y, Z, levels=20, cmap='viridis')
plt.colorbar()
plt.scatter(start[0], start[1], color='red', marker='x', label='Start')
plt.title('Contour plot of the function')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()


import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Lasso, Ridge
from sklearn.datasets import make_regression

# L1 norm and its (sub)gradient
def L1_norm(x):
    return np.sum(np.abs(x))

def gradient_L1_norm(x):
    return np.sign(x)

# L2 norm and its gradient
def L2_norm(x):
    return np.sqrt(np.sum(x**2))

def gradient_L2_norm(x):
    norm = L2_norm(x)
    if norm == 0:
        return np.zeros_like(x)
    return x / norm

# Example usage
x = np.array([1.0, -2.0, 3.0])
L1 = L1_norm(x)
grad_L1 = gradient_L1_norm(x)

L2 = L2_norm(x)
grad_L2 = gradient_L2_norm(x)

print("L1 Norm:", L1)
print("Gradient of L1 Norm:", grad_L1)
print("L2 Norm:", L2)
print("Gradient of L2 Norm:", grad_L2)

# Generate data
X, y = make_regression(n_samples=100, n_features=2, noise=0.1, random_state=42)

# L1 regularization (Lasso regression)
lasso = Lasso(alpha=0.1)  # alpha is the regularization strength
lasso.fit(X, y)
print("Lasso Coefficients:", lasso.coef_)

# L2 regularization (Ridge regression)
ridge = Ridge(alpha=0.1)  # alpha is the regularization strength
ridge.fit(X, y)
print("Ridge Coefficients:", ridge.coef_)

# Visualize the norms
x_vals = np.linspace(-2, 2, 100)
y_vals = np.linspace(-2, 2, 100)
X_grid, Y_grid = np.meshgrid(x_vals, y_vals)

Z_L1 = np.abs(X_grid) + np.abs(Y_grid)
Z_L2 = np.sqrt(X_grid**2 + Y_grid**2)

plt.figure(figsize=(12, 6))

plt.subplot(1, 2, 1)
plt.contourf(X_grid, Y_grid, Z_L1, cmap='coolwarm')
plt.title('L1 Norm Contour')
plt.colorbar()

plt.subplot(1, 2, 2)
plt.contourf(X_grid, Y_grid, Z_L2, cmap='coolwarm')
plt.title('L2 Norm Contour')
plt.colorbar()

plt.show()

import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from scipy.integrate import odeint

# Define the logistic growth model
def logistic_growth(N, t, r, K):
    return r * N * (1 - N / K)

# Initial parameter guesses
r_init = 0.1
K_init = 1000
N0 = 10  # Initial cell count

# Simulation time
t = np.linspace(0, 50, 100)

# Generate ground-truth data (using the true parameters)
true_r = 0.3
true_K = 500
N_true = odeint(logistic_growth, N0, t, args=(true_r, true_K))

# Observed data (ground truth plus noise)
noise = np.random.normal(0, 50, N_true.shape)
N_obs = N_true + noise

# Objective: sum of squared errors between observation and simulation
def error(params, t, N_obs):
    r, K = params
    N_sim = odeint(logistic_growth, N0, t, args=(r, K))
    return np.sum((N_obs - N_sim)**2)

# Run the optimization (Nelder-Mead is robust to this noisy objective)
result = minimize(error, [r_init, K_init], args=(t, N_obs), method='Nelder-Mead')

# Extract the optimal parameters
opt_r, opt_K = result.x

# Simulate with the optimal parameters
N_opt = odeint(logistic_growth, N0, t, args=(opt_r, opt_K))

# Plot the results
plt.figure(figsize=(10, 6))
plt.plot(t, N_true, label='True Data')
plt.scatter(t, N_obs, label='Noisy Observations', color='red')
plt.plot(t, N_opt, label='Optimized Model', linestyle='--')
plt.xlabel('Time')
plt.ylabel('Population')
plt.legend()
plt.show()

print(f"Estimated parameters: r = {opt_r:.2f}, K = {opt_K:.2f}")

import numpy as np
import matplotlib.pyplot as plt

# Constants
L = 10.0  # Beam length (m)
P = 1000.0  # Concentrated load (N)
E = 210e9  # Young's modulus (Pa)
I = 8.33e-6  # Second moment of area (m^4)

# Range of x
x = np.linspace(0, L, 100)

# Displacement calculation
def displacement(x, L, P, E, I):
    return (P / (6 * E * I)) * (x**3 - 3 * L * x**2 + 2 * L**2 * x)

w = displacement(x, L, P, E, I)

# Strain calculation
def strain(x, L, P, E, I):
    return (P / (2 * E * I)) * (x - L / 2)

epsilon = strain(x, L, P, E, I)

# Stress calculation
sigma = E * epsilon

# Plotting
fig, ax1 = plt.subplots()

ax1.set_xlabel('Position x (m)')
ax1.set_ylabel('Displacement w (m)', color='tab:blue')
ax1.plot(x, w, 'b-', label='Displacement w(x)')
ax1.tick_params(axis='y', labelcolor='tab:blue')

ax2 = ax1.twinx()
ax2.set_ylabel('Strain ε (m/m) and Stress σ (Pa)', color='tab:red')
ax2.plot(x, epsilon, 'r--', label='Strain ε(x)')
ax2.plot(x, sigma, 'r-', label='Stress σ(x)')
ax2.tick_params(axis='y', labelcolor='tab:red')

fig.tight_layout()
fig.legend(loc='upper left')
plt.title('Beam Deflection, Strain, and Stress Analysis')
plt.show()
# Import the required libraries
import numpy as np
import matplotlib.pyplot as plt

# Generate data
np.random.seed(0)
X = np.linspace(-1, 1, 100)
y = 3 * X + np.random.randn(*X.shape) * 0.33  # y = 3x + noise

# Learning rate and number of epochs
learning_rate = 0.01
epochs = 1000

# Initialize the parameters
W = np.random.randn()
b = np.random.randn()

# Loss function (mean squared error)
def compute_loss(y, y_pred):
    return 0.5 * np.mean((y - y_pred) ** 2)

# Update the parameters with gradient descent
losses = []
for epoch in range(epochs):
    y_pred = W * X + b
    loss = compute_loss(y, y_pred)
    losses.append(loss)
    
    # Compute the gradients
    dW = np.mean((y_pred - y) * X)
    db = np.mean(y_pred - y)
    
    # Update the parameters
    W -= learning_rate * dW
    b -= learning_rate * db
    
    # Report progress
    if epoch % 100 == 0:
        print(f'Epoch {epoch}: Loss = {loss:.4f}')

print(f'Final parameters: W = {W:.4f}, b = {b:.4f}')

# Plot the results
plt.figure(figsize=(10, 6))
plt.scatter(X, y, label='Data points')
plt.plot(X, y_pred, color='red', label='Fitted line')
plt.xlabel("X")
plt.ylabel("y")
plt.title("Linear Regression with Gradient Descent")
plt.legend()
plt.show()

# Plot the loss curve
plt.figure(figsize=(10, 6))
plt.plot(losses)
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.title("Loss over Epochs")
plt.show()


# Basic set operations
A = {1, 2, 3, 4}
B = {3, 4, 5, 6}

# Union
union = A.union(B)
print(f'Union: {union}')

# Intersection
intersection = A.intersection(B)
print(f'Intersection: {intersection}')

# Difference
difference = A.difference(B)
print(f'Difference: {difference}')

# 5.1.3 Taylor's formula
import sympy as sp
import numpy as np
import matplotlib.pyplot as plt

def taylor_series(func, x0, n):
    x = sp.symbols('x')
    series = 0
    for i in range(n+1):
        series += (func.diff(x, i).subs(x, x0) / sp.factorial(i)) * (x - x0)**i
    return series

x = sp.symbols('x')
func = sp.exp(x)  # Example: Exponential function
taylor_exp = taylor_series(func, 0, 5)

print(f'Taylor Series of exp(x) around 0: {taylor_exp}')

# Plot
x_vals = np.linspace(-2, 2, 400)
y_vals = np.exp(x_vals)
taylor_func = sp.lambdify(x, taylor_exp, 'numpy')
y_taylor = taylor_func(x_vals)

plt.figure()
plt.plot(x_vals, y_vals, label='exp(x)')
plt.plot(x_vals, y_taylor, label='Taylor Series (n=5)')
plt.title('Taylor Series Approximation')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend()
plt.grid(True)
plt.show()

# 5.2.2 Series of functions and visualizing convergence
def plot_partial_sums(func, x_vals, terms=5):
    partial_sum = np.zeros_like(x_vals)
    for n in range(terms):
        term = func(n, x_vals)
        partial_sum += term
        plt.plot(x_vals, partial_sum, label=f'Partial Sum n={n+1}')
    plt.xlabel('x')
    plt.ylabel('Partial Sum')
    plt.legend()
    plt.grid(True)
    plt.title('Convergence of Function Series')
    plt.show()

# Example: partial sums of the Taylor series of sin(x)
import math

def sine_series_term(n, x):
    return ((-1)**n / math.factorial(2*n+1)) * x**(2*n+1)

x_vals = np.linspace(-2 * np.pi, 2 * np.pi, 400)
plot_partial_sums(sine_series_term, x_vals, terms=5)



import sympy as sp
import numpy as np
import matplotlib.pyplot as plt

# 6.1.1 Total and partial derivatives
x, y = sp.symbols('x y')
f = x**2 + y**2

# Partial derivatives
f_x = sp.diff(f, x)
f_y = sp.diff(f, y)

print(f'Partial derivative with respect to x: {f_x}')
print(f'Partial derivative with respect to y: {f_y}')

# 6.1.2 Condition for total differentiability
df = f_x * sp.Symbol('dx') + f_y * sp.Symbol('dy')
print(f'Total differential: {df}')

# 6.1.3 Higher-order partial derivatives
f_xx = sp.diff(f_x, x)
f_yy = sp.diff(f_y, y)
f_xy = sp.diff(f_x, y)

print(f'Second order partial derivative with respect to x: {f_xx}')
print(f'Second order partial derivative with respect to y: {f_yy}')
print(f'Mixed partial derivative: {f_xy}')

# 6.1.4 Taylor's formula for multivariable functions
def taylor_series_multivariable(func, vars, point, order):
    # Start from the function value at the expansion point; this simple
    # version omits mixed partial terms (exact here because f = x^2 + y^2
    # has no cross terms)
    series = func.subs([(v, p) for v, p in zip(vars, point)])
    for i in range(1, order + 1):
        for var in vars:
            series += (sp.diff(func, var, i).subs([(v, p) for v, p in zip(vars, point)]) / sp.factorial(i)) * (var - point[vars.index(var)])**i
    return series

taylor_f = taylor_series_multivariable(f, [x, y], [0, 0], 2)
print(f'Taylor series expansion: {taylor_f}')

These topics matter for understanding the basic concepts of vector analysis and fluid dynamics. Concrete Python examples follow for each item.

4.1 Vectors

A vector is a quantity with both magnitude and direction; in Python, vector arithmetic is handled by the numpy library.

import numpy as np

# Define the vectors
a = np.array([2, 3])
b = np.array([4, 1])

# Vector addition
c = a + b

# Dot product
dot_product = np.dot(a, b)

# Cross product (a scalar in 2D)
cross_product = np.cross(a, b)

print("Vector a:", a)
print("Vector b:", b)
print("Sum of the vectors:", c)
print("Dot product:", dot_product)
print("Cross product:", cross_product)

Grad (gradient)

The gradient gives the direction and magnitude of the steepest increase of a scalar field at each point.

import numpy as np
import matplotlib.pyplot as plt

# Define the scalar field
def scalar_field(x, y):
    return np.sin(x) * np.cos(y)

# Compute the gradient (np.gradient's axis 0 is the y-direction,
# axis 1 is the x-direction, so the spacings go in that order)
def gradient(f, x, y):
    fy, fx = np.gradient(f, y, x)
    return fx, fy

x = np.linspace(-2*np.pi, 2*np.pi, 100)
y = np.linspace(-2*np.pi, 2*np.pi, 100)
X, Y = np.meshgrid(x, y)
Z = scalar_field(X, Y)

fx, fy = gradient(Z, x, y)

plt.figure(figsize=(12, 6))

# Plot the scalar field
plt.subplot(1, 2, 1)
plt.contourf(X, Y, Z, cmap='viridis')
plt.title('Scalar Field')
plt.colorbar()

# Plot the gradient field
plt.subplot(1, 2, 2)
plt.quiver(X, Y, fx, fy)
plt.title('Gradient Field')
plt.show()

Div (divergence)

The divergence measures the strength of the sources and sinks of a vector field at each point.

def divergence(fx, fy, x, y):
    # np.gradient's axis 0 is the y-direction, axis 1 is the x-direction
    dfx_dy, dfx_dx = np.gradient(fx, y, x)
    dfy_dy, dfy_dx = np.gradient(fy, y, x)
    return dfx_dx + dfy_dy

# Define the vector field
def vector_field(x, y):
    fx = -y
    fy = x
    return fx, fy

fx, fy = vector_field(X, Y)
div_f = divergence(fx, fy, x, y)

plt.contourf(X, Y, div_f, cmap='RdBu')
plt.title('Divergence Field')
plt.colorbar()
plt.show()

Rot (rotation, or curl)

The curl measures the local rotation ("vorticity") of a vector field.

def curl(fx, fy, x, y):
    # np.gradient's axis 0 is the y-direction, axis 1 is the x-direction
    dfy_dy, dfy_dx = np.gradient(fy, y, x)
    dfx_dy, dfx_dx = np.gradient(fx, y, x)
    return dfy_dx - dfx_dy

curl_f = curl(fx, fy, x, y)

plt.contourf(X, Y, curl_f, cmap='coolwarm')
plt.title('Curl Field')
plt.colorbar()
plt.show()

Gauss's (divergence) theorem states that the flux of a vector field through a closed surface equals the volume integral of its divergence over the enclosed region.

from scipy.integrate import tplquad

# Vector field F = (x, y, z), whose divergence is 3 everywhere
def divergence_3d(x, y, z):
    return 3.0

# Volume integral of div F over the unit ball, in spherical coordinates
# (integrand: div F * r^2 sin(theta)); expected value 3 * (4/3)π = 4π
volume_integral, _ = tplquad(
    lambda r, theta, phi: divergence_3d(r, theta, phi) * r**2 * np.sin(theta),
    0, 2 * np.pi,  # phi
    0, np.pi,      # theta
    0, 1           # r
)

# Flux of F through the unit sphere: F·n = 1 on the surface,
# so the flux equals the surface area 4π
surface_integral = 4 * np.pi

print("Volume integral:", volume_integral)
print("Surface flux:", surface_integral)

4.4 Stokes' theorem and applications

Stokes' theorem states that the line integral of a vector field around a closed curve equals the surface integral of its curl over the surface bounded by that curve.

from scipy.integrate import simpson

# Vector field F = (-y, x); its curl is dfy/dx - dfx/dy = 2 everywhere
fx, fy = -Y, X
dfy_dy, dfy_dx = np.gradient(fy, y, x)  # axis 0 is y, axis 1 is x
dfx_dy, dfx_dx = np.gradient(fx, y, x)
curl_z = dfy_dx - dfx_dy

# Surface integral of the curl over the unit disk (masked grid, so the
# result is only approximately 2π)
integrand = np.where(X**2 + Y**2 <= 1, curl_z, 0.0)
surface_integral = simpson(simpson(integrand, x=x), x=y)

# Line integral of F around the unit circle:
# ∮ F·dr = ∮ (-y dx + x dy) = 2 × (area of the unit disk) = 2π
line_integral = 2 * np.pi

print("Line integral:", line_integral)
print("Surface integral of curl:", surface_integral)

import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt

# Define the constants
b = 2  # Example: b = 2
c = 3  # Example: c = 3

# Define the differential equation as a first-order system
def differential_eq(t, y):
    return [y[1], 1 - b*y[1] - c*y[0]]

# Initial conditions
y0 = [0, 0]  # y(0) = 0, y'(0) = 0

# Time range
t_span = (0, 10)
t_eval = np.linspace(*t_span, 500)

# Solve numerically
sol = solve_ivp(differential_eq, t_span, y0, t_eval=t_eval)

# Plot the result
plt.plot(sol.t, sol.y[0])
plt.title('Solution of $y\'\' + by\' + cy = 1$')
plt.xlabel('t')
plt.ylabel('y(t)')
plt.grid(True)
plt.show()
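As t grows, the solution settles to the constant particular solution y = 1/c (here 1/3), which the numerical result confirms:

# The long-time limit should approach the steady state y = 1/c
print(f"y at t = 10: {sol.y[0][-1]:.4f} (steady state 1/c = {1/c:.4f})")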


import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt

# Define the constants
a = 1  # Example: a = 1
b = 2  # Example: b = 2
c = 3  # Example: c = 3

# Define the differential equation (complex-valued)
def differential_eq_complex(t, y):
    y1, y2 = y[0], y[1]
    return [y2, (np.exp(1j * a * t) - b * y2 - c * y1) / y1]

# Initial conditions (complex)
y0 = [1 + 1j, 0]  # y(0) = 1 + 1j, y'(0) = 0

# Time range
t_span = (0, 10)
t_eval = np.linspace(*t_span, 500)

# Solve numerically (solve_ivp handles complex-valued systems)
sol = solve_ivp(differential_eq_complex, t_span, y0, t_eval=t_eval)

# Plot the result (real and imaginary parts)
plt.plot(sol.t, np.real(sol.y[0]), label='Real part')
plt.plot(sol.t, np.imag(sol.y[0]), label='Imaginary part', linestyle='--')
plt.title('Solution of $y y\'\' + b y\' + c y = e^{iax}$')
plt.xlabel('t')
plt.ylabel('y(t)')
plt.legend()
plt.grid(True)
plt.show()


import numpy as np
import matplotlib.pyplot as plt

# 1.4 Angular momentum and moment (torque) calculations
def angular_momentum(r, p):
    return np.cross(r, p)

def moment(r, F):
    return np.cross(r, F)

# Define the position vector, momentum, and force
r = np.array([1, 2, 3])
p = np.array([4, 5, 6])
F = np.array([7, 8, 9])

# Compute the angular momentum and the moment
L = angular_momentum(r, p)
M = moment(r, F)

print("Angular Momentum:", L)
print("Moment (Torque):", M)

# 1.5 Rigid-body motion
# Translation and rotation of a rigid body over one unit time step (dt = 1)
def rigid_body_motion(position, velocity, angular_velocity):
    return position + velocity, angular_velocity

# Initial position, velocity, and angular velocity
position = np.array([1, 1, 1])
velocity = np.array([0.5, 0.5, 0.5])
angular_velocity = np.array([0.1, 0.2, 0.3])

new_position, new_angular_velocity = rigid_body_motion(position, velocity, angular_velocity)
print("New Position:", new_position)
print("New Angular Velocity:", new_angular_velocity)

# 1.6 Coriolis force calculation
def coriolis_force(omega, v, m):
    return -2 * m * np.cross(omega, v)

# Angular velocity, object velocity, and mass
omega = np.array([0, 0, 1])
v = np.array([1, 0, 0])
m = 2

F_C = coriolis_force(omega, v, m)
print("Coriolis Force:", F_C)

# 1.7 Principle of virtual work
def virtual_work(F, delta_r):
    return np.dot(F, delta_r)

# Define the force and the virtual displacement
F = np.array([2, 3, 4])
delta_r = np.array([1, 0, 0])

delta_W = virtual_work(F, delta_r)
print("Virtual Work:", delta_W)
