#---------------------------------------------
# 必要ライブラリ / Required Libraries
#---------------------------------------------
!pip install mido soundfile
import numpy as np
import soundfile as sf
import scipy.signal as sg
import mido
from google.colab import files
from IPython.display import Audio
#---------------------------------------------
# シグモイド系補助関数 / Sigmoid Functions
#---------------------------------------------
def calc_2eq(x, p1, p2, p3, p4):
    # Rising sigmoid: goes from p2 (for x << p1) up to p2 + p4 (for x >> p1); p3 sets the slope
    return 1 / (1 + np.exp(-(x - p1) / p3)) * p4 + p2

def calc_5eq(x, p1, p2, p3, p4):
    # Falling sigmoid: goes from p2 + p4 (for x << p1) down to p2 (for x >> p1); p3 sets the slope
    return (1 - 1 / (1 + np.exp(-(x - p1) / p3))) * p4 + p2
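# Illustrative shapes (these notes are explanatory only, not part of the original script):
# with the parameter sets used inside gen_flute() below,
#   calc_2eq(freq, 0, -0.1, 8000 / 12, 0.2)  rises smoothly from 0 toward 0.1 as freq grows, and
#   calc_5eq(freq, 0, 0.1, 8000 / 12, 0.2)   falls smoothly from 0.2 toward 0.1,
# which is how the per-partial delay, attack, and release times are shaped from frequency.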
#---------------------------------------------
# フルート音合成関数 / Flute Sound Synthesizer
#---------------------------------------------
def gen_flute(fs, note_number, velocity, gate):
    # Total length = gate time plus 1 s of tail for the release
    duration = gate + 1
    N = int(fs * duration)
    y = np.zeros(N)
    # Fundamental frequency from the MIDI note number (equal temperament, A4 = 440 Hz)
    f0 = 440 * (2 ** ((note_number - 69) / 12))
    num_partials = 30
    # Per-partial amplitude weights and frequency/amplitude jitter amounts
    vca_depth_tbl = np.ones((128, num_partials)) * 0.01
    vca_depth_tbl[60:75, :] = np.random.rand(15, num_partials) * 0.5
    vca_depth = vca_depth_tbl[note_number, :]
    freq = np.arange(1, num_partials + 1) * f0
    a_freq_jitter = calc_2eq(note_number, 108, 1, 150 / 12, 20)
    # Envelope parameters per partial: sustain level, onset delay, attack and release times
    vca_S = np.ones(num_partials)
    vca_delay = calc_2eq(freq, 0, -0.1, 8000 / 12, 0.2)
    vca_A = calc_5eq(freq, 0, 0.1, 8000 / 12, 0.2)
    vca_R = calc_5eq(freq, 0, 0.2, 8000 / 12, 0.4)
    a_amp_jitter = calc_2eq(freq / f0, 1, -0.3, 10 / 12, 0.8)
    # Additive synthesis: accumulate the partials one by one
    for i in range(num_partials):
        # Low-pass filtered noise as slow frequency jitter
        rnd = np.random.rand(N) * 2 - 1
        b, a = sg.iirfilter(2, 40 / (fs // 2), btype='lowpass')
        jitter = sg.lfilter(b, a, rnd)
        jitter /= np.max(np.abs(jitter))
        f_vco = freq[i] + a_freq_jitter * jitter
        # Phase-accumulating sine oscillator following the jittered frequency
        sin_wave = np.zeros(N)
        phase = 0
        for n in range(N):
            sin_wave[n] = np.sin(2 * np.pi * phase)
            phase += f_vco[n] / fs
            if phase >= 1:
                phase -= 1
        # Envelope: delay, cosine attack, sustain until the gate ends, cosine release
        env = np.zeros(N)
        d_n = int(vca_delay[i] * fs)
        a_n = int(vca_A[i] * fs)
        r_n = int(vca_R[i] * fs)
        gate_n = int(gate * fs)
        for n in range(d_n, d_n + a_n):
            env[n] = vca_S[i] * (0.5 - 0.5 * np.cos(np.pi * (n - d_n) / a_n))
        for n in range(d_n + a_n, gate_n):
            env[n] = vca_S[i]
        for n in range(gate_n, min(gate_n + r_n, N)):
            env[n] = vca_S[i] * (0.5 + 0.5 * np.cos(np.pi * (n - gate_n) / r_n))
        env *= vca_depth[i]
        # Low-pass filtered noise again, this time as slow amplitude jitter
        rnd = np.random.rand(N) * 2 - 1
        jitter = sg.lfilter(b, a, rnd)
        jitter /= np.max(np.abs(jitter))
        env_vca = env * (1 + a_amp_jitter[i] * jitter)
        y += sin_wave * env_vca
    # Scale to the MIDI velocity and normalize this note
    y *= velocity / 127 / np.max(np.abs(y))
    return y
#---------------------------------------------
# MIDIファイル読み込み / MIDI Import
#---------------------------------------------
print("🎵 Please upload a MIDI file")
uploaded = files.upload()
midi_path = list(uploaded.keys())[0]
midi_data = mido.MidiFile(midi_path)
print("✅ MIDI loaded:", midi_path)
#---------------------------------------------
# 合成パラメータ設定 / Global Parameters
#---------------------------------------------
fs = 48000
wav_out_name = "flute_midi_output.wav"
# MIDI tempo (falls back to 120 BPM if no set_tempo message is found)
tempo = 500000  # default (120 BPM)
for msg in midi_data:
    if msg.type == 'set_tempo':
        tempo = msg.tempo
        break
ticks_per_beat = midi_data.ticks_per_beat
seconds_per_tick = tempo / 1_000_000 / ticks_per_beat
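# Worked example (illustrative numbers, not taken from any particular file):
# with tempo = 500000 us per beat and ticks_per_beat = 480,
# seconds_per_tick = 500000 / 1_000_000 / 480 ≈ 0.00104 s,
# so a quarter note of 480 ticks lasts 0.5 s, i.e. 120 BPM.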
#---------------------------------------------
# MIDIイベント→音合成 / Synthesize per Note
#---------------------------------------------
audio = np.zeros(1)
for track in midi_data.tracks:
    current_time = 0
    for i, msg in enumerate(track):
        current_time += msg.time
        if msg.type == 'note_on' and msg.velocity > 0:
            note = msg.note
            velocity = msg.velocity
            # Scan forward for the matching note-off to get the gate length in ticks
            gate_ticks = 0
            for submsg in track[i + 1:]:
                gate_ticks += submsg.time
                is_off = (submsg.type == 'note_off'
                          or (submsg.type == 'note_on' and submsg.velocity == 0))
                if is_off and submsg.note == note:
                    break
            gate_time = gate_ticks * seconds_per_tick
            # Synthesize this note
            y = gen_flute(fs, note, velocity, gate_time)
            # Mix it into the output buffer at the note-on position
            pad = int(current_time * seconds_per_tick * fs)
            if pad + len(y) > len(audio):
                audio = np.pad(audio, (0, pad + len(y) - len(audio)))
            audio[pad:pad + len(y)] += y
#---------------------------------------------
# 正規化・保存・再生 / Normalize, Save, Play
#---------------------------------------------
audio /= np.max(np.abs(audio))
sf.write(wav_out_name, audio, fs, subtype='PCM_16')
print("✅ Exported:", wav_out_name)
Audio(audio, rate=fs)
🔊 How to Run (Google Colab recommended)
- Paste the code above into a Colab cell and run it.
- When prompted, upload a MIDI file (e.g. `flute_melody.mid`); a minimal way to create a test file is sketched below.
- `flute_midi_output.wav` is generated automatically and can be played back inside Colab.
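If you don't have a MIDI file handy, the following sketch writes a short single-track test melody with mido. The file name `flute_melody.mid`, the note list, and the fixed 120 BPM tempo are arbitrary choices for illustration, not part of the script above.

```python
import mido

# Create a single-track MIDI file with a fixed tempo and a few quarter notes.
mid = mido.MidiFile(ticks_per_beat=480)
track = mido.MidiTrack()
mid.tracks.append(track)
track.append(mido.MetaMessage('set_tempo', tempo=500000, time=0))  # 500000 us/beat = 120 BPM
for note in [72, 74, 76, 79, 76, 74, 72]:  # arbitrary C-major figure
    track.append(mido.Message('note_on', note=note, velocity=100, time=0))
    track.append(mido.Message('note_off', note=note, velocity=0, time=480))  # one beat later
mid.save('flute_melody.mid')
```

Run this in another cell (or locally) and upload the resulting file when prompted.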
This implementation:
- reads the MIDI notes with `mido`,
- computes each note's on-to-off duration (gate) automatically, and
- performs flute-like additive synthesis (30 partials plus jitter) in `gen_flute()`.
Acoustically, the soft harmonic spectrum, the frequency and amplitude jitter, and the natural decay give a simple approximation of a real flute's breathy character.
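To hear that character without uploading a MIDI file, `gen_flute()` can also be called on its own. A minimal sketch, assuming the cell above has already been run; the note number, velocity, gate length, and output file name are arbitrary illustration values:

```python
# Synthesize a single C5 (MIDI note 72) at velocity 100 with a 0.8 s gate.
y = gen_flute(fs, 72, 100, 0.8)

# Save it and play it back in the notebook.
sf.write('flute_single_note.wav', y, fs, subtype='PCM_16')
Audio(y, rate=fs)
```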