I ran an experiment with a perceptron that uses a quantum algorithm, on PyTorch 2.7.1+cu126.
I used ChatGPT o3 to write it.
It produces quite interesting results (they are probabilistic).
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Quantum‑style perceptron with path visualisation (CUDA 12.4 compatible)
Layers:
0. |0000⟩ (implicit)
1. H on every qubit
2. Ry(θᵢ) on each qubit — random “weights”
3‑… random CNOT “synapses” — three by default
For each shot we *measure* after every layer, collapse the state,
and feed the resulting basis state to the next layer.
This yields an integer path, e.g. 0 → 6 → 3 → 7 → 15.
The script shows:
• Top‑10 most frequent paths
• Final measurement histogram (same as before)
Author : Tsubasa Kato - Inspire Search Corp.
Date : 2025‑07‑16
"""
import math
import random
from collections import Counter, defaultdict
import torch
# -----------------------------------------------------------
# 0. Hyper‑parameters
# -----------------------------------------------------------
NUM_QUBITS = 4
NUM_SYNAPSES = 3 # number of random CNOTs
SHOTS = 1024 # how many trajectories to sample
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
DTYPE = torch.complex64
# -----------------------------------------------------------
# 1. Primitive gates
# -----------------------------------------------------------
I2 = torch.eye(2, dtype=DTYPE, device=device)
H = (1 / math.sqrt(2)) * torch.tensor(
    [[1, 1],
     [1, -1]], dtype=DTYPE, device=device
)
def Ry(theta: float) -> torch.Tensor:
    """Single-qubit rotation around Y."""
    return torch.tensor(
        [[ math.cos(theta / 2), -math.sin(theta / 2)],
         [ math.sin(theta / 2),  math.cos(theta / 2)]],
        dtype=DTYPE,
        device=device,
    )
def embed_single(gate: torch.Tensor, target: int, n: int) -> torch.Tensor:
    mats = [gate if q == target else I2 for q in range(n)]
    return torch.kron(torch.kron(torch.kron(mats[3], mats[2]), mats[1]), mats[0]) \
        if n == 4 else torch.kron(torch.kron(mats[2], mats[1]), mats[0])  # generic kron_all is slower for n<=4
def embed_cnot(ctrl: int, tgt: int, n: int) -> torch.Tensor:
    """CNOT as a 2**n x 2**n permutation matrix (little-endian bit order, qubit 0 = LSB)."""
    dim = 2 ** n
    op = torch.zeros((dim, dim), dtype=DTYPE, device=device)
    for basis in range(dim):
        bits = [(basis >> q) & 1 for q in range(n)]
        flip = basis ^ (1 << tgt) if bits[ctrl] else basis
        op[basis, flip] = 1.0
    return op
# -----------------------------------------------------------
# 2. Build each *layer* operator and keep them separately
# -----------------------------------------------------------
layers = []
# (Layer 1) H on every qubit
U_H = torch.eye(2 ** NUM_QUBITS, dtype=DTYPE, device=device)
for q in range(NUM_QUBITS):
    U_H = embed_single(H, q, NUM_QUBITS) @ U_H
layers.append(U_H)
# (Layer 2) random Ry on every qubit
U_RY = torch.eye(2 ** NUM_QUBITS, dtype=DTYPE, device=device)
for q in range(NUM_QUBITS):
    θ = random.uniform(0, 2 * math.pi)
    U_RY = embed_single(Ry(θ), q, NUM_QUBITS) @ U_RY
layers.append(U_RY)
# (Layers 3…) random CNOTs
connected = set()
while len(connected) < NUM_SYNAPSES:
    a, b = random.sample(range(NUM_QUBITS), 2)
    if (a, b) not in connected and (b, a) not in connected:
        layers.append(embed_cnot(a, b, NUM_QUBITS))
        connected.add((a, b))
# -----------------------------------------------------------
# 3. Function to propagate ONE shot through all layers
# -----------------------------------------------------------
def single_trajectory() -> tuple:
    """Return a tuple of decimal basis states after each layer."""
    state = torch.zeros(2 ** NUM_QUBITS, dtype=DTYPE, device=device)
    state[0] = 1.0  # |0000⟩
    path = []
    for U in layers:
        state = U @ state
        probs = (state.abs() ** 2).to(torch.float32)
        idx = torch.multinomial(probs, 1, replacement=True).item()
        path.append(idx)
        # collapse: new one-hot basis vector
        state.zero_()
        state[idx] = 1.0
    return tuple(path)  # e.g. (6, 3, 7, 15)
# -----------------------------------------------------------
# 4. Sample many trajectories
# -----------------------------------------------------------
path_counter = Counter()
final_counts = Counter()
for _ in range(SHOTS):
    traj = single_trajectory()
    path_counter[traj] += 1
    final_counts[traj[-1]] += 1
# -----------------------------------------------------------
# 5. Pretty‑print results
# -----------------------------------------------------------
def fmt_bits(idx: int) -> str:
    """Bit string with qubit 0 as the leftmost character (the usual binary string reversed)."""
    return format(idx, f"0{NUM_QUBITS}b")[::-1]
print(f"\nDevice : {device} | PyTorch {torch.__version__}")
print(f"Layers : 1 (H) → 2 (Ry) → 3…{2+NUM_SYNAPSES} (CNOTs)")
print(f"Shots : {SHOTS}\n")
print("Top‑10 activation *paths* (decimal indices):")
for path, cnt in path_counter.most_common(10):
    decimal_path = " -> ".join(map(str, path))
    binary_path = " -> ".join(fmt_bits(i) for i in path)
    print(f"{decimal_path:<20} ({cnt:>4} shots) [{binary_path}]")
print("\nFinal measurement histogram:")
for idx, cnt in final_counts.most_common():
    print(f"{fmt_bits(idx)} ({idx:>2}) : {cnt}")
Example run
Device : cuda | PyTorch 2.7.1+cu126
Layers : 1 (H) → 2 (Ry) → 3…5 (CNOTs)
Shots : 1024
Top‑10 activation *paths* (decimal indices):
2 -> 3 -> 3 -> 3 -> 2 ( 50 shots) [0100 -> 1100 -> 1100 -> 1100 -> 0100]
5 -> 4 -> 12 -> 13 -> 13 ( 49 shots) [1010 -> 0010 -> 0011 -> 1011 -> 1011]
15 -> 14 -> 6 -> 7 -> 6 ( 48 shots) [1111 -> 0111 -> 0110 -> 1110 -> 0110]
7 -> 6 -> 14 -> 15 -> 14 ( 47 shots) [1110 -> 0110 -> 0111 -> 1111 -> 0111]
6 -> 7 -> 15 -> 14 -> 15 ( 43 shots) [0110 -> 1110 -> 1111 -> 0111 -> 1111]
11 -> 10 -> 10 -> 10 -> 11 ( 42 shots) [1101 -> 0101 -> 0101 -> 0101 -> 1101]
9 -> 8 -> 8 -> 8 -> 8 ( 41 shots) [1001 -> 0001 -> 0001 -> 0001 -> 0001]
4 -> 5 -> 13 -> 12 -> 12 ( 41 shots) [0010 -> 1010 -> 1011 -> 0011 -> 0011]
14 -> 15 -> 7 -> 6 -> 7 ( 40 shots) [0111 -> 1111 -> 1110 -> 0110 -> 1110]
3 -> 2 -> 2 -> 2 -> 3 ( 39 shots) [1100 -> 0100 -> 0100 -> 0100 -> 1100]
Final measurement histogram:
0100 ( 2) : 78
0111 (14) : 78
0110 ( 6) : 77
0011 (12) : 68
1110 ( 7) : 68
1011 (13) : 68
1000 ( 1) : 63
1001 ( 9) : 62
1101 (11) : 61
1100 ( 3) : 60
1010 ( 5) : 60
1111 (15) : 59
0101 (10) : 57
0001 ( 8) : 57
0000 ( 0) : 55
0010 ( 4) : 53
*The bit strings are written in reverse order; when I asked ChatGPT o3 about it, it said the string is simply reversed and either convention is fine. For example, "2" is shown as "0100".
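
To make the reversal concrete, here is a small standalone check (the variable names are only illustrative): format() gives the conventional most-significant-bit-first string, fmt_bits() in the script reverses it so that qubit 0 comes first, and reversing once more recovers the original index.

idx = 2
msb_first = format(idx, "04b")            # "0010", conventional binary for 2
qubit0_first = msb_first[::-1]            # "0100", what the script prints
assert int(qubit0_first[::-1], 2) == idx  # reversing again recovers the index
print(msb_first, qubit0_first)            # 0010 0100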