Edited at

# 二層のニューラルネットワークの実装2

More than 1 year has passed since last update.

ゼロから作るDeep Learningの第4章を参考にした。

TwoLayerNet.py

```python
import numpy as np

class TwoLayerNet:

def __init__(self,input_size,hidden_size,output_size,weight_init_std=0.01):
#重みの初期化
self.params = {}
#784 * 50の重み行列
self.params['W1'] = weight_init_std * np.random.randn(input_size,hidden_size)
#50 * 10の重み行列
self.params['W2'] = weight_init_std * np.random.randn(hidden_size,output_size)
#バイアス，隠れ層の数だけ
self.params['b1'] = np.zeros(hidden_size)
#バイアス，出力層の数だけ
self.params['b2'] = np.zeros(output_size)

def sigmoid(self,x):
return 1 / (1 + np.exp(-x))

def softmax(self, a):
    """Softmax of the scores in `a`, with overflow protection.

    Fix: the original used a single global max and global sum. For a 2-D
    batch that divides every row by the grand total, so individual rows no
    longer sum to 1 and the cross-entropy loss is computed on mis-scaled
    probabilities (argmax-based accuracy happened to survive). A 2-D input
    is now normalized per row; 1-D behavior is unchanged.
    """
    if a.ndim == 2:
        a = a - np.max(a, axis=1, keepdims=True)  # overflow protection per row
        exp_a = np.exp(a)
        return exp_a / np.sum(exp_a, axis=1, keepdims=True)
    c = np.max(a)
    exp_a = np.exp(a - c)  # overflow protection
    return exp_a / np.sum(exp_a)

def numerical_gradient(self, f, x):
    """Numerical gradient of scalar function `f` at array `x`.

    NOTE(review): reconstructed — the pasted article lost this method's
    `def` line and the bodies of the ndim dispatch (the bare
    `if X.ndim == 1: / else:` lines). The visible fragment matches the
    book's (Deep Learning from Scratch, ch.4) central-difference gradient
    helper: a 1-D vector is differentiated directly, a 2-D array row by
    row. `x` is perturbed in place and restored, so it must hold floats.
    """
    h = 1e-4  # 0.0001

    def _grad_1d(vec):
        # Central difference (f(x+h) - f(x-h)) / 2h for each component.
        grad = np.zeros_like(vec)
        for idx in range(vec.size):
            tmp_val = vec[idx]
            vec[idx] = float(tmp_val) + h
            fxh1 = f(vec)  # f(x+h)
            vec[idx] = tmp_val - h
            fxh2 = f(vec)  # f(x-h)
            grad[idx] = (fxh1 - fxh2) / (2 * h)
            vec[idx] = tmp_val  # restore the original value
        return grad

    if x.ndim == 1:
        return _grad_1d(x)
    grad = np.zeros_like(x)
    for idx, row in enumerate(x):
        grad[idx] = _grad_1d(row)
    return grad

def cross_entropy_error(self,y,t):
if y.ndim == 1:
t = t.reshape(1,t.size)
y = y.reshape(1,y.size)
batch_size = y.shape[0]
return -np.sum(t * np.log(y)) / batch_size

def predict(self, x):
    """Forward pass: input -> sigmoid hidden layer -> softmax output."""
    params = self.params
    # Hidden layer: affine transform a = xW1 + b1 squashed by the sigmoid.
    hidden = self.sigmoid(np.dot(x, params['W1']) + params['b1'])
    # Output layer: affine transform followed by softmax probabilities.
    return self.softmax(np.dot(hidden, params['W2']) + params['b2'])

def loss(self, x, t):
    """Cross-entropy loss of the network's predictions on `x` against targets `t`."""
    return self.cross_entropy_error(self.predict(x), t)

def accuracy(self, x, t):
    """Fraction of rows in `x` whose argmax prediction matches the one-hot label in `t`."""
    predicted = np.argmax(self.predict(x), axis=1)
    actual = np.argmax(t, axis=1)
    return np.sum(predicted == actual) / float(x.shape[0])

# NOTE(review): stray fragment — in the book this line opens
# TwoLayerNet.numerical_gradient(self, x, t), building a closure over the
# batch (x, t) so the loss can be differentiated w.r.t. each parameter
# array; the rest of that method was lost when the article was pasted.
loss_W = lambda W: self.loss(x,t)

```

LearningMNIST.py

```python
#coding: utf-8
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.preprocessing import OneHotEncoder

# NOTE(review): fetch_mldata was deprecated and later removed from
# scikit-learn (0.22); modern code should use fetch_openml('mnist_784').
# Kept as-is for the environment this article targets.
mnist = fetch_mldata('MNIST original', data_home=".")

# Conventional MNIST split: first 60,000 samples for training,
# the remainder for testing.
x_train = mnist['data'][:60000]
t_train = mnist['target'][:60000]

# OneHotEncoder expects a 2-D column of labels, hence reshape + transpose.
t_train = t_train.reshape(1, -1).transpose()
# encode label
# NOTE(review): `n_values` is deprecated in newer scikit-learn (replaced by
# `categories`), and max(t_train)+1 evaluates to a 1-element array here —
# tolerated by old versions; verify against the installed release.
encoder = OneHotEncoder(n_values=max(t_train)+1)
t_train = encoder.fit_transform(t_train).toarray()

x_test = mnist['data'][60000:]
t_test = mnist['target'][60000:]

# Same column reshape + one-hot encoding for the test labels.
t_test = t_test.reshape(1, -1).transpose()
# encode label
encoder = OneHotEncoder(n_values=max(t_test)+1)
t_test = encoder.fit_transform(t_test).toarray()

# Convert pixels to float and divide by the max so values lie in [0, 1].
x_train = x_train.astype(np.float64)
x_train /= x_train.max()
x_test = x_test.astype(np.float64)
x_test /= x_test.max()

# Per-iteration / per-epoch metrics collected for plotting later.
train_loss_list = []
train_acc_list = []
test_acc_list = []

#hyper parameter
# NOTE(review): this value is dead — it is overwritten with 500 right
# before the training loop below.
iters_num = 2
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1

# Number of iterations per epoch
#iter_per_epoch = max(train_size / batch_size, 1)
# NOTE(review): hard-coded to 50 instead of the commented-out formula
# (which gives 600 for 60,000 samples / batch 100), so an "epoch" here
# means 50 iterations, not a full pass over the data.
iter_per_epoch = 50

# 784 inputs (28x28 pixels), 50 hidden units, 10 output classes.
network = TwoLayerNet(input_size=784,hidden_size=50,output_size=10)

iters_num = 500
for i in range(iters_num):
    # NOTE(review): reconstructed — the pasted article lost the mini-batch
    # sampling, gradient computation, and parameter update; this follows
    # the book's ch.4 SGD loop, which the surviving lines clearly came from.
    # Draw a random mini-batch for this iteration.
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # Numerical gradient of the loss w.r.t. each parameter. loss_W ignores
    # its argument: numerical_gradient perturbs the parameter array in
    # place, and the loss is re-evaluated through the network.
    loss_W = lambda W: network.loss(x_batch, t_batch)
    grads = {}
    for key in ('W1', 'W2', 'b1', 'b2'):
        grads[key] = network.numerical_gradient(loss_W, network.params[key])

    # Vanilla SGD update.
    for key in ('W1', 'W2', 'b1', 'b2'):
        network.params[key] -= learning_rate * grads[key]

    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)
    # Compute recognition accuracy once per epoch
    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc))
```

この結果が下のグラフである．縦軸が精度，横軸がエポック数を表す．

エポックごとに，精度が向上してる．