# 概要

chainerの作法、調べてみた。
fizzbuzz問題、やってみた。

# サンプルコード

lossが、softmaxのタイプ。

```python
import numpy as np
from chainer import Variable, datasets, iterators
from chainer import optimizers
from chainer import Chain
from chainer import training
from chainer.training import extensions
import chainer.functions as F
import chainer.links as L

class MLP(Chain):
    """Two-layer perceptron: Linear -> tanh -> Linear (no output activation)."""

    def __init__(self, n_in, n_units, n_out):
        # Register both links with the chain so their parameters are
        # picked up by the optimizer.
        super(MLP, self).__init__(
            l1=L.Linear(n_in, n_units),
            l2=L.Linear(n_units, n_out),
        )

    def __call__(self, x):
        # Hidden layer uses tanh; the output layer stays linear because
        # the loss function is applied outside this chain.
        hidden = F.tanh(self.l1(x))
        return self.l2(hidden)

def binary_encode(i, num_digits):
    """Return the lowest *num_digits* bits of *i* as a numpy array, LSB first."""
    bits = []
    for shift in range(num_digits):
        bits.append((i >> shift) & 1)
    return np.array(bits)

def fizz_buzz_encode(i):
    """Class label for *i*: 0 = plain number, 1 = fizz, 2 = buzz, 3 = fizzbuzz."""
    by_three = i % 3 == 0
    by_five = i % 5 == 0
    if by_three and by_five:
        return 3
    if by_five:
        return 2
    if by_three:
        return 1
    return 0

def fizz_buzz(i, prediction):
    """Map a class index to its printed form: 0 -> the number itself."""
    words = (str(i), "fizz", "buzz", "fizzbuzz")
    return words[prediction]

def make_data():
    """Build the training set: 7-bit encodings of 0..100 with class labels."""
    numbers = range(101)
    X = np.array([binary_encode(n, 7) for n in numbers], dtype=np.float32)
    T = np.array([fizz_buzz_encode(n) for n in numbers], dtype=np.int32)
    return datasets.TupleDataset(X, T)

def main():
    """Train a classifier on fizzbuzz labels and print predictions for 1..100."""
    train_data = make_data()
    n_epoch = 400
    batch_size = 40

    # Classifier's default loss is softmax cross entropy over the 4 classes.
    model = L.Classifier(MLP(7, 18, 4))
    optimizer = optimizers.RMSprop()
    optimizer.setup(model)

    train_iter = iterators.SerialIterator(train_data, batch_size)
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (n_epoch, 'epoch'), out='result')
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy']))
    trainer.run()

    # Re-encode 0..100; row n of `scores` corresponds to the number n,
    # so numbers 1..100 are rows 1..100.
    inputs = np.array([binary_encode(n, 7) for n in range(101)], dtype=np.float32)
    scores = model.predictor(inputs).data
    for n in range(100):
        print(fizz_buzz(n + 1, scores[n + 1].argmax()))

if __name__ == '__main__':
    # Run training only when executed as a script.
    main()

```

# サンプルコード(mean_squared版)

lossが、mean_squaredのタイプ。
こちらは、教師データをone-hotベクトルで与えるタイプ。

```python
import numpy as np
from chainer import Variable, datasets, iterators
from chainer import optimizers
from chainer import Chain
from chainer import training
from chainer.training import extensions
import chainer.functions as F
import chainer.links as L

class MLP(Chain):
    """Two-layer perceptron: Linear -> tanh -> Linear (no output activation)."""

    def __init__(self, n_in, n_units, n_out):
        # Register both links with the chain so their parameters are
        # picked up by the optimizer.
        super(MLP, self).__init__(
            l1=L.Linear(n_in, n_units),
            l2=L.Linear(n_units, n_out),
        )

    def __call__(self, x):
        # Hidden layer uses tanh; the output layer stays linear because
        # the loss function is applied outside this chain.
        hidden = F.tanh(self.l1(x))
        return self.l2(hidden)

def binary_encode(i, num_digits):
    """Return the lowest *num_digits* bits of *i* as a numpy array, LSB first."""
    bits = []
    for shift in range(num_digits):
        bits.append((i >> shift) & 1)
    return np.array(bits)

def fizz_buzz_encode(i):
    """One-hot target for *i*: index 0 = plain number, 1 = fizz, 2 = buzz, 3 = fizzbuzz."""
    if i % 15 == 0:
        label = 3
    elif i % 5 == 0:
        label = 2
    elif i % 3 == 0:
        label = 1
    else:
        label = 0
    one_hot = np.zeros(4, dtype=int)
    one_hot[label] = 1
    return one_hot

def fizz_buzz(i, prediction):
    """Map a class index to its printed form: 0 -> the number itself."""
    words = (str(i), "fizz", "buzz", "fizzbuzz")
    return words[prediction]

def make_data():
    """Build the training set: 7-bit encodings of 0..100 with one-hot float targets."""
    numbers = range(101)
    X = np.array([binary_encode(n, 7) for n in numbers], dtype=np.float32)
    # Targets are float32 because the loss is mean squared error.
    T = np.array([fizz_buzz_encode(n) for n in numbers], dtype=np.float32)
    return datasets.TupleDataset(X, T)

def main():
    """Train with mean squared error on one-hot targets and print predictions for 1..100."""
    train_data = make_data()
    n_epoch = 400
    batch_size = 40

    # Targets are one-hot float vectors, so use MSE instead of the default
    # softmax cross entropy loss.
    model = L.Classifier(MLP(7, 18, 4), lossfun=F.mean_squared_error)
    # Accuracy cannot be computed against one-hot float targets.
    model.compute_accuracy = False

    optimizer = optimizers.RMSprop()
    optimizer.setup(model)

    train_iter = iterators.SerialIterator(train_data, batch_size)
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (n_epoch, 'epoch'), out='result')
    trainer.extend(extensions.LogReport())
    # Fix: report only metrics that are actually computed — 'main/accuracy'
    # is never reported because compute_accuracy is False above, so the
    # original listing produced a permanently empty column.
    trainer.extend(extensions.PrintReport(['epoch', 'main/loss']))
    trainer.run()

    # Re-encode 0..100; row n of `scores` corresponds to the number n,
    # so numbers 1..100 are rows 1..100.
    inputs = np.array([binary_encode(n, 7) for n in range(101)], dtype=np.float32)
    scores = model.predictor(inputs).data
    for n in range(100):
        print(fizz_buzz(n + 1, scores[n + 1].argmax()))

if __name__ == '__main__':
    # Run training only when executed as a script.
    main()

```