
Chainer conventions, part 6


Overview

I looked into Chainer conventions.
I tried the FizzBuzz problem.

Sample code

The loss here is the softmax type (softmax cross-entropy with integer class labels).

import numpy as np
from chainer import Variable, datasets, iterators
from chainer import optimizers
from chainer import Chain
from chainer import training
from chainer.training import extensions
import chainer.functions as F
import chainer.links as L


# Two-layer perceptron: n_in -> n_units (tanh) -> n_out.
class MLP(Chain):
    def __init__(self, n_in, n_units, n_out):
        super(MLP, self).__init__(
            l1 = L.Linear(n_in, n_units),
            l2 = L.Linear(n_units, n_out))
    def __call__(self, x):
        return self.l2(F.tanh(self.l1(x)))

# Encode the integer i as a num_digits-bit binary vector (least significant bit first).
def binary_encode(i, num_digits):
    return np.array([i >> d & 1 for d in range(num_digits)])

# Class labels: 0 = the number itself, 1 = fizz, 2 = buzz, 3 = fizzbuzz.
def fizz_buzz_encode(i):
    if i % 15 == 0:
        return 3
    elif i % 5 == 0:
        return 2
    elif i % 3 == 0:
        return 1
    else:
        return 0

# Convert a predicted class index back to the string to print.
def fizz_buzz(i, prediction):
    return [str(i), "fizz", "buzz", "fizzbuzz"][prediction]

# Training data: the numbers 0..100 as 7-bit inputs, paired with integer class labels.
def make_data():
    X = np.array([binary_encode(i, 7) for i in range(101)], dtype = np.float32)
    T = np.array([fizz_buzz_encode(i) for i in range(101)], dtype = np.int32)
    return datasets.TupleDataset(X, T)

def main():
    train = make_data()
    epoch = 400
    batchsize = 40
    # Wrap the MLP in a Classifier, which computes the loss and accuracy for reporting.
    model = L.Classifier(MLP(7, 18, 4))
    optimizer = optimizers.RMSprop()
    optimizer.setup(model)
    train_iter = iterators.SerialIterator(train, batchsize)
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (epoch, 'epoch'), out = 'result')
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy']))
    trainer.run()
    # Predict every number again and print the FizzBuzz output for 1..100.
    x = np.array([binary_encode(i, 7) for i in range(101)], dtype = np.float32)
    y = model.predictor(x).data
    for i in range(100):
        print(fizz_buzz(i + 1, y[i + 1].argmax()))

if __name__ == '__main__':
    main()
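
For reference, L.Classifier uses softmax_cross_entropy as its default loss function, so the model construction above is equivalent to passing it explicitly:

model = L.Classifier(MLP(7, 18, 4), lossfun = F.softmax_cross_entropy)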


Sample code

The loss here is the mean squared error type.
In this version, the targets are one-hot vectors rather than integer class labels.
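
To make the difference concrete, here is how the target for the same number looks under the two fizz_buzz_encode definitions (a small illustration, not part of the listing below):

# First sample (softmax_cross_entropy): integer class label
fizz_buzz_encode(15)   # -> 3
# Second sample (mean_squared_error): one-hot vector
fizz_buzz_encode(15)   # -> array([0, 0, 0, 1])

The full listing follows.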

import numpy as np
from chainer import Variable, datasets, iterators
from chainer import optimizers
from chainer import Chain
from chainer import training
from chainer.training import extensions
import chainer.functions as F
import chainer.links as L


class MLP(Chain):
    def __init__(self, n_in, n_units, n_out):
        super(MLP, self).__init__(
            l1 = L.Linear(n_in, n_units),
            l2 = L.Linear(n_units, n_out))
    def __call__(self, x):
        return self.l2(F.tanh(self.l1(x)))

def binary_encode(i, num_digits):
    return np.array([i >> d & 1 for d in range(num_digits)])

# One-hot target vectors instead of integer class labels.
def fizz_buzz_encode(i):
    if i % 15 == 0:
        return np.array([0, 0, 0, 1])
    elif i % 5 == 0:
        return np.array([0, 0, 1, 0])
    elif i % 3 == 0:
        return np.array([0, 1, 0, 0])
    else:
        return np.array([1, 0, 0, 0])

def fizz_buzz(i, prediction):
    return [str(i), "fizz", "buzz", "fizzbuzz"][prediction]

def make_data():
    X = np.array([binary_encode(i, 7) for i in range(101)], dtype = np.float32)
    # Targets are float32 one-hot vectors, as required by mean_squared_error.
    T = np.array([fizz_buzz_encode(i) for i in range(101)], dtype = np.float32)
    return datasets.TupleDataset(X, T)

def main():
    train = make_data()
    epoch = 400
    batchsize = 40
    # Train against one-hot targets with a mean squared error loss.
    model = L.Classifier(MLP(7, 18, 4), lossfun = F.mean_squared_error)
    # F.accuracy expects integer labels, so accuracy computation is disabled.
    model.compute_accuracy = False
    optimizer = optimizers.RMSprop()
    optimizer.setup(model)
    train_iter = iterators.SerialIterator(train, batchsize)
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (epoch, 'epoch'), out = 'result')
    trainer.extend(extensions.LogReport())
    # main/accuracy stays blank here because compute_accuracy is False.
    trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy']))
    trainer.run()
    x = np.array([binary_encode(i, 7) for i in range(101)], dtype = np.float32)
    y = model.predictor(x).data
    for i in range(100):
        print (fizz_buzz(i + 1, y[i + 1].argmax()))

if __name__ == '__main__':
    main()
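
As a rough sanity check (a minimal sketch, assuming it is appended at the end of main above, after y has been computed), the predictions can be compared against the true labels:

    # Fraction of the numbers 1..100 predicted correctly.
    t = np.array([fizz_buzz_encode(i).argmax() for i in range(1, 101)])
    p = y[1:101].argmax(axis = 1)
    print((t == p).mean())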

That's all.
