Learning Deep Learning through Implementation (8): Implementation Changes

Posted at 2018-11-04

So far we have tried out the various neural-network techniques one by one. To support CNNs and RNNs going forward, we now overhaul the implementation.

Revision policy

  • Activation functions, dropout, batch normalization, and so on used to be applied uniformly to every layer; now any activation function can be attached to any layer. Dropout and batch normalization are each treated as a layer in their own right.
  • After running the specified number of epochs, additional epochs can be run. The state can also be saved part-way through and training resumed later.
  • The learning rate and the optimizer can be changed part-way through training.

Definitions

Model

Definition

We make it possible to define an arbitrary stack of layers. The network used so far, with hidden layers of 100 and 50 nodes, is defined as follows.

model = create_model(28*28)
model = add_layer(model, "affine1", affine, 100)
model = add_layer(model, "relu1", relu)
model = add_layer(model, "affine2", affine, 50)
model = add_layer(model, "relu2", relu)
model = add_layer(model, "affine3", affine, 10)
model = set_output(model, softmax)
model = set_error(model, cross_entropy_error)

create_model creates the model and add_layer appends layers one by one. The output function and the error function are set with set_output and set_error.

# モデル作成関数
model = create_model(input_shape)
# 引数
#  input_shape : 入力データの次元
# 戻り値
#  モデル

# 層追加関数
model = add_layer(model, name, func, d=None, **kwargs)
# 引数
#  model : モデル
#  name  : レイヤの名前
#  func  : 中間層の関数
#  d     : ノード数
#  kwargs: 中間層の関数のパラメータ
# 戻り値
#  モデル

# 出力関数
model = set_output(model, func, **kwargs)
# 引数
#  model : モデル
#  func  : 出力層の関数
#  kwargs: 出力層の関数のパラメータ
# 戻り値
#  モデル

# 損失関数
model = set_error(model, func, **kwargs)
# 引数
#  model : モデル
#  func  : 損失関数
#  kwargs: 損失関数のパラメータ
# 戻り値
#  モデル

Here is the implementation of the model-definition part.

from collections import OrderedDict
import pickle

def create_model(d):
    # modelの生成
    model = {}
    model["input"] = {"d":d}
    model["layer"] = OrderedDict()
    model["weight_decay"] = None
    return model

def add_layer(model, name, func, d=None, **kwargs):
    # layerの設定
    model["layer"][name] = {"func":func, "back_func":eval(func.__name__ + "_back"), "d":d, "params":kwargs, "weights":None}
    return model

def set_output(model, func, **kwargs):
    # layerの設定
    model["output"] = {"func":func, "d":1, "params":kwargs}
    return model

def set_error(model, func, **kwargs):
    # layerの設定
    model["error"] = {"func":func, "d":1, "params":kwargs}
    return model

Weight decay is not a layer, but it is also registered on the model.

# 重み減衰関数
model = set_weight_decay(model, func, **kwargs)
# 引数
#  model : モデル
#  func  : 重み減衰関数
#  kwargs: 重み減衰関数のパラメータ
# 戻り値
#  モデル
def set_weight_decay(model, func, **kwargs):
    model["weight_decay"] = {"func":func, "back_func":eval(func.__name__ + "_back"), "params":kwargs}
    return model
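
For example, attaching L2 regularization with a decay coefficient of 0.001 (the same setting used in the weight-decay example later in this article) looks like this:

model = set_weight_decay(model, L2_norm, weight_decay_lambda=0.001)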

Initialization

init_model initializes the variables that need it, such as the affine weights and biases. The weights and biases are stored inside the model.

def init_model(model):
    # modelの初期化
    # input
    d_prev = 0
    d_next = model["input"]["d"]
    print("input", "-", d_prev, d_next)
    # layer
    d_prev = d_next
    for k, v in model["layer"].items():
        # 要素数取得
        d = v["d"] if v["d"] != None else d_prev
        # 重み、バイアスの初期化
        d_next = d
        if v["func"].__name__ + "_init_layer" in globals():
            init_layer_func = eval(v["func"].__name__ + "_init_layer")
            d_next, v["weights"] = init_layer_func(d_prev, d, **v["params"])
        # 要素数設定
        v["d_prev"], v["d_next"] = d_prev, d_next
        # モデル表示
        print(k, v["func"].__name__, d_prev, d_next)
        d_prev = d_next
    # output
    model["output"]["d_prev"] = d_prev
    print("output", model["output"]["func"].__name__, d_prev)
    # error
    print("error", model["error"]["func"].__name__)

    return model

Saving and loading

save_model and load_model save and load the model.

def save_model(model, file_path):
    f = open(file_path, "wb")
    pickle.dump(model, f)
    f.close()
    return

def load_model(file_path):
    f = open(file_path, "rb")
    model = pickle.load(f)
    f.close()
    return model
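
Note that the model dictionary stores function objects (relu, softmax, and so on). pickle saves such functions by reference, so the same definitions must be available when the file is loaded back. A minimal round trip (the file name here is arbitrary):

save_model(model, "model.pickle")
model = load_model("model.pickle")  # relu, softmax, etc. must already be defined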

Optimizer

Definition

We also define the optimizer.
Here is an example configuration for Adam.

optimizer = create_optimizer(Adam, lr=0.1, beta1=0.9, beta2=0.999)
# オプティマイザ作成関数
optimizer = create_optimizer(func, **kwargs)
# 引数
#  func  : オプティマイザ関数
#  kwargs: オプティマイザ関数のパラメータ
# 戻り値
#  オプティマイザ

Here is the implementation of the optimizer definition.
The initialization function initializes the variables the optimizer uses. As with the model, save and load functions are provided.

def create_optimizer(func, **kwargs):
    optimizer = {"func":func, "params":kwargs}
    return optimizer

def init_optimizer(optimizer, model):
    optimizer_stats = {}
    for k, v in model["layer"].items():
        # オプティマイザの初期化
        if v["func"].__name__ + "_init_optimizer" in globals():
            init_optimizer_func = eval(v["func"].__name__ + "_init_optimizer")
            optimizer_stats[k] = init_optimizer_func()
    optimizer["stats"] = optimizer_stats

    return optimizer

def save_optimizer(optimizer, file_path):
    f = open(file_path, "wb")
    pickle.dump(optimizer, f)
    f.close()
    return

def load_optimizer(file_path):
    f = open(file_path, "rb")
    optimizer = pickle.load(f)
    f.close()
    return optimizer

Forward propagation, backpropagation, and weight updates

Forward propagation

Each layer of the model is executed in turn, followed by the output function and the error function.

An activation function receives the output of the previous layer. Layers that use weights, such as affine, define their own propagation function so that the weights can be passed in.
If a layer-specific propagation function is defined, it is called instead of the common one.

def propagation(model, x, t=None, learn_flag=True):
    us = {}
    u = x
    err = None
    weight_decay_sum = 0
    # layer
    for k, v in model["layer"].items():
        # propagation関数設定
        propagation_func = middle_propagation
        if v["func"].__name__ + "_propagation" in globals():
            propagation_func = eval(v["func"].__name__ + "_propagation")
        # propagation関数実行
        us[k], weight_decay_r = propagation_func(v["func"], u, v["weights"], model["weight_decay"], learn_flag, **v["params"])
        u = us[k]["z"]  
        weight_decay_sum = weight_decay_sum + weight_decay_r
    # output
    if "output" in model:
        propagation_func = output_propagation
        # propagation関数実行
        us["output"] = propagation_func(model["output"]["func"], u, learn_flag, **model["output"]["params"])
        u = us["output"]["z"]
    # error
    y = u
    # 学習時には、誤差は計算しない
    if learn_flag == False:
        if "error" in model:
            if t is not None:
                err = model["error"]["func"](y, t)
        # 重み減衰
        if model["weight_decay"] is not None:
            if err is not None:
                err = err + weight_decay_sum

    return y, err, us

Backpropagation

Backpropagation mirrors forward propagation.
Layers that use weights define a back_propagation function; if a layer-specific back_propagation function is defined, it is called instead of the common one.

def back_propagation(model, x=None, t=None, y=None, us=None, du=None):
    dus = {}
    if du is None:
        # 出力層+誤差勾配関数
        output_error_back_func = eval(model["output"]["func"].__name__ + "_" + model["error"]["func"].__name__ + "_back")
        du = output_error_back_func(y, us["output"]["u"], t)
        dus["output"] = {"du":du}
    dz = du
    for k, v in reversed(model["layer"].items()):
        # back propagation関数設定
        back_propagation_func = middle_back_propagation
        if v["func"].__name__ + "_back_propagation" in globals():
            back_propagation_func = eval(v["func"].__name__ + "_back_propagation")
        # back propagation関数実行
        calc_du_flag = v.get("calc_du_flag", True)
        dus[k] = back_propagation_func(v["back_func"], dz, us[k], v["weights"], model["weight_decay"], calc_du_flag, **v["params"])
        dz = dus[k]["du"]
        # du計算フラグがFalseだと以降計算しない
        if calc_du_flag == False:
            break

    return dz, dus

Weight updates

Layers that update weights, such as affine, define an update_weight function that performs the update.

def update_weight(model, dus, optimizer):
    for k, v in model["layer"].items():
        # 重み更新
        if v["func"].__name__ + "_update_weight" in globals():
            update_weight_func = eval(v["func"].__name__ + "_update_weight")
            v["weights"], optimizer["stats"][k] = update_weight_func(optimizer["func"], dus[k], v["weights"], optimizer["stats"][k], **optimizer["params"])
    return model, optimizer

Error

This simply calls the model's error function.

def error(model, y, t):
    return model["error"]["func"](y, t)

Layer-specific implementations

We now add the layer-specific implementations for each function.
The layer-specific hooks are named as follows.

  • Layer initialization - function name + "_init_layer"
  • Optimizer initialization - function name + "_init_optimizer"
  • Forward propagation - function name + "_propagation"
  • Backpropagation - function name + "_back_propagation"
  • Weight update - function name + "_update_weight"

Only the hooks a layer actually needs are defined; a small illustration follows.
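
As an illustration of the naming convention, here is a sketch of adding a hypothetical activation (swish is not part of this article's function set). A stateless activation only needs the forward and backward functions; propagation and back_propagation then fall back to the common middle_propagation / middle_back_propagation defined below. A layer with weights would additionally define swish_init_layer, swish_propagation, and so on.

def swish(u):
    # u * sigmoid(u)
    return u / (1 + np.exp(-u))

def swish_back(dz, u, z):
    sig = 1 / (1 + np.exp(-u))
    return dz * (sig + u * sig * (1 - sig))

model = add_layer(model, "swish1", swish)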

Common

Activation functions that need no special handling are covered by the following common functions.

def middle_propagation(func, u, weights, weight_decay, learn_flag, **params):
    z = func(u, **params)
    return {"u":u, "z":z}, 0

def middle_back_propagation(back_func, dz, us, weights, weight_decay, calc_du_flag, **params):
    du = back_func(dz, us["u"], us["z"], **params)
    return {"du":du}

def output_propagation(func, u, learn_flag, **params):
    z = func(u, **params)
    return {"u":u, "z":z}

affine

Because affine uses a weight W and a bias b, all of the hooks are defined.

def affine_init_layer(d_prev, d, weight_init_func=he_normal, weight_init_params={}, bias_init_func=zeros_b, bias_init_params={}):
    W = weight_init_func(d_prev, d, **weight_init_params)
    b = bias_init_func(d, **bias_init_params)
    return d, {"W":W, "b":b}

def affine_init_optimizer():
    sW = {}
    sb = {}
    return {"sW":sW, "sb":sb}

def affine_propagation(func, u, weights, weight_decay, learn_flag, **params):
    z = func(u, weights["W"], weights["b"])
    # 重み減衰対応
    weight_decay_r = 0
    if weight_decay is not None:
        weight_decay_r = weight_decay["func"](weights["W"], **weight_decay["params"])
    return {"u":u, "z":z}, weight_decay_r

def affine_back_propagation(back_func, dz, us, weights, weight_decay, calc_du_flag, **params):
    du, dW, db = back_func(dz, us["u"], weights["W"], weights["b"], calc_du_flag)
    # 重み減衰対応
    if weight_decay is not None:
        dW = dW + weight_decay["back_func"](weights["W"], **weight_decay["params"])
    return {"du":du, "dW":dW, "db":db}

def affine_update_weight(func, du, weights, optimizer_stats, **params):
    weights["W"], optimizer_stats["sW"] = func(weights["W"], du["dW"], **params, **optimizer_stats["sW"])
    weights["b"], optimizer_stats["sb"] = func(weights["b"], du["db"], **params, **optimizer_stats["sb"])
    return weights, optimizer_stats

maxout

Because maxout also uses a weight W and a bias b, all of the hooks are defined.

def maxout_init_layer(d_prev, d, unit=1, weight_init_func=he_normal, weight_init_params={}, bias_init_func=zeros_b, bias_init_params={}):
    W = weight_init_func(d_prev, d*unit, **weight_init_params)
    b = bias_init_func(d*unit, **bias_init_params)
    return d, {"W":W, "b":b}

def maxout_init_optimizer():
    sW = {}
    sb = {}
    return {"sW":sW, "sb":sb}

def maxout_propagation(func, u, weights, weight_decay, learn_flag, unit=1, **params):
    u_affine = affine(u, weights["W"], weights["b"])
    z   = func(u_affine, unit)
    # 重み減衰対応(未対応)
    weight_decay_r = 0
    #if weight_decay is not None:
    #    weight_decay_r = weight_decay["func"](weights["W"], **weight_decay["params"])
    return {"u":u, "z":z, "u_affine":u_affine}, weight_decay_r

def maxout_back_propagation(back_func, dz, us, weights, weight_decay, calc_du_flag, unit=1, **params):
    du_affine = back_func(dz, us["u_affine"], us["z"], unit)
    du, dW, db = affine_back(du_affine, us["u"], weights["W"], weights["b"])
    # 重み減衰対応(未対応)
    #if weight_decay is not None:
    #    dW = dW + weight_decay["back_func"](weights["W"], **weight_decay["params"])
    return {"du":du, "dW":dW, "db":db}

def maxout_update_weight(func, du, weights, optimizer_stats, **params):
    weights["W"], optimizer_stats["sW"] = func(weights["W"], du["dW"], **params, **optimizer_stats["sW"])
    weights["b"], optimizer_stats["sb"] = func(weights["b"], du["db"], **params, **optimizer_stats["sb"])
    return weights, optimizer_stats

prelu

Because prelu learns alpha, the hooks are defined.

def prelu_init_layer(d_prev, d):
    alpha = np.zeros(d)
    return d, {"alpha":alpha}

def prelu_init_optimizer():
    salpha = {}
    return {"salpha":salpha}

def prelu_propagation(func, u, weights, weight_decay, learn_flag, **params):
    z = func(u, weights["alpha"])
    return {"u":u, "z":z}, 0

def prelu_back_propagation(back_func, dz, us, weights, weight_decay, calc_du_flag, **params):
    du, dalpha = back_func(dz, us["u"], us["z"], weights["alpha"])
    return {"du":du, "dalpha":dalpha}

def prelu_update_weight(func, du, weights, optimizer_stats, **params):
    weights["alpha"], optimizer_stats["salpha"] = func(weights["alpha"], du["dalpha"], **params, **optimizer_stats["salpha"])
    return weights, optimizer_stats

rrelu

Because rrelu determines alpha with random numbers at run time, the hooks are defined.

def rrelu_init_layer(d_prev, d, min=0.0, max=0.1):
    alpha = np.random.uniform(min, max, d)
    return d, {"alpha":alpha}

def rrelu_propagation(func, u, weights, weight_decay, learn_flag, min=0.0, max=0.1):
    z = func(u, weights["alpha"])
    return {"u":u, "z":z}, 0

def rrelu_back_propagation(back_func, dz, us, weights, weight_decay, calc_du_flag, min=0.0, max=0.1):
    du = back_func(dz, us["u"], us["z"], weights["alpha"])
    return {"du":du}

batch_normalization

Because batch normalization learns gamma and beta, the hooks are defined.

Until now, prediction also used the mean and variance of the prediction data, but it really should use the statistics from training. The implementation has been changed so that prediction uses the training-time mean and variance.
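
Concretely, the training-time statistics are maintained as an exponential moving average of each mini-batch's mean and variance; this is what batch_normalization_update_weight below does (alpha defaults to 0.1, and batch_mean/batch_var here stand for the current mini-batch statistics):

# moving-average update applied after each weight update
train_mean = (1 - alpha) * train_mean + alpha * batch_mean
train_var  = (1 - alpha) * train_var  + alpha * batch_var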

def batch_normalization_init_layer(d_prev, d, batch_norm_node=True, use_gamma_beta=True, use_train_stats=True, alpha=0.1):
    # バッチ正規化
    dim = d
    if type(d) == int:
        dim = (d,)
    # gamma,beta初期化
    if batch_norm_node:
        gamma = np.ones((1,) + dim)
        beta  = np.zeros((1,) + dim)
    else:
        gamma = np.ones(1)
        beta  = np.zeros(1)
    # 学習時の統計情報初期化
    train_mean = None
    train_var  = None
    if use_train_stats:
        if batch_norm_node:
            train_mean = np.zeros((1,) + dim)
            train_var  = np.ones((1,) + dim)
        else:
            train_mean = np.zeros(1)
            train_var  = np.ones(1)

    return d, {"gamma":gamma, "beta":beta, "use_gamma_beta":use_gamma_beta,
               "use_train_stats":use_train_stats, "alpha":alpha, "train_mean":train_mean, "train_var":train_var}

def batch_normalization_init_optimizer():
    sgamma = {}
    sbeta = {}
    return {"sgamma":sgamma, "sbeta":sbeta}

def batch_normalization_propagation(func, u, weights, weight_decay, learn_flag, **params):
    if learn_flag or not weights["use_train_stats"]:
        z, u_mean, u_var = func(u, weights["gamma"], weights["beta"])
    else:
        z, u_mean, u_var = func(u, weights["gamma"], weights["beta"], u_mean=weights["train_mean"], u_var=weights["train_var"])
    return {"u":u, "z":z, "u_mean":u_mean, "u_var":u_var}, 0

def batch_normalization_back_propagation(back_func, dz, us, weights, weight_decay, calc_du_flag, **params):
    du, dgamma, dbeta, u_mean, u_var = back_func(dz, us["u"], weights["gamma"], weights["beta"], u_mean=us["u_mean"], u_var=us["u_var"])
    return {"du":du, "dgamma":dgamma, "dbeta":dbeta, "u_mean":u_mean, "u_var":u_var}

def batch_normalization_update_weight(func, du, weights, optimizer_stats, **params):
    if weights["use_gamma_beta"]:
        weights["gamma"], optimizer_stats["sgamma"] = func(weights["gamma"], du["dgamma"], **params, **optimizer_stats["sgamma"])
        weights["beta"],  optimizer_stats["sbeta"]  = func(weights["beta"] , du["dbeta"],  **params, **optimizer_stats["sbeta"])
    if weights["use_train_stats"]:
        weights["train_mean"] = (1 - weights["alpha"]) * weights["train_mean"] + weights["alpha"] * du["u_mean"]
        weights["train_var"]  = (1 - weights["alpha"]) * weights["train_var"]  + weights["alpha"] * du["u_var"]
    return weights, optimizer_stats

dropout

Because dropout determines the dropout mask with random numbers at run time, the hooks are defined.

def dropout_propagation(func, u, weights, weight_decay, learn_flag, dropout_ratio=0.9):
    dropout_mask = None
    if learn_flag:
        dropout_mask = set_dropout_mask(u[-1].shape, dropout_ratio)
        z = func(u, dropout_mask)
    else:
        z = u * dropout_ratio
    return {"u":u, "z":z, "dropout_mask":dropout_mask}, 0

def dropout_back_propagation(back_func, dz, us, weights, weight_decay, calc_du_flag, dropout_ratio=0.9):
    du = back_func(dz, us["dropout_mask"])
    return {"du":du}

Function library

These are the functions used when defining models and optimizers.
They are essentially the same as before, with the following changes.

  • Argument names unified
  • Optional parameters changed from a params dictionary to individual keyword parameters
  • Dropout implementation reworked so it can be used as a layer
# affine変換
def affine(u, W, b):
    return np.dot(u, W) + b

def affine_back(dz, u, W, b):
    du = np.dot(dz, W.T)                    # zの勾配は、今までの勾配と重みを掛けた値
    dW = np.dot(u.T, dz)                    # 重みの勾配は、zに今までの勾配を掛けた値
    db = np.dot(np.ones(u.shape[0]).T, dz)  # バイアスの勾配は、今までの勾配の値
    return du, dW, db

# 活性化関数(中間)
def sigmoid(u):
    return 1/(1 + np.exp(-u))

def sigmoid_back(dz, u, z):
    return dz * (z - z**2)

def tanh(u):
    return np.tanh(u)

def tanh_back(dz, u, z):
    return dz * (1 - z**2)
#    return dz * (1/np.cosh(u)**2)

def relu(u):
    return np.maximum(0, u)

def relu_back(dz, u, z):
    return dz * np.where(u > 0, 1, 0)

def leaky_relu(u, alpha):
    return np.maximum(u * alpha, u)

def leaky_relu_back(dz, u, z, alpha):
    return dz * np.where(u > 0, 1, alpha)

def prelu(u, alpha):
    return np.where(u > 0, u, u * alpha )

def prelu_back(dz, u, z, alpha):
    return dz * np.where(u > 0, 1, alpha), np.sum(dz * np.where(u > 0, 0, u), axis=0)/u.shape[0]

def rrelu(u, alpha):
    return np.where(u > 0, u, u * alpha )

def rrelu_back(dz, u, z, alpha):
    return dz * np.where(u > 0, 1, alpha)

def relun(u, n):
    return np.minimum(np.maximum(0, u), n)

def relun_back(dz, u, z, n):
    return dz * np.where(u > n, 0, np.where(u > 0, 1, 0))

def srelu(u, alpha):
    return np.where(u > alpha, u, alpha)

def srelu_back(dz, u, z, alpha):
    return dz * np.where(u > alpha, 1, 0)

def elu(u, alpha):
    return np.where(u > 0, u,  alpha * (np.exp(u) - 1))

def elu_back(dz, u, z, alpha):
    return dz * np.where(u > 0, 1,  alpha * np.exp(u))

def maxout(u, unit):
    u = u.reshape((u.shape[0],int(u.shape[1]/unit),unit))
    return np.max(u, axis=2)

def maxout_back(dz, u, z, unit):
    u = u.reshape((u.shape[0],int(u.shape[1]/unit),unit))
    u_max = np.argmax(u,axis=2)
    du = np.zeros_like(u)
    for i in range(du.shape[0]):
        for j in range(du.shape[1]):
            du[i,j,u_max[i,j]] = dz[i,j]
    return du.reshape((du.shape[0],int(du.shape[1]*unit)))

def identity(u):
    return u

def identity_back(dz, u, z):
    return dz

def softplus(u):
    return np.log(1+np.exp(u))

def softplus_back(dz, u, z):
    return dz * (1/(1 + np.exp(-u)))

def softsign(u):
    return u/(1+np.absolute(u))

def softsign_back(dz, u, z):
    return dz * (1/(1+np.absolute(u))**2)

def step(u):
    return np.where(u > 0, 1, 0)

def step_back(dz, u, x):
    return 0

# 活性化関数(出力)
def softmax(u):
    u = u.T
    max_u = np.max(u, axis=0)
    exp_u = np.exp(u - max_u)
    sum_exp_u = np.sum(exp_u, axis=0)
    y = exp_u/sum_exp_u
    return y.T

# 損失関数
def mean_squared_error(y, t):
    size = 1
    if y.ndim == 2:
        size = y.shape[0]
    return 0.5 * np.sum((y-t)**2)/size

def cross_entropy_error(y, t):
    size = 1
    if y.ndim == 2:
        size = y.shape[0]
    if t.shape[t.ndim-1] == 1:
        # 2値分類
        return -np.sum(t * np.log(np.maximum(y,1e-7)) + (1 - t) * np.log(np.maximum(1 - y,1e-7)))/size
    else:
        # 多クラス分類
        return -np.sum(t * np.log(np.maximum(y,1e-7)))/size

# 活性化関数(出力)+損失関数勾配
def softmax_cross_entropy_error_back(y, u, t):
    size = 1
    if y.ndim == 2:
        size = y.shape[0]
    return (y - t)/size

def sigmoid_cross_entropy_error_back(y, u, t):
    size = 1
    if y.ndim == 2:
        size = y.shape[0]
    return (y - t)/size

def identity_mean_squared_error_back(y, u, t):
    size = 1
    if y.ndim == 2:
        size = y.shape[0]
    return (y - t)/size

# 重み・バイアスの初期化
def lecun_normal(d_1, d):
    var = 1/np.sqrt(d_1)
    return np.random.normal(0, var, (d_1, d))
def lecun_uniform(d_1, d):
    min = -np.sqrt(3/d_1)
    max = np.sqrt(3/d_1)
    return np.random.uniform(min, max, (d_1, d))

def glorot_normal(d_1, d):
    var = np.sqrt(2/(d_1+d))
    return np.random.normal(0, var, (d_1, d))
def glorot_uniform(d_1, d):
    min = -np.sqrt(6/(d_1+d))
    max = np.sqrt(6/(d_1+d))
    return np.random.uniform(min, max, (d_1, d))

def he_normal(d_1, d):
    var = np.sqrt(2/d_1)
    return np.random.normal(0, var, (d_1, d))
def he_uniform(d_1, d):
    min = -np.sqrt(6/d_1)
    max = np.sqrt(6/d_1)
    return np.random.uniform(min, max, (d_1, d))

def normal_w(d_1, d, mean=0, var=1):
    return np.random.normal(mean, var, (d_1, d))
def normal_b(d, mean=0, var=1):
    return np.random.normal(mean, var, d)

def uniform_w(d_1, d, min=0, max=1):
    return np.random.uniform(min, max, (d_1, d))
def uniform_b(d, min=0, max=1):
    return np.random.uniform(min, max, d)

def zeros_w(d_1, d):
    return np.zeros((d_1, d))
def zeros_b(d):
    return np.zeros(d)

def ones_w(d_1, d):
    return np.ones((d_1, d))
def ones_b(d):
    return np.ones(d)

# 勾配法
def SGD(W, dW, lr=0.1):
    return W - lr*dW, {}

def Momentum(W, dW, lr=0.01, mu=0.9, v=None):
    if v is None: # vの初期値設定
        v = np.zeros_like(W)
    # vの更新
    v = mu * v - lr * dW
    return W + v, {"v":v}

def Nesterov(W, dW, lr=0.01, mu=0.9, v=None):
    if v is None:
        v = np.zeros_like(W)
    v = mu * v - lr * dW
    return W + v, {"v":v}

def AdaGrad(W, dW, lr=0.01, epsilon=1e-7, g=None):
    if g is None:
        g = np.zeros_like(W)
    g =  g + (dW * dW)
    return W - (lr * dW)/np.sqrt(np.maximum(g, epsilon)), {"g":g}

def RMSProp(W, dW, lr=0.001, gamma=0.9, epsilon=1e-7, g=None):
    if g is None:
        g = np.zeros_like(W)
    g =  gamma * g + (1 - gamma) * dW * dW
    return W - (lr * dW)/np.sqrt(np.maximum(g, epsilon)), {"g":g}

def Adadelta(W, dW, lr=1.0, gamma=0.9, epsilon=1e-7, g=None, s=None):
    if g is None:
        g = np.zeros_like(W)
    if s is None:
        s = np.zeros_like(W)
    g = gamma * g + (1 - gamma) * dW * dW
    newW = W - (lr * dW)/np.sqrt(np.maximum(g, epsilon)) * np.sqrt(np.maximum(s, epsilon))
    s = (1 - gamma) * (newW - W) * (newW - W) + gamma * s
    return newW, {"g":g, "s":s}

def Adam(W, dW, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7, k=0, m=None, v=None):
    if m is None:
        m = np.zeros_like(W)
    if v is None:
        v = np.zeros_like(W)
    k = k + 1
    m = beta1 * m + (1 - beta1) * dW
    v = beta2 * v + (1 - beta2) * dW * dW
    hatm = m / (1 - np.power(beta1, k))
    hatv = v / (1 - np.power(beta2, k))
    return W - lr / np.maximum(np.sqrt(hatv), epsilon) * hatm, {"k":k, "m":m, "v":v}

# 重み減衰
def L1_norm(W, weight_decay_lambda):
    r = np.sum(np.absolute(W))
    return r * weight_decay_lambda

def L1_norm_back(W, weight_decay_lambda):
    return np.where(W > 0, weight_decay_lambda, np.where(W < 0, -weight_decay_lambda, 0))

def L2_norm(W, weight_decay_lambda):
    r = np.sum(W**2)
    return (r * weight_decay_lambda)/2

def L2_norm_back(W, weight_decay_lambda):
    return weight_decay_lambda * W

# ドロップアウト
def dropout(z, mask):
    return z * mask

def dropout_back(dz, mask):
    return dz * mask

def set_dropout_mask(d, dropout_ratio):
    mask = np.zeros(d)
    mask = mask.flatten()
    size = mask.shape[0]
    mask_idx = np.random.choice(size, round(dropout_ratio*size), replace=False) # 通すindexの配列
    mask[mask_idx] = 1
    mask = mask.reshape(d)
    return mask

# バッチ正規化
def batch_normalization(u, gamma, beta, axis=0, u_mean=None, u_var=None):
    if u_mean is None:
        u_mean = np.mean(u, axis=axis, keepdims=True)               # 平均値を求める
    if u_var is None:
        u_var  = np.var(u, axis=axis, keepdims=True)                # 分散を求める
    u_std  = np.maximum(np.sqrt(u_var), 1e-7)                       # 標準偏差を求める
    uh = (u-u_mean)/u_std                                           # 正規化
    return gamma * uh + beta, u_mean, u_var

def batch_normalization_back(dz, u, gamma, beta, axis=0, u_mean=None, u_var=None):
    m = u.shape[axis]                                               # バッチサイズ
    if u_mean is None:
        u_mean = np.mean(u, axis=axis, keepdims=True)               # 平均値を求める
    if u_var is None:
        u_var  = np.var(u, axis=axis, keepdims=True)                # 分散を求める
    u_std  = np.maximum(np.sqrt(u_var), 1e-7)                       # 標準偏差を求める
    uh     = (u-u_mean)/u_std                                       # 正規化
    gdz    = dz * gamma
    dz_sum = np.sum(gdz * (u - u_mean), axis=axis, keepdims=True)
    dn     = (gdz - (uh * dz_sum /(m * u_std)))/ u_std
    dn_sum = np.sum(dn, axis=axis, keepdims=True)
    du     = dn - (dn_sum / m)
    if gamma.ndim == 1:
        dgamma = np.sum(uh * dz)/u.reshape(u.shape[0], -1).shape[1]
        dbeta  = np.sum(dz)/u.reshape(u.shape[0], -1).shape[1]
    else:
        dgamma = np.sum(uh * dz, axis=axis, keepdims=True)
        dbeta  = np.sum(dz, axis=axis, keepdims=True)
    return du, dgamma, dbeta, u_mean, u_var

Training

Training

Here is the training routine.

# 学習関数
model, optimizer, learn_info = learn(model, x_train, t_train, x_test=None, t_test=None, batch_size=100, epoch=50, init_model_flag=True,
          optimizer=None, init_optimizer_flag=True, shuffle_flag=True, learn_info=None)
# 引数
#  model  : モデル
#  x_train : 学習データ
#  t_train : 学習データ正解
#  x_test  : テストデータ(省略可)
#  t_test  : テストデータ正解(省略可)
#  batch_size : バッチサイズ
#  epoch : エポック
#  init_model_flag : モデル初期化フラグ。継続して学習する場合は、Falseを設定
#  optimizer  : オプティマイザ(省略時は、SGD)
#  init_optimizer_flag : オプティマイザ初期化フラグ。継続して学習する場合は、Falseを設定
#  shuffle_flag : シャッフルフラグ
#  learn_info : 学習情報。継続して学習する場合は、前回のlearn_infoを設定
# 戻り値
#  model : モデル
#  optimizer : オプティマイザ
#  learn_info : 学習情報

Previously the code that reports training progress lived inside the training loop; it has been split out into a separate epoch_hook function.

# 学習
def learn(model, x_train, t_train, x_test=None, t_test=None, batch_size=100, epoch=50, init_model_flag=True,
          optimizer=None, init_optimizer_flag=True, shuffle_flag=True, learn_info=None):
    if init_model_flag:
        model = init_model(model)
    if optimizer is None:
        optimizer = create_optimizer(SGD)
    if init_optimizer_flag:
        optimizer = init_optimizer(optimizer, model)

    # 学習情報初期化
    learn_info = epoch_hook(learn_info, epoch, 0, model, x_train, None, t_train, x_test, t_test, batch_size)

    # エポック実行
    for i in range(epoch):
        idx = np.arange(x_train.shape[0])
        if shuffle_flag:
            # データのシャッフル
            np.random.shuffle(idx)

        # 学習
        y_train = np.zeros(t_train.shape)
        for j in range(0, x_train.shape[0], batch_size):
            # propagation
            y_train[idx[j:j+batch_size]], err, us = propagation(model, x_train[idx[j:j+batch_size]], t_train[idx[j:j+batch_size]])
            # back_propagation
            dz, dus = back_propagation(model, x_train[idx[j:j+batch_size]], t_train[idx[j:j+batch_size]], y_train[idx[j:j+batch_size]], us)
            # update_weight
            model, optimizer = update_weight(model, dus, optimizer)

        # 学習情報設定(エポックフック)
        learn_info = epoch_hook(learn_info, epoch, i+1, model, x_train, y_train, t_train, x_test, t_test, batch_size)

    return model, optimizer, learn_info

Prediction

Here is the prediction function.

# 予測関数
y_pred, err, us = predict(model, x_pred, t_pred=None)
# 引数
#  model  : モデル
#  x_pred : 予測データ
#  t_pred : 予測データ正解(省略可)
# 戻り値
#  y_pred : 予測値
#  err    : 誤差(t_predを省略した場合は、ダミー)
#  us     : 各層の結果
# 予測
def predict(model, x_pred, t_pred=None):
    y_pred, err, us = propagation(model, x_pred, t_pred, learn_flag=False)
    return y_pred, err, us
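
For example, after training on the MNIST data loaded later in this article, the test-set accuracy can be checked directly (accuracy_rate is defined further below):

y_pred, err, _ = predict(model, nx_test, t_test)
print(accuracy_rate(y_pred, t_test), err)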

Displaying training progress

Training progress is printed every epoch.
What is printed depends on the type of problem.

  • Regression: error
  • Binary classification (output function is sigmoid): accuracy, F-measure, error
  • Multi-class classification: accuracy, error

One more significant change: the accuracy on the training data used to be recomputed every epoch, but because that is expensive we now reuse the outputs produced during training. Since those outputs come from the training passes themselves, the values differ slightly from before.

import time

def epoch_hook(learn_info, epoch, cnt, model, x_train, y_train, t_train, x_test=None, t_test=None, batch_size=100):
    # 初期化
    if learn_info is None:
        learn_info = {}
        learn_info["err_train"] = []
        if x_test is not None:
            learn_info["err_test"] = []
        if model["error"]["func"] == cross_entropy_error:
            learn_info["accuracy_rate_train"] = []
            if x_test is not None:
                 learn_info["accuracy_rate_test"] = []
        if model["output"]["func"] == sigmoid:
            learn_info["f_measure_train"] = []
            if x_test is not None:
                learn_info["f_measure_test"] = []
        # 学習データの予測
        y_train = np.zeros(t_train.shape)
        for j in range(0, x_train.shape[0], batch_size):
            y_train[j:j+batch_size], _, _ = predict(model, x_train[j:j+batch_size], t_train[j:j+batch_size])
        #y_train, err_train, _ = predict(model, x_train, t_train)
        learn_info["total_times"] = 0
    # 情報計算
    if cnt != 0 or len(learn_info["err_train"]) == 0:
        # 再計算する場合
        #y_train = np.zeros(t_train.shape)
        #for j in range(0, x_train.shape[0], batch_size):
        #    y_train[j:j+batch_size], _, _ = predict(model, x_train[j:j+batch_size], x_train[j:j+batch_size])
        #y_train, err_train, _ = predict(model, x_train, t_train)
        learn_info["y_train"] = y_train
        if model["error"]["func"] == cross_entropy_error:
            learn_info["accuracy_rate_train"].append(accuracy_rate(y_train, t_train))
        if model["output"]["func"] == sigmoid:
            learn_info["f_measure_train"].append(f_measure(np.round(y_train), t_train)[0])
        learn_info["err_train"].append(error(model, y_train, t_train))
        if x_test is not None:
            #learn_info["y_test"], err_test, _ = predict(model, x_test, t_test)
            #learn_info["err_test"].append(err_test)
            #  バッチサイズごとに計算する場合
            y_test = np.zeros(t_test.shape)
            for j in range(0, x_test.shape[0], batch_size):
                y_test[j:j+batch_size], _, _ = predict(model, x_test[j:j+batch_size], t_test[j:j+batch_size])
            learn_info["y_test"] = y_test
            if t_test is not None:
                if model["error"]["func"] == cross_entropy_error:
                    learn_info["accuracy_rate_test"].append(accuracy_rate(learn_info["y_test"], t_test))
                if model["output"]["func"] == sigmoid:
                    learn_info["f_measure_test"].append(f_measure(np.round(learn_info["y_test"]), t_test)[0])
                learn_info["err_test"].append(error(model, y_test, t_test))

        # 情報表示
        epoch_cnt = len(learn_info["err_train"])-1
        print(epoch_cnt, end=" ")
        if model["error"]["func"] == cross_entropy_error:
            print(learn_info["accuracy_rate_train"][epoch_cnt], end=" ")
        if model["output"]["func"] == sigmoid:
            print(learn_info["f_measure_train"][epoch_cnt], end=" ")
        print(learn_info["err_train"][epoch_cnt], end=" ")
        if x_test is not None:
            if t_test is not None:
                if model["error"]["func"] == cross_entropy_error:
                    print(learn_info["accuracy_rate_test"][epoch_cnt], end=" ")
                if model["output"]["func"] == sigmoid:
                    print(learn_info["f_measure_test"][epoch_cnt], end=" ")
                print(learn_info["err_test"][epoch_cnt], end=" ")
        print()

    if cnt == 0:
        # 開始時間設定
        learn_info["start_time"] = time.time()
    if cnt == epoch:
        # 所要時間
        learn_info["end_time"] = time.time()
        learn_info["total_times"] = learn_info["total_times"] + learn_info["end_time"] - learn_info["start_time"]
        print("所要時間 = " + str(int(learn_info["total_times"]/60))  +" 分 " + str(int(learn_info["total_times"]%60)) + " 秒")

    return learn_info

Functions for computing accuracy and the F-measure

# 正解率
def accuracy_rate(y, t):
    # 2値分類
    if t.shape[t.ndim-1] == 1:
        round_y = np.round(y)
        return np.sum(round_y == t)/y.shape[0]
    # 多クラス分類
    else:
        max_y = np.argmax(y, axis=1)
        max_t = np.argmax(t, axis=1)
        return np.sum(max_y == max_t)/y.shape[0]

# F値
def f_measure(y, t):
    TP = np.sum((y == 1) & (t==1))
    if TP == 0:
        print("TP=0")
        return 0
    FP = np.sum((y == 1) & (t==0))
    TN = np.sum((y == 0) & (t==0))
    FN = np.sum((y == 0) & (t==1))
    precision=TP/(TP+FP)
    recall = TP/(TP+FN)
    f_measure = 2*(precision*recall)/(precision+recall)
    return f_measure, precision, recall, TP, FP, TN, FN
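
A quick example of what these return (the arrays here are made up purely for illustration):

y = np.array([[1], [0], [1], [1]])
t = np.array([[1], [1], [0], [1]])
print(accuracy_rate(y, t))  # 0.5
print(f_measure(y, t))      # (f_measure, precision, recall, TP, FP, TN, FN)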

Data normalization

Data normalization used to be performed inside the training routine; it is now independent.
Normalizing the test data uses statistics computed from the training data, so always call train_data_normalize before calling test_data_normalize.

# データ正規化関数
data_normalizer = create_data_normalizer(func, **kwargs)
# 引数
#  func   : データ正規化関数
#  kwargs : データ正規化関数パラメータ
# 戻り値
#  data_normalizer : data_normalizer

# データ正規化関数(学習データ)
nx, data_normalizer = train_data_normalize(data_normalizer, x)
# 引数
#  data_normalizer : data_normalizer
#  x               : 学習データ
# 戻り値
#  nx              : 学習データ(正規化後)
#  data_normalizer : data_normalizer

# データ正規化関数(テストデータ)
nx = test_data_normalize(data_normalizer, x)
# 引数
#  data_normalizer : data_normalizer
#  x               : テストデータ
# 戻り値
#  nx              : テストデータ(正規化後)
def create_data_normalizer(func, **kwargs):
    data_normalizer = {"func":func, "params":kwargs, "data_normalizer_stats":{}}
    return data_normalizer

def train_data_normalize(data_normalizer, x):
    data_normalizer_stats = {}
    nx, data_normalizer["data_normalizer_stats"] = data_normalizer["func"](x, data_normalizer_stats, **data_normalizer["params"])
    return nx, data_normalizer

def test_data_normalize(data_normalizer, x):
    nx, _ = data_normalizer["func"](x, data_normalizer["data_normalizer_stats"], **data_normalizer["params"])
    return nx

Normalization functions

def min_max(x, stats, axis=None):
    if "min" not in stats:
        stats["min"] = np.min(x, axis=axis, keepdims=True) # 最小値を求める
    if "max" not in stats:
        stats["max"] = np.max(x, axis=axis, keepdims=True) # 最大値を求める
    return (x-stats["min"])/np.maximum((stats["max"]-stats["min"]),1e-7), stats

def z_score(x, stats, axis=None):
    if "mean" not in stats:
        stats["mean"] = np.mean(x, axis=axis, keepdims=True) # 平均値を求める
    if "std" not in stats:
        stats["std"]  = np.std(x, axis=axis, keepdims=True)  # 標準偏差を求める
    return (x-stats["mean"])/np.maximum(stats["std"],1e-7), stats
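
For example, per-feature z-score normalization could be set up as follows (axis=0 computes the statistics column-wise; the runs below use min_max instead):

data_normalizer = create_data_normalizer(z_score, axis=0)
nx_train, data_normalizer = train_data_normalize(data_normalizer, x_train)
nx_test = test_data_normalize(data_normalizer, x_test)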

Example runs

Let us try a few configurations.
The full program is listed in the reference section.

Loading and normalizing the data

Data normalization is now performed separately, before training.

x_train, t_train, x_test, t_test = load_mnist('c:\\mnist\\')
data_normalizer = create_data_normalizer(min_max)
nx_train, data_normalizer = train_data_normalize(data_normalizer, x_train)
nx_test                         = test_data_normalize(data_normalizer, x_test)

Basic run

Hidden layers with 100 and 50 nodes.

Model definition

model = create_model(nx_train.shape[1]) # 28*28
model = add_layer(model, "affine1", affine, 100)
model = add_layer(model, "relu1", relu)
model = add_layer(model, "affine2", affine, 50)
model = add_layer(model, "relu2", relu)
model = add_layer(model, "affine3", affine, 10)
model = set_output(model, softmax)
model = set_error(model, cross_entropy_error)

Run

epoch = 50
batch_size = 100
np.random.seed(10)
model, optimizer, learn_info = learn(model, nx_train, t_train, nx_test, t_test, batch_size=batch_size, epoch=epoch)

Result

input - 0 784
affine1 affine 784 100
relu1 relu 100 100
affine2 affine 100 50
relu2 relu 50 50
affine3 affine 50 10
output softmax 10
error cross_entropy_error
0 0.0813333333333 2.48924898315 0.0838 2.4902096706 
1 0.877283333333 0.420691539071 0.9348 0.224156800349 
2 0.943483333333 0.192000156742 0.9517 0.158824063398 
3 0.958383333333 0.140415289109 0.9557 0.144441912047 
4 0.966533333333 0.112354974838 0.9673 0.109278806451 
5 0.972133333333 0.0932476134434 0.972 0.0968895001334 
6 0.976933333333 0.0792976438043 0.9718 0.0932417094467 
7 0.979283333333 0.0686270650944 0.9718 0.087283363008 
8 0.9818 0.0608155991477 0.9714 0.0882409597344 
9 0.984716666667 0.0530088855034 0.9744 0.0805435423165 
10 0.985866666667 0.0476703077711 0.9762 0.0778492998986 
11 0.987183333333 0.0421470629586 0.9758 0.0766172424727 
12 0.989183333333 0.0378823737229 0.9754 0.0753572960246 
13 0.990216666667 0.0333476051916 0.9769 0.0746480376866 
14 0.991633333333 0.0295584425919 0.977 0.074746519702 
15 0.99235 0.026680339424 0.9764 0.0773430976045 
16 0.993333333333 0.024142958608 0.9775 0.0716534037059 
17 0.9945 0.0214301918558 0.9769 0.0752819896242 
18 0.99525 0.0192128761778 0.978 0.0732409831049 
19 0.995733333333 0.0169450183493 0.9773 0.0747687798584 
20 0.996616666667 0.0148580767811 0.9781 0.0732812134007 
21 0.997233333333 0.0134049315701 0.9777 0.0767947479002 
22 0.997683333333 0.0114475684254 0.9786 0.0725559041059 
23 0.9978 0.0107242273976 0.978 0.073842190983 
24 0.998383333333 0.00941678843059 0.9743 0.0856261402626 
25 0.9988 0.00847727532411 0.9771 0.0783456006851 
26 0.999033333333 0.00739824647842 0.977 0.0755873960436 
27 0.999133333333 0.00663384811547 0.9782 0.0776557087651 
28 0.999283333333 0.00627783458758 0.9766 0.078860156733 
29 0.9995 0.0051216837159 0.9784 0.0777150880162 
30 0.99955 0.00461816773519 0.978 0.0773160189954 
31 0.99975 0.00401691265955 0.9786 0.0772116395866 
32 0.999716666667 0.00377946032851 0.9787 0.0773908750494 
33 0.9998 0.00344856027878 0.9786 0.0786565842937 
34 0.99985 0.00315179806981 0.9788 0.0787258320453 
35 0.999916666667 0.00278573818558 0.9784 0.0796560777359 
36 0.999983333333 0.0025110907253 0.978 0.0797181935584 
37 0.99995 0.00238355797371 0.9781 0.0803929770414 
38 0.999966666667 0.00217442226381 0.9788 0.0809956552007 
39 0.999966666667 0.0020413822011 0.9781 0.0813022496192 
40 0.999983333333 0.0019561012899 0.9784 0.0803598037121 
41 0.999983333333 0.00180255571437 0.9782 0.0811510484325 
42 1.0 0.001701594283 0.9782 0.082003390098 
43 1.0 0.00161173281844 0.9768 0.0832258207727 
44 0.999966666667 0.0015280135421 0.9787 0.084025576588 
45 1.0 0.00147259139201 0.9786 0.0831630500244 
46 1.0 0.00139654618025 0.9784 0.0841561167294 
47 1.0 0.00131650329542 0.9779 0.0847538814986 
48 1.0 0.00127532762374 0.9776 0.0845332904361 
49 1.0 0.00122637918889 0.9785 0.0848684105633 
50 1.0 0.0011690765288 0.9785 0.0853782749166 
所要時間 = 3 分 28 秒

Continued training

With the model above, suppose we first run 30 epochs and then run another 20 epochs on a later day.

Running 30 epochs

epoch =30
batch_size = 100
np.random.seed(10)
model, optimizer, learn_info = learn(model, nx_train, t_train, nx_test, t_test, batch_size=batch_size, epoch=epoch)

Saving the model

save_model(model, "model_save_30")

Steps on the later day

Loading the model

model = load_model("model_save_30")

Set init_model_flag to False. Also pass the previous learn_info so that the training history is carried over.

Running 20 epochs

epoch =20
batch_size = 100
model, optimizer, learn_info = learn(model, nx_train, t_train, nx_test, t_test, batch_size=batch_size, epoch=epoch, init_model_flag=False, learn_info=learn_info)
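
The example above saves only the model, so the optimizer is re-initialized for the later run. If you also want to carry over the optimizer state (Adam's moment estimates, for instance), one possible approach, sketched here rather than shown in the article, is to save it with save_optimizer on the first day and pass init_optimizer_flag=False when resuming.

save_optimizer(optimizer, "optimizer_save_30")   # first day, right after training

optimizer = load_optimizer("optimizer_save_30")  # later day
model, optimizer, learn_info = learn(model, nx_train, t_train, nx_test, t_test, batch_size=batch_size, epoch=epoch,
          init_model_flag=False, optimizer=optimizer, init_optimizer_flag=False, learn_info=learn_info)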

If you want to change the learning rate part-way through, run it as follows.

epoch =20
batch_size = 100
optimizer = create_optimizer(SGD, lr=0.01)
model, optimizer, learn_info = learn(model, nx_train, t_train, nx_test, t_test, batch_size=batch_size, epoch=epoch, init_model_flag=False, optimizer=optimizer, learn_info=learn_info)

Various settings

Setting the optimizer

An example that uses Adam as the optimizer.

model = create_model(nx_train.shape[1]) # 28*28
model = add_layer(model, "affine1", affine, 100)
model = add_layer(model, "relu1", relu)
model = add_layer(model, "affine2", affine, 50)
model = add_layer(model, "relu2", relu)
model = add_layer(model, "affine3", affine, 10)
model = set_output(model, softmax)
model = set_error(model, cross_entropy_error)
optimizer = create_optimizer(Adam, lr=0.001, beta1=0.9, beta2=0.999)
epoch = 50
batch_size = 100
np.random.seed(10)
model, optimizer, learn_info = learn(model, nx_train, t_train, nx_test, t_test, optimizer=optimizer, batch_size=batch_size, epoch=epoch)

Setting weight decay

An example with weight decay.

model = create_model(nx_train.shape[1]) # 28*28
model = add_layer(model, "affine1", affine, 100)
model = add_layer(model, "relu1", relu)
model = add_layer(model, "affine2", affine, 50)
model = add_layer(model, "relu2", relu)
model = add_layer(model, "affine3", affine, 10)
model = set_output(model, softmax)
model = set_error(model, cross_entropy_error)
model = set_weight_decay(model, L2_norm, weight_decay_lambda=0.001)

Setting dropout

An example with dropout. The dropout ratio can be set independently for each layer.

model = create_model(nx_train.shape[1]) # 28*28
model = add_layer(model, "dropout0", dropout, dropout_ratio=0.9)
model = add_layer(model, "affine1", affine, 100)
model = add_layer(model, "relu1", relu)
model = add_layer(model, "dropout1", dropout, dropout_ratio=0.5)
model = add_layer(model, "affine2", affine, 50)
model = add_layer(model, "relu2", relu)
model = add_layer(model, "dropout2", dropout, dropout_ratio=0.5)
model = add_layer(model, "affine3", affine, 10)
model = set_output(model, softmax)
model = set_error(model, cross_entropy_error)

Setting batch normalization

An example with batch normalization.

model = create_model(nx_train.shape[1]) # 28*28
model = add_layer(model, "affine1", affine, 100)
model = add_layer(model, "batch_norm1", batch_normalization)
model = add_layer(model, "relu1", relu)
model = add_layer(model, "affine2", affine, 50)
model = add_layer(model, "batch_norm2", batch_normalization)
model = add_layer(model, "relu2", relu)
model = add_layer(model, "affine3", affine, 10)
model = set_output(model, softmax)
model = set_error(model, cross_entropy_error)

Setting the weight initialization

The default weight initialization for affine is he_normal. Here is an example that uses lecun_normal instead.

model = create_model(nx_train.shape[1]) # 28*28
model = add_layer(model, "affine1", affine, 100, weight_init_func=lecun_normal)
model = add_layer(model, "relu1", relu)
model = add_layer(model, "affine2", affine, 50, weight_init_func=lecun_normal)
model = add_layer(model, "relu2", relu)
model = add_layer(model, "affine3", affine, 10, weight_init_func=lecun_normal)
model = set_output(model, softmax)
model = set_error(model, cross_entropy_error)

Performance improvements

The implementation works as it is, but we make a couple of performance improvements.

Skipping the gradient computation in the first layer

The du computed for the first layer during backpropagation is never used, yet until now du was always computed for every layer. A calc_du_flag was added so that du is skipped when backpropagating through the first layer.
For now this is limited to the affine backpropagation.
See the full program in the reference section for the actual code; the relevant change to affine_back is shown below.
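
The modified affine_back, excerpted from the full program:

def affine_back(dz, u, W, b, calc_du_flag=True):
    du = None
    if calc_du_flag:                        # 最初の層では、duを計算しない
        du = np.dot(dz, W.T)                # zの勾配は、今までの勾配と重みを掛けた値
    dW = np.dot(u.T, dz)                    # 重みの勾配は、zに今までの勾配を掛けた値
    db = np.dot(np.ones(u.shape[0]).T, dz)  # バイアスの勾配は、今までの勾配の値
    return du, dW, db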

Changing how test data is predicted each epoch

The test-data prediction computed in epoch_hook was changed from predicting the whole set at once to predicting it in batch-size chunks.

Run

The basic run took 3 minutes 28 seconds before; it now finishes in 2 minutes 20 seconds. The gradient computation in the first layer turns out to account for a surprisingly large share of the time.

Reference

Function specifications

The specifications of each function.

# モデル作成関数
model = create_model(input_shape)
# 引数
#  input_shape : 入力データの次元
# 戻り値
#  モデル

# 層追加関数
model = add_layer(model, name, func, d=None, **kwargs)
# 引数
#  model : モデル
#  name  : レイヤの名前
#  func  : 中間層の関数
#           affine,sigmoid,tanh,relu,leaky_relu,prelu,rrelu,relun,srelu,elu,maxout,
#           identity,softplus,softsign,step,dropout,batch_normalization
#  d     : ノード数
#           affine,maxoutの場合指定
#  kwargs: 中間層の関数のパラメータ
#           affine - weight_init_func=he_normal, weight_init_params={}, bias_init_func=zeros_b, bias_init_params={}
#                     weight_init_func - lecun_normal,lecun_uniform,glorot_normal,glorot_uniform,he_normal,he_uniform,normal_w,uniform_w,zeros_w,ones_w
#                     weight_init_params
#                       normal_w - mean=0, var=1
#                       uniform_w - min=0, max=1
#                     bias_init_func - normal_b,uniform_b,zeros_b,ones_b
#                     bias_init_params
#                       normal_b - mean=0, var=1
#                       uniform_b - min=0, max=1
#           leaky_relu - alpha
#           rrelu - min=0.0, max=0.1
#           relun - n
#           srelu - alpha
#           elu - alpha
#           maxout - unit=1, weight_init_func=he_normal, weight_init_params={}, bias_init_func=zeros_b, bias_init_params={}
#           dropout - dropout_ratio=0.9
#           batch_normalization - batch_norm_node=True, use_gamma_beta=True, use_train_stats=True, alpha=0.1
# 戻り値
#  モデル

# 出力関数
model = set_output(model, func, **kwargs)
# 引数
#  model : モデル
#  func  : 出力層の関数
#           softmax,sigmoid,identity
#  kwargs: 出力層の関数のパラメータ
#           なし
# 戻り値
#  モデル

# 損失関数
model = set_error(model, func, **kwargs)
# 引数
#  model : モデル
#  func  : 損失関数
#           mean_squared_error,cross_entropy_error
#  kwargs: 損失関数のパラメータ
#           なし
# 戻り値
#  モデル

# 重み減衰関数
model = set_weight_decay(model, func, **kwargs)
# 引数
#  model : モデル
#  func  : 重み減衰関数
#           L1_norm,L2_norm
#  kwargs: 重み減衰関数のパラメータ
#           L1_norm - weight_decay_lambda
#           L2_norm - weight_decay_lambda
# 戻り値
#  モデル

# オプティマイザ作成関数
optimizer = create_optimizer(func, **kwargs)
# 引数
#  func  : オプティマイザ関数
#           SGD,Momentum,AdaGrad,RMSProp,Adadelta,Adam
#           Nesterovは、未サポート
#  kwargs: オプティマイザ関数のパラメータ
#           SGD - lr=0.1
#           Momentum - lr=0.01, mu=0.9
#           AdaGrad - lr=0.01, epsilon=1e-7
#           RMSProp - lr=0.001, gamma=0.9, epsilon=1e-7
#           Adadelta - lr=1.0, gamma=0.9, epsilon=1e-7
#           Adam - lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7
# 戻り値
#  オプティマイザ
# 学習関数
model, optimizer, learn_info = learn(model, x_train, t_train, x_test=None, t_test=None, batch_size=100, epoch=50, init_model_flag=True,
          optimizer=None, init_optimizer_flag=True, shuffle_flag=True, learn_info=None)
# 引数
#  model  : モデル
#  x_train : 学習データ
#  t_train : 学習データ正解
#  x_test  : テストデータ(省略可)
#  t_test  : テストデータ正解(省略可)
#  batch_size : バッチサイズ
#  epoch : エポック
#  init_model_flag : モデル初期化フラグ。継続して学習する場合は、Falseを設定
#  optimizer  : オプティマイザ(省略時は、SGD)
#  init_optimizer_flag : オプティマイザ初期化フラグ。継続して学習する場合は、Falseを設定
#  shuffle_flag : シャッフルフラグ
#  learn_info : 学習情報。継続して学習する場合は、前回のlearn_infoを設定
# 戻り値
#  model : モデル
#  optimizer : オプティマイザ
#  learn_info : 学習情報

# 予測関数
y_pred, err, us = predict(model, x_pred, t_pred=None)
# 引数
#  model  : モデル
#  x_pred : 予測データ
#  t_pred : 予測データ正解(省略可)
# 戻り値
#  y_pred : 予測値
#  err    : 誤差(t_predを省略した場合は、ダミー)
#  us     : 各層の結果
# データ正規化関数
data_normalizer = create_data_normalizer(func, **kwargs)
# 引数
#  func   : データ正規化関数
#            min_max,z_score
#  kwargs : データ正規化関数パラメータ
#            min_max - axis
#            z_score - axis
# 戻り値
#  data_normalizer : data_normalizer

# データ正規化関数(学習データ)
nx, data_normalizer = train_data_normalize(data_normalizer, x)
# 引数
#  data_normalizer : data_normalizer
#  x               : 学習データ
# 戻り値
#  nx              : 学習データ(正規化後)
#  data_normalizer : data_normalizer

# データ正規化関数(テストデータ)
nx = test_data_normalize(data_normalizer, x)
# 引数
#  data_normalizer : data_normalizer
#  x               : テストデータ
# 戻り値
#  nx              : テストデータ(正規化後)

Full program

Here is the full program.

import gzip
import numpy as np
# MNIST読み込み
def load_mnist( mnist_path ) :
    return _load_image(mnist_path + 'train-images-idx3-ubyte.gz'), \
           _load_label(mnist_path + 'train-labels-idx1-ubyte.gz'), \
           _load_image(mnist_path + 't10k-images-idx3-ubyte.gz'), \
           _load_label(mnist_path + 't10k-labels-idx1-ubyte.gz')
def _load_image( image_path ) :
    # 画像データの読み込み
    with gzip.open(image_path, 'rb') as f:
        buffer = f.read()
    size = np.frombuffer(buffer, np.dtype('>i4'), 1, offset=4)
    rows = np.frombuffer(buffer, np.dtype('>i4'), 1, offset=8)
    columns = np.frombuffer(buffer, np.dtype('>i4'), 1, offset=12)
    data = np.frombuffer(buffer, np.uint8, offset=16)
    image = np.reshape(data, (size[0], rows[0]*columns[0]))
    image = image.astype(np.float32)
    return image
def _load_label( label_path ) :
    # 正解データ読み込み
    with gzip.open(label_path, 'rb') as f:
        buffer = f.read()
    size = np.frombuffer(buffer, np.dtype('>i4'), 1, offset=4)
    data = np.frombuffer(buffer, np.uint8, offset=8)
    label = np.zeros((size[0], 10))
    for i in range(size[0]):
        label[i, data[i]] = 1
    return label

# 正規化関数
def min_max(x, stats, axis=None):
    if "min" not in stats:
        stats["min"] = np.min(x, axis=axis, keepdims=True) # 最小値を求める
    if "max" not in stats:
        stats["max"] = np.max(x, axis=axis, keepdims=True) # 最大値を求める
    return (x-stats["min"])/np.maximum((stats["max"]-stats["min"]),1e-7), stats

def z_score(x, stats, axis=None):
    if "mean" not in stats:
        stats["mean"] = np.mean(x, axis=axis, keepdims=True) # 平均値を求める
    if "std" not in stats:
        stats["std"]  = np.std(x, axis=axis, keepdims=True)  # 標準偏差を求める
    return (x-stats["mean"])/np.maximum(stats["std"],1e-7), stats

# affine変換
def affine(u, W, b):
    return np.dot(u, W) + b

def affine_back(dz, u, W, b, calc_du_flag=True):
    du = None
    if calc_du_flag:                        # 最初の層では、duを計算しない
        du = np.dot(dz, W.T)                # zの勾配は、今までの勾配と重みを掛けた値
    dW = np.dot(u.T, dz)                    # 重みの勾配は、zに今までの勾配を掛けた値
    db = np.dot(np.ones(u.shape[0]).T, dz)  # バイアスの勾配は、今までの勾配の値
    return du, dW, db

# 活性化関数(中間)
def sigmoid(u):
    return 1/(1 + np.exp(-u))

def sigmoid_back(dz, u, z):
    return dz * (z - z**2)

def tanh(u):
    return np.tanh(u)

def tanh_back(dz, u, z):
    return dz * (1 - z**2)
#    return dz * (1/np.cosh(u)**2)

def relu(u):
    return np.maximum(0, u)

def relu_back(dz, u, z):
    return dz * np.where(u > 0, 1, 0)

def leaky_relu(u, alpha):
    return np.maximum(u * alpha, u)

def leaky_relu_back(dz, u, z, alpha):
    return dz * np.where(u > 0, 1, alpha)

def prelu(u, alpha):
    return np.where(u > 0, u, u * alpha )

def prelu_back(dz, u, z, alpha):
    return dz * np.where(u > 0, 1, alpha), np.sum(dz * np.where(u > 0, 0, u), axis=0)/u.shape[0]

def rrelu(u, alpha):
    return np.where(u > 0, u, u * alpha )

def rrelu_back(dz, u, z, alpha):
    return dz * np.where(u > 0, 1, alpha)

def relun(u, n):
    return np.minimum(np.maximum(0, u), n)

def relun_back(dz, u, z, n):
    return dz * np.where(u > n, 0, np.where(u > 0, 1, 0))

def srelu(u, alpha):
    return np.where(u > alpha, u, alpha)

def srelu_back(dz, u, z, alpha):
    return dz * np.where(u > alpha, 1, 0)

def elu(u, alpha):
    return np.where(u > 0, u,  alpha * (np.exp(u) - 1))

def elu_back(dz, u, z, alpha):
    return dz * np.where(u > 0, 1,  alpha * np.exp(u))

def maxout(u, unit):
    u = u.reshape((u.shape[0],int(u.shape[1]/unit),unit))
    return np.max(u, axis=2)

def maxout_back(dz, u, z, unit):
    u = u.reshape((u.shape[0],int(u.shape[1]/unit),unit))
    u_max = np.argmax(u,axis=2)
    du = np.zeros_like(u)
    for i in range(du.shape[0]):
        for j in range(du.shape[1]):
            du[i,j,u_max[i,j]] = dz[i,j]
    return du.reshape((du.shape[0],int(du.shape[1]*unit)))

def identity(u):
    return u

def identity_back(dz, u, z):
    return dz

def softplus(u):
    return np.log(1+np.exp(u))

def softplus_back(dz, u, z):
    return dz * (1/(1 + np.exp(-u)))

def softsign(u):
    return u/(1+np.absolute(u))

def softsign_back(dz, u, z):
    return dz * (1/(1+np.absolute(u))**2)

def step(u):
    return np.where(u > 0, 1, 0)

def step_back(dz, u, x):
    return 0

# 活性化関数(出力)
def softmax(u):
    u = u.T
    max_u = np.max(u, axis=0)
    exp_u = np.exp(u - max_u)
    sum_exp_u = np.sum(exp_u, axis=0)
    y = exp_u/sum_exp_u
    return y.T

# 損失関数
def mean_squared_error(y, t):
    size = 1
    if y.ndim == 2:
        size = y.shape[0]
    return 0.5 * np.sum((y-t)**2)/size

def cross_entropy_error(y, t):
    size = 1
    if y.ndim == 2:
        size = y.shape[0]
    if t.shape[t.ndim-1] == 1:
        # 2値分類
        return -np.sum(t * np.log(np.maximum(y,1e-7)) + (1 - t) * np.log(np.maximum(1 - y,1e-7)))/size
    else:
        # 多クラス分類
        return -np.sum(t * np.log(np.maximum(y,1e-7)))/size

# 活性化関数(出力)+損失関数勾配
def softmax_cross_entropy_error_back(y, u, t):
    size = 1
    if y.ndim == 2:
        size = y.shape[0]
    return (y - t)/size

def sigmoid_cross_entropy_error_back(y, u, t):
    size = 1
    if y.ndim == 2:
        size = y.shape[0]
    return (y - t)/size

def identity_mean_squared_error_back(y, u, t):
    size = 1
    if y.ndim == 2:
        size = y.shape[0]
    return (y - t)/size

# 重み・バイアスの初期化
def lecun_normal(d_1, d):
    var = 1/np.sqrt(d_1)
    return np.random.normal(0, var, (d_1, d))
def lecun_uniform(d_1, d):
    min = -np.sqrt(3/d_1)
    max = np.sqrt(3/d_1)
    return np.random.uniform(min, max, (d_1, d))

def glorot_normal(d_1, d):
    var = np.sqrt(2/(d_1+d))
    return np.random.normal(0, var, (d_1, d))
def glorot_uniform(d_1, d):
    min = -np.sqrt(6/(d_1+d))
    max = np.sqrt(6/(d_1+d))
    return np.random.uniform(min, max, (d_1, d))

def he_normal(d_1, d):
    var = np.sqrt(2/d_1)
    return np.random.normal(0, var, (d_1, d))
def he_uniform(d_1, d):
    min = -np.sqrt(6/d_1)
    max = np.sqrt(6/d_1)
    return np.random.uniform(min, max, (d_1, d))

def normal_w(d_1, d, mean=0, var=1):
    return np.random.normal(mean, var, (d_1, d))
def normal_b(d, mean=0, var=1):
    return np.random.normal(mean, var, d)

def uniform_w(d_1, d, min=0, max=1):
    return np.random.uniform(min, max, (d_1, d))
def uniform_b(d, min=0, max=1):
    return np.random.uniform(min, max, d)

def zeros_w(d_1, d):
    return np.zeros((d_1, d))
def zeros_b(d):
    return np.zeros(d)

def ones_w(d_1, d):
    return np.ones((d_1, d))
def ones_b(d):
    return np.ones(d)

# 勾配法
def SGD(W, dW, lr=0.1):
    return W - lr*dW, {}

def Momentum(W, dW, lr=0.01, mu=0.9, v=None):
    if v is None: # vの初期値設定
        v = np.zeros_like(W)
    # vの更新
    v = mu * v - lr * dW
    return W + v, {"v":v}

def Nesterov(W, dW, lr=0.01, mu=0.9, v=None):
    if v is None:
        v = np.zeros_like(W)
    v = mu * v - lr * dW
    return W + v, {"v":v}

def AdaGrad(W, dW, lr=0.01, epsilon=1e-7, g=None):
    if g is None:
        g = np.zeros_like(W)
    g =  g + (dW * dW)
    return W - (lr * dW)/np.sqrt(np.maximum(g, epsilon)), {"g":g}

def RMSProp(W, dW, lr=0.001, gamma=0.9, epsilon=1e-7, g=None):
    if g is None:
        g = np.zeros_like(W)
    g =  gamma * g + (1 - gamma) * dW * dW
    return W - (lr * dW)/np.sqrt(np.maximum(g, epsilon)), {"g":g}

def Adadelta(W, dW, lr=1.0, gamma=0.9, epsilon=1e-7, g=None, s=None):
    if g is None:
        g = np.zeros_like(W)
    if s is None:
        s = np.zeros_like(W)
    g = gamma * g + (1 - gamma) * dW * dW
    newW = W - (lr * dW)/np.sqrt(np.maximum(g, epsilon)) * np.sqrt(np.maximum(s, epsilon))
    s = (1 - gamma) * (newW - W) * (newW - W) + gamma * s
    return newW, {"g":g, "s":s}

def Adam(W, dW, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7, k=0, m=None, v=None):
    if m is None:
        m = np.zeros_like(W)
    if v is None:
        v = np.zeros_like(W)
    k = k + 1
    m = beta1 * m + (1 - beta1) * dW
    v = beta2 * v + (1 - beta2) * dW * dW
    hatm = m / (1 - np.power(beta1, k))
    hatv = v / (1 - np.power(beta2, k))
    return W - lr / np.maximum(np.sqrt(hatv), epsilon) * hatm, {"k":k, "m":m, "v":v}

# 重み減衰
def L1_norm(W, weight_decay_lambda):
    r = np.sum(np.absolute(W))
    return r * weight_decay_lambda

def L1_norm_back(W, weight_decay_lambda):
    return np.where(W > 0, weight_decay_lambda, np.where(W < 0, -weight_decay_lambda, 0))

def L2_norm(W, weight_decay_lambda):
    r = np.sum(W**2)
    return (r * weight_decay_lambda)/2

def L2_norm_back(W, weight_decay_lambda):
    return weight_decay_lambda * W

# ドロップアウト
def dropout(z, mask):
    return z * mask

def dropout_back(dz, mask):
    return dz * mask

def set_dropout_mask(d, dropout_ratio):
    mask = np.zeros(d)
    mask = mask.flatten()
    size = mask.shape[0]
    mask_idx = np.random.choice(size, round(dropout_ratio*size), replace=False) # 通すindexの配列
    mask[mask_idx] = 1
    mask = mask.reshape(d)
    return mask

# バッチ正規化
def batch_normalization(u, gamma, beta, axis=0, u_mean=None, u_var=None):
    if u_mean is None:
        u_mean = np.mean(u, axis=axis, keepdims=True)               # 平均値を求める
    if u_var is None:
        u_var  = np.var(u, axis=axis, keepdims=True)                # 分散を求める
    u_std  = np.maximum(np.sqrt(u_var), 1e-7)                       # 標準偏差を求める
    uh = (u-u_mean)/u_std                                           # 正規化
    return gamma * uh + beta, u_mean, u_var

def batch_normalization_back(dz, u, gamma, beta, axis=0, u_mean=None, u_var=None):
    m = u.shape[axis]                                               # バッチサイズ
    if u_mean is None:
        u_mean = np.mean(u, axis=axis, keepdims=True)               # 平均値を求める
    if u_var is None:
        u_var  = np.var(u, axis=axis, keepdims=True)                # 分散を求める
    u_std  = np.maximum(np.sqrt(u_var), 1e-7)                       # 標準偏差を求める
    uh     = (u-u_mean)/u_std                                       # 正規化
    gdz    = dz * gamma
    dz_sum = np.sum(gdz * (u - u_mean), axis=axis, keepdims=True)
    dn     = (gdz - (uh * dz_sum /(m * u_std)))/ u_std
    dn_sum = np.sum(dn, axis=axis, keepdims=True)
    du     = dn - (dn_sum / m)
    if gamma.ndim == 1:
        dgamma = np.sum(uh * dz)/u.reshape(u.shape[0], -1).shape[1]
        dbeta  = np.sum(dz)/u.reshape(u.shape[0], -1).shape[1]
    else:
        dgamma = np.sum(uh * dz, axis=axis, keepdims=True)
        dbeta  = np.sum(dz, axis=axis, keepdims=True)
    return du, dgamma, dbeta, u_mean, u_var

# 正解率
def accuracy_rate(y, t):
    # 2値分類
    if t.shape[t.ndim-1] == 1:
        round_y = np.round(y)
        return np.sum(round_y == t)/y.shape[0]
    # 多クラス分類
    else:
        max_y = np.argmax(y, axis=1)
        max_t = np.argmax(t, axis=1)
        return np.sum(max_y == max_t)/y.shape[0]

# F値
def f_measure(y, t):
    TP = np.sum((y == 1) & (t==1))
    if TP == 0:
        print("TP=0")
        return 0
    FP = np.sum((y == 1) & (t==0))
    TN = np.sum((y == 0) & (t==0))
    FN = np.sum((y == 0) & (t==1))
    precision=TP/(TP+FP)
    recall = TP/(TP+FN)
    f_measure = 2*(precision*recall)/(precision+recall)
    return f_measure, precision, recall, TP, FP, TN, FN
def middle_propagation(func, u, weights, weight_decay, learn_flag, **params):
    z = func(u, **params)
    return {"u":u, "z":z}, 0

def middle_back_propagation(back_func, dz, us, weights, weight_decay, calc_du_flag, **params):
    du = back_func(dz, us["u"], us["z"], **params)
    return {"du":du}

def output_propagation(func, u, learn_flag, **params):
    z = func(u, **params)
    return {"u":u, "z":z}

# affine
def affine_init_layer(d_prev, d, weight_init_func=he_normal, weight_init_params={}, bias_init_func=zeros_b, bias_init_params={}):
    W = weight_init_func(d_prev, d, **weight_init_params)
    b = bias_init_func(d, **bias_init_params)
    return d, {"W":W, "b":b}

def affine_init_optimizer():
    sW = {}
    sb = {}
    return {"sW":sW, "sb":sb}

def affine_propagation(func, u, weights, weight_decay, learn_flag, **params):
    z = func(u, weights["W"], weights["b"])
    # 重み減衰対応
    weight_decay_r = 0
    if weight_decay is not None:
        weight_decay_r = weight_decay["func"](weights["W"], **weight_decay["params"])
    return {"u":u, "z":z}, weight_decay_r

def affine_back_propagation(back_func, dz, us, weights, weight_decay, calc_du_flag, **params):
    du, dW, db = back_func(dz, us["u"], weights["W"], weights["b"], calc_du_flag)
    # 重み減衰対応
    if weight_decay is not None:
        dW = dW + weight_decay["back_func"](weights["W"], **weight_decay["params"])
    return {"du":du, "dW":dW, "db":db}

def affine_update_weight(func, du, weights, optimizer_stats, **params):
    weights["W"], optimizer_stats["sW"] = func(weights["W"], du["dW"], **params, **optimizer_stats["sW"])
    weights["b"], optimizer_stats["sb"] = func(weights["b"], du["db"], **params, **optimizer_stats["sb"])
    return weights, optimizer_stats
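
# Sketch of how the *_init_layer / *_propagation / *_back_propagation /
# *_update_weight wrappers fit together, for a single affine layer and one
# update step. affine, affine_back, he_normal and zeros_b are assumed to be
# the functions from the earlier parts of this series; dz is a dummy gradient.
import numpy as np

_d, _weights = affine_init_layer(784, 100)           # initialize W and b
_opt_stats   = affine_init_optimizer()               # empty optimizer state
_x  = np.random.randn(32, 784)
_us, _ = affine_propagation(affine, _x, _weights, None, True)               # forward
_dz = np.random.randn(32, 100)                                              # dummy upstream gradient
_du = affine_back_propagation(affine_back, _dz, _us, _weights, None, True)  # backward
_weights, _opt_stats = affine_update_weight(Adam, _du, _weights, _opt_stats, lr=0.001)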

# maxout
def maxout_init_layer(d_prev, d, unit=1, weight_init_func=he_normal, weight_init_params={}, bias_init_func=zeros_b, bias_init_params={}):
    W = weight_init_func(d_prev, d*unit, **weight_init_params)
    b = bias_init_func(d*unit, **bias_init_params)
    return d, {"W":W, "b":b}

def maxout_init_optimizer():
    sW = {}
    sb = {}
    return {"sW":sW, "sb":sb}

def maxout_propagation(func, u, weights, weight_decay, learn_flag, unit=1, **params):
    u_affine = affine(u, weights["W"], weights["b"])
    z   = func(u_affine, unit)
    # 重み減衰対応(未対応)
    weight_decay_r = 0
    #if weight_decay is not None:
    #    weight_decay_r = weight_decay["func"](weights["W"], **weight_decay["params"])
    return {"u":u, "z":z, "u_affine":u_affine}, weight_decay_r

def maxout_back_propagation(back_func, dz, us, weights, weight_decay, calc_du_flag, unit=1, **params):
    du_affine = back_func(dz, us["u_affine"], us["z"], unit)
    du, dW, db = affine_back(du_affine, us["u"], weights["W"], weights["b"])
    # 重み減衰対応(未対応)
    #if weight_decay is not None:
    #    dW = dW + weight_decay["back_func"](weights["W"], **weight_decay["params"])
    return {"du":du, "dW":dW, "db":db}

def maxout_update_weight(func, du, weights, optimizer_stats, **params):
    weights["W"], optimizer_stats["sW"] = func(weights["W"], du["dW"], **params, **optimizer_stats["sW"])
    weights["b"], optimizer_stats["sb"] = func(weights["b"], du["db"], **params, **optimizer_stats["sb"])
    return weights, optimizer_stats

#prelu
def prelu_init_layer(d_prev, d):
    alpha = np.zeros(d)
    return d, {"alpha":alpha}

def prelu_init_optimizer():
    salpha = {}
    return {"salpha":salpha}

def prelu_propagation(func, u, weights, weight_decay, learn_flag, **params):
    z = func(u, weights["alpha"])
    return {"u":u, "z":z}, 0

def prelu_back_propagation(back_func, dz, us, weights, weight_decay, calc_du_flag, **params):
    du, dalpha = back_func(dz, us["u"], us["z"], weights["alpha"])
    return {"du":du, "dalpha":dalpha}

def prelu_update_weight(func, du, weights, optimizer_stats, **params):
    weights["alpha"], optimizer_stats["salpha"] = func(weights["alpha"], du["dalpha"], **params, **optimizer_stats["salpha"])
    return weights, optimizer_stats

# rrelu
def rrelu_init_layer(d_prev, d, min=0.0, max=0.1):
    alpha = np.random.uniform(min, max, d)
    return d, {"alpha":alpha}

def rrelu_propagation(func, u, weights, weight_decay, learn_flag, min=0.0, max=0.1):
    z = func(u, weights["alpha"])
    return {"u":u, "z":z}, 0

def rrelu_back_propagation(back_func, dz, us, weights, weight_decay, calc_du_flag, min=0.0, max=0.1):
    du = back_func(dz, us["u"], us["z"], weights["alpha"])
    return {"du":du}

# batch_normalization
def batch_normalization_init_layer(d_prev, d, batch_norm_node=True, use_gamma_beta=True, use_train_stats=True, alpha=0.1):
    # バッチ正規化
    dim = d
    if type(d) == int:
        dim = (d,)
    # gamma,beta初期化
    if batch_norm_node:
        gamma = np.ones((1,) + dim)
        beta  = np.zeros((1,) + dim)
    else:
        gamma = np.ones(1)
        beta  = np.zeros(1)
    # 学習時の統計情報初期化
    train_mean = None
    train_var  = None
    if use_train_stats:
        if batch_norm_node:
            train_mean = np.zeros((1,) + dim)
            train_var  = np.ones((1,) + dim)
        else:
            train_mean = np.zeros(1)
            train_var  = np.ones(1)

    return d, {"gamma":gamma, "beta":beta, "use_gamma_beta":use_gamma_beta,
               "use_train_stats":use_train_stats, "alpha":alpha, "train_mean":train_mean, "train_var":train_var}

def batch_normalization_init_optimizer():
    sgamma = {}
    sbeta = {}
    return {"sgamma":sgamma, "sbeta":sbeta}

def batch_normalization_propagation(func, u, weights, weight_decay, learn_flag, **params):
    if learn_flag or not weights["use_train_stats"]:
        z, u_mean, u_var = func(u, weights["gamma"], weights["beta"])
    else:
        z, u_mean, u_var = func(u, weights["gamma"], weights["beta"], u_mean=weights["train_mean"], u_var=weights["train_var"])
    return {"u":u, "z":z, "u_mean":u_mean, "u_var":u_var}, 0

def batch_normalization_back_propagation(back_func, dz, us, weights, weight_decay, calc_du_flag, **params):
    du, dgamma, dbeta, u_mean, u_var = back_func(dz, us["u"], weights["gamma"], weights["beta"], u_mean=us["u_mean"], u_var=us["u_var"])
    return {"du":du, "dgamma":dgamma, "dbeta":dbeta, "u_mean":u_mean, "u_var":u_var}

def batch_normalization_update_weight(func, du, weights, optimizer_stats, **params):
    if weights["use_gamma_beta"]:
        weights["gamma"], optimizer_stats["sgamma"] = func(weights["gamma"], du["dgamma"], **params, **optimizer_stats["sgamma"])
        weights["beta"],  optimizer_stats["sbeta"]  = func(weights["beta"] , du["dbeta"],  **params, **optimizer_stats["sbeta"])
    if weights["use_train_stats"]:
        weights["train_mean"] = (1 - weights["alpha"]) * weights["train_mean"] + weights["alpha"] * du["u_mean"]
        weights["train_var"]  = (1 - weights["alpha"]) * weights["train_var"]  + weights["alpha"] * du["u_var"]
    return weights, optimizer_stats

# dropout
def dropout_propagation(func, u, weights, weight_decay, learn_flag, dropout_ratio=0.9):
    dropout_mask = None
    if learn_flag:
        dropout_mask = set_dropout_mask(u[-1].shape, dropout_ratio)
        z = func(u, dropout_mask)
    else:
        z = u * dropout_ratio
    return {"u":u, "z":z, "dropout_mask":dropout_mask}, 0

def dropout_back_propagation(back_func, dz, us, weights, weight_decay, calc_du_flag, dropout_ratio=0.9):
    du = back_func(dz, us["dropout_mask"])
    return {"du":du}
from collections import OrderedDict
import pickle

def create_model(d):
    # modelの生成
    model = {}
    model["input"] = {"d":d}
    model["layer"] = OrderedDict()
    model["weight_decay"] = None
    return model

def add_layer(model, name, func, d=None, **kwargs):
    # layerの設定
    model["layer"][name] = {"func":func, "back_func":eval(func.__name__ + "_back"), "d":d, "params":kwargs, "weights":None}
    return model

def set_output(model, func, **kwargs):
    # layerの設定
    model["output"] = {"func":func, "d":1, "params":kwargs}
    return model

def set_error(model, func, **kwargs):
    # layerの設定
    model["error"] = {"func":func, "d":1, "params":kwargs}
    return model

def set_weight_decay(model, func, **kwargs):
    model["weight_decay"] = {"func":func, "back_func":eval(func.__name__ + "_back"), "params":kwargs}
    return model

def init_model(model):
    # modelの初期化
    # input
    d_prev = 0
    d_next = model["input"]["d"]
    print("input", "-", d_prev, d_next)
    # layer
    d_prev = d_next
    # du計算フラグ
    calc_du_flag = False
    for k, v in model["layer"].items():
        # du計算フラグ設定
        v["calc_du_flag"] = calc_du_flag
        # 要素数取得
        d = v["d"] if v["d"] != None else d_prev
        # 重み、バイアスの初期化
        d_next = d
        if v["func"].__name__ + "_init_layer" in globals():
            init_layer_func = eval(v["func"].__name__ + "_init_layer")
            d_next, v["weights"] = init_layer_func(d_prev, d, **v["params"])
            # 以降du計算
            calc_du_flag = True
        # 要素数設定
        v["d_prev"], v["d_next"] = d_prev, d_next
        # モデル表示
        print(k, v["func"].__name__, d_prev, d_next)
        d_prev = d_next
    # output
    model["output"]["d_prev"] = d_prev
    print("output", model["output"]["func"].__name__, d_prev)
    # error
    print("error", model["error"]["func"].__name__)

    return model

def save_model(model, file_path):
    f = open(file_path, "wb")
    pickle.dump(model, f)
    f.close()
    return

def load_model(file_path):
    f = open(file_path, "rb")
    model = pickle.load(f)
    f.close()
    return model

def create_optimizer(func, **kwargs):
    optimizer = {"func":func, "params":kwargs}
    return optimizer

def init_optimizer(optimizer, model):
    optimizer_stats = {}
    for k, v in model["layer"].items():
        # オプティマイザの初期化
        if v["func"].__name__ + "_init_optimizer" in globals():
            init_optimizer_func = eval(v["func"].__name__ + "_init_optimizer")
            optimizer_stats[k] = init_optimizer_func()
    optimizer["stats"] = optimizer_stats

    return optimizer

def save_optimizer(optimizer, file_path):
    f = open(file_path, "wb")
    pickle.dump(optimizer, f)
    f.close()
    return

def load_optimizer(file_path):
    f = open(file_path, "rb")
    optimizer = pickle.load(f)
    f.close()
    return optimizer
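
# Sketch: state can be saved mid-training and reloaded in a later session.
# save_model / load_model work the same way for the model dict; the file name
# below is only an example.
_opt_checkpoint = create_optimizer(Adam, lr=0.001)
save_optimizer(_opt_checkpoint, "optimizer_checkpoint.pkl")
_opt_checkpoint = load_optimizer("optimizer_checkpoint.pkl")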

def propagation(model, x, t=None, learn_flag=True):
    us = {}
    u = x
    err = None
    weight_decay_sum = 0
    # layer
    for k, v in model["layer"].items():
        # propagation関数設定
        propagation_func = middle_propagation
        if v["func"].__name__ + "_propagation" in globals():
            propagation_func = eval(v["func"].__name__ + "_propagation")
        # propagation関数実行
        us[k], weight_decay_r = propagation_func(v["func"], u, v["weights"], model["weight_decay"], learn_flag, **v["params"])
        u = us[k]["z"]  
        weight_decay_sum = weight_decay_sum + weight_decay_r
    # output
    if "output" in model:
        propagation_func = output_propagation
        # propagation関数実行
        us["output"] = propagation_func(model["output"]["func"], u, learn_flag, **model["output"]["params"])
        u = us["output"]["z"]
    # error
    y = u
    # the error is only computed at prediction time (not during learning)
    if not learn_flag:
        if "error" in model and t is not None:
            err = model["error"]["func"](y, t)
            # add the weight decay penalty to the reported error
            if model["weight_decay"] is not None:
                err = err + weight_decay_sum

    return y, err, us

def back_propagation(model, x=None, t=None, y=None, us=None, du=None):
    dus = {}
    if du is None:
        # 出力層+誤差勾配関数
        output_error_back_func = eval(model["output"]["func"].__name__ + "_" + model["error"]["func"].__name__ + "_back")
        du = output_error_back_func(y, us["output"]["u"], t)
        dus["output"] = {"du":du}
    dz = du
    for k, v in reversed(model["layer"].items()):
        # back propagation関数設定
        back_propagation_func = middle_back_propagation
        if v["func"].__name__ + "_back_propagation" in globals():
            back_propagation_func = eval(v["func"].__name__ + "_back_propagation")
        # back propagation関数実行
        dus[k] = back_propagation_func(v["back_func"], dz, us[k], v["weights"], model["weight_decay"], v["calc_du_flag"], **v["params"])
        dz = dus[k]["du"]
        # du計算フラグがFalseだと以降計算しない
        if v["calc_du_flag"] == False:
            break

    return dz, dus

def update_weight(model, dus, optimizer):
    for k, v in model["layer"].items():
        # 重み更新
        if v["func"].__name__ + "_update_weight" in globals():
            update_weight_func = eval(v["func"].__name__ + "_update_weight")
            v["weights"], optimizer["stats"][k] = update_weight_func(optimizer["func"], dus[k], v["weights"], optimizer["stats"][k], **optimizer["params"])
    return model, optimizer

def create_data_normalizer(func, **kwargs):
    data_normalizer = {"func":func, "params":kwargs, "data_normalizer_stats":{}}
    return data_normalizer

def train_data_normalize(data_normalizer, x):
    data_normalizer_stats = {}
    nx, data_normalizer["data_normalizer_stats"] = data_normalizer["func"](x, data_normalizer_stats, **data_normalizer["params"])
    return nx, data_normalizer

def test_data_normalize(data_normalizer, x):
    nx, _ = data_normalizer["func"](x, data_normalizer["data_normalizer_stats"], **data_normalizer["params"])
    return nx

def error(model, y, t):
    return model["error"]["func"](y, t)
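
# Sketch of the data-normalizer interface: the function passed to
# create_data_normalizer must have the form func(x, stats, **params) and
# return (normalized_x, stats). The min-max normalizer below is only an
# illustrative placeholder, not one defined in this article.
import numpy as np

def min_max_normalize_example(x, stats):
    if "min" not in stats:
        stats = {"min": x.min(), "max": x.max()}       # statistics taken from the training data
    nx = (x - stats["min"]) / np.maximum(stats["max"] - stats["min"], 1e-7)
    return nx, stats

_normalizer = create_data_normalizer(min_max_normalize_example)
_nx_train, _normalizer = train_data_normalize(_normalizer, np.random.rand(100, 784) * 255)
_nx_test = test_data_normalize(_normalizer, np.random.rand(20, 784) * 255)   # reuses training statistics
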
# 学習
def learn(model, x_train, t_train, x_test=None, t_test=None, batch_size=100, epoch=50, init_model_flag=True,
          optimizer=None, init_optimizer_flag=True, shuffle_flag=True, learn_info=None):
    if init_model_flag:
        model = init_model(model)
    if optimizer is None:
        optimizer = create_optimizer(SGD)
    if init_optimizer_flag:
        optimizer = init_optimizer(optimizer, model)

    # 学習情報初期化
    learn_info = epoch_hook(learn_info, epoch, 0, model, x_train, None, t_train, x_test, t_test, batch_size)

    # エポック実行
    for i in range(epoch):
        idx = np.arange(x_train.shape[0])
        if shuffle_flag:
            # データのシャッフル
            np.random.shuffle(idx)

        # 学習
        y_train = np.zeros(t_train.shape)
        for j in range(0, x_train.shape[0], batch_size):
            # propagation
            y_train[idx[j:j+batch_size]], err, us = propagation(model, x_train[idx[j:j+batch_size]], t_train[idx[j:j+batch_size]])
            # back_propagation
            dz, dus = back_propagation(model, x_train[idx[j:j+batch_size]], t_train[idx[j:j+batch_size]], y_train[idx[j:j+batch_size]], us)
            # update_weight
            model, optimizer = update_weight(model, dus, optimizer)

        # 学習情報設定(エポックフック)
        learn_info = epoch_hook(learn_info, epoch, i+1, model, x_train, y_train, t_train, x_test, t_test, batch_size)

    return model, optimizer, learn_info

# 予測
def predict(model, x_pred, t_pred=None):
    y_pred, err, us = propagation(model, x_pred, t_pred, learn_flag=False)
    return y_pred, err, us
import time

def epoch_hook(learn_info, epoch, cnt, model, x_train, y_train, t_train, x_test=None, t_test=None, batch_size=100):
    # 初期化
    if learn_info is None:
        learn_info = {}
        learn_info["err_train"] = []
        if x_test is not None:
            learn_info["err_test"] = []
        if model["error"]["func"] == cross_entropy_error:
            learn_info["accuracy_rate_train"] = []
            if x_test is not None:
                 learn_info["accuracy_rate_test"] = []
        if model["output"]["func"] == sigmoid:
            learn_info["f_measure_train"] = []
            if x_test is not None:
                learn_info["f_measure_test"] = []
        # 学習データの予測
        y_train = np.zeros(t_train.shape)
        for j in range(0, x_train.shape[0], batch_size):
            y_train[j:j+batch_size], _, _ = predict(model, x_train[j:j+batch_size], t_train[j:j+batch_size])
        #y_train, err_train, _ = predict(model, x_train, t_train)
        learn_info["total_times"] = 0
    # 情報計算
    if cnt != 0 or len(learn_info["err_train"]) == 0:
        # 再計算する場合
        #y_train = np.zeros(t_train.shape)
        #for j in range(0, x_train.shape[0], batch_size):
        #    y_train[j:j+batch_size], _, _ = predict(model, x_train[j:j+batch_size], x_train[j:j+batch_size])
        #y_train, err_train, _ = predict(model, x_train, t_train)
        learn_info["y_train"] = y_train
        if model["error"]["func"] == cross_entropy_error:
            learn_info["accuracy_rate_train"].append(accuracy_rate(y_train, t_train))
        if model["output"]["func"] == sigmoid:
            learn_info["f_measure_train"].append(f_measure(np.round(y_train), t_train)[0])
        learn_info["err_train"].append(error(model, y_train, t_train))
        if x_test is not None:
            #learn_info["y_test"], err_test, _ = predict(model, x_test, t_test)
            #learn_info["err_test"].append(err_test)
            #  バッチサイズごとに計算する場合
            y_test = np.zeros(t_test.shape)
            for j in range(0, x_test.shape[0], batch_size):
                y_test[j:j+batch_size], _, _ = predict(model, x_test[j:j+batch_size], t_test[j:j+batch_size])
            learn_info["y_test"] = y_test
            if t_test is not None:
                if model["error"]["func"] == cross_entropy_error:
                    learn_info["accuracy_rate_test"].append(accuracy_rate(learn_info["y_test"], t_test))
                if model["output"]["func"] == sigmoid:
                    learn_info["f_measure_test"].append(f_measure(np.round(learn_info["y_test"]), t_test)[0])
                learn_info["err_test"].append(error(model, y_test, t_test))

        # 情報表示
        epoch_cnt = len(learn_info["err_train"])-1
        print(epoch_cnt, end=" ")
        if model["error"]["func"] == cross_entropy_error:
            print(learn_info["accuracy_rate_train"][epoch_cnt], end=" ")
        if model["output"]["func"] == sigmoid:
            print(learn_info["f_measure_train"][epoch_cnt], end=" ")
        print(learn_info["err_train"][epoch_cnt], end=" ")
        if x_test is not None:
            if t_test is not None:
                if model["error"]["func"] == cross_entropy_error:
                    print(learn_info["accuracy_rate_test"][epoch_cnt], end=" ")
                if model["output"]["func"] == sigmoid:
                    print(learn_info["f_measure_test"][epoch_cnt], end=" ")
                print(learn_info["err_test"][epoch_cnt], end=" ")
        print()

    if cnt == 0:
        # 開始時間設定
        learn_info["start_time"] = time.time()
    if cnt == epoch:
        # 所要時間
        learn_info["end_time"] = time.time()
        learn_info["total_times"] = learn_info["total_times"] + learn_info["end_time"] - learn_info["start_time"]
        print("所要時間 = " + str(int(learn_info["total_times"]/60))  +" 分 " + str(int(learn_info["total_times"]%60)) + " 秒")

    return learn_info
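
As a rough end-to-end sketch of the workflow this article aims at: dropout and batch normalization are added as ordinary layers, extra epochs are run afterwards, and the learning rate is changed part-way through. It assumes relu, softmax, and cross_entropy_error from the earlier parts of this series, and uses small random data in place of a real data set such as MNIST.

import numpy as np

# toy data standing in for a real data set
x_train = np.random.rand(200, 28*28)
t_train = np.eye(10)[np.random.randint(0, 10, 200)]
x_test  = np.random.rand(50, 28*28)
t_test  = np.eye(10)[np.random.randint(0, 10, 50)]

model = create_model(28*28)
model = add_layer(model, "affine1", affine, 100)
model = add_layer(model, "batch_norm1", batch_normalization)
model = add_layer(model, "relu1", relu)
model = add_layer(model, "dropout1", dropout, dropout_ratio=0.8)   # keep 80% of the nodes
model = add_layer(model, "affine2", affine, 10)
model = set_output(model, softmax)
model = set_error(model, cross_entropy_error)

optimizer = create_optimizer(Adam, lr=0.001)
model, optimizer, learn_info = learn(model, x_train, t_train, x_test, t_test,
                                     batch_size=100, epoch=5, optimizer=optimizer)

# continue for 3 more epochs with a smaller learning rate,
# without re-initializing the model or the optimizer state
optimizer["params"]["lr"] = 0.0001
model, optimizer, learn_info = learn(model, x_train, t_train, x_test, t_test,
                                     batch_size=100, epoch=3, init_model_flag=False,
                                     optimizer=optimizer, init_optimizer_flag=False,
                                     learn_info=learn_info)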

Next, I will move on to implementing CNN and RNN.

Revision history
2018/11/13: Fixed the learn function because y_train was being stored in shuffled order.
2019/01/04: Added the epoch parameter, which was missing from the learn function.
2019/01/20: Changed the default learning rate to be set per optimizer.
2019/01/23: Changed batch normalization to use the training-time mean and variance at prediction time, instead of the mean and variance of the prediction data.
2019/02/04, 02/06: Added performance improvements.
2019/02/17: Changed predict so that it also returns us.
