1
1

Delete article

Deleted articles cannot be recovered.

Draft of this article would be also deleted.

Are you sure you want to delete this article?

More than 5 years have passed since last update.

titanic挑戦4

Posted at

・ハイパーパラメータとは?
機械学習アルゴリズムにおいて人が調整するべきパラメータ
ex)ディープラーニングにおける層の数
今回は2種類の方法でハイパーパラメータを探索する
1,グリッドサーチ(いくつかの値の全組み合わせを試し最適な値を探す)
2,ベイズ最適化によるパラメータサーチ

探索範囲について

# Hyperparameter search space: each key maps to the candidate values to try.
all_params = {
    'max_depth': [3, 5, 7],
    'learning_rate': [0.01, 0.1],
    'min_child_weight': [3, 5, 10],
    # FIX: was misspelled 'n_estimetors' — XGBoost silently ignores unknown
    # keyword parameters, so the intended value never took effect.
    'n_estimators': [10000],
    'colsample_bytree': [0.8, 0.9, 1.0],
    'colsample_bylevel': [0.8, 0.9, 1.0],
    'reg_alpha': [0, 0.1],
    'random_state': [0],
    'n_jobs': [1],
}

グリッドサーチによるパラメーターサーチ
[sklearn.model_selection.ParameterGrid](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.ParameterGrid.html) に上記の辞書を与えると全通りのパラメーターを作ってくれます。
以下は検証のミニコード

from sklearn.model_selection import ParameterGrid

# Search space: ParameterGrid expands this dict into every combination.
all_params = {
    'max_depth': [3, 5, 7],
    'learning_rate': [0.01, 0.1],
    'min_child_weight': [3, 5, 10],
    # FIX: was misspelled 'n_estimetors' — XGBoost silently ignores unknown
    # keyword parameters, so the intended value never took effect.
    'n_estimators': [10000],
    'colsample_bytree': [0.8, 0.9, 1.0],
    'colsample_bylevel': [0.8, 0.9, 1.0],
    'reg_alpha': [0, 0.1],
    'random_state': [0],
    'n_jobs': [1],
}

for params in ParameterGrid(all_params):
    print(params)
    break  # Comment this line out to see that every combination is produced.

結果

{'colsample_bylevel': 0.8, 'colsample_bytree': 0.8, 'learning_rate': 0.01, 'max_depth': 3, 'min_child_weight': 3, 'n_estimetors': 10000, 'n_jobs': 1, 'random_state': 0, 'reg_alpha': 0}

実際のコードで試してみる

%matplotlib inline
import warnings
import numpy as np
import pandas as pd
import xgboost as xgb
import matplotlib.pylab as plt

from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import ParameterGrid

# 組み合わせが多いので、進捗を可視化するツールを入れました。
from tqdm import tqdm_notebook as tqdm

warnings.filterwarnings('ignore')

# all_params is declared as a module-level global so that
# main_parametersearch() below can iterate over it.
all_params = {
    'max_depth': [3, 5, 7],
    'learning_rate': [0.01, 0.1],
    'min_child_weight': [3, 5, 10],
    # FIX: was misspelled 'n_estimetors' — XGBoost silently ignores unknown
    # keyword parameters, so the intended value never took effect.
    'n_estimators': [10000],
    'colsample_bytree': [0.8, 0.9, 1.0],
    'colsample_bylevel': [0.8, 0.9, 1.0],
    'reg_alpha': [0, 0.1],
    'random_state': [0],
    'n_jobs': [1],
}


def validate(train_x, train_y, params):
    """Cross-validate an XGBoost classifier with the given hyperparameters.

    Runs a 3-fold stratified CV, prints the mean accuracy, and returns
    (accuracies, feature_importances) with one entry per fold.
    """
    accuracies = []
    feature_importances = []

    splitter = StratifiedKFold(n_splits=3, shuffle=True, random_state=0)
    for fit_idx, eval_idx in splitter.split(train_x, train_y):
        fit_x, eval_x = train_x.iloc[fit_idx, :], train_x.iloc[eval_idx, :]
        fit_y, eval_y = train_y.iloc[fit_idx], train_y.iloc[eval_idx]

        model = xgb.XGBClassifier(**params)
        model.fit(fit_x, fit_y)

        predicted = model.predict(eval_x)
        feature_importances.append(model.feature_importances_)
        accuracies.append(accuracy_score(eval_y, predicted))

    print(np.mean(accuracies))
    return accuracies, feature_importances


def plot_feature_importances(feature_importances, cols):
    """Draw a box plot of the per-fold feature importances, one box per feature."""
    frame = pd.DataFrame(feature_importances, columns=cols)
    frame.plot(kind="box", rot=90)


def preprocess_df(df):
    """Fill missing values, engineer features, and encode categoricals.

    Note: mutates *df* in place (column drops), but callers should use the
    returned frame, which additionally has the one-hot encoded columns.
    """
    # Fill missing ages with the column mean.
    df["Age"] = df["Age"].fillna(df["Age"].mean())
    # BUG FIX: Series.mode() returns a Series, and fillna(Series) aligns on
    # the index — so only a NaN at index 0 would ever be filled. Use the
    # scalar mode value so every missing embarkation port is filled.
    df["Embarked"] = df["Embarked"].fillna(df["Embarked"].mode().iloc[0])
    # Family size = siblings/spouses + parents/children + the passenger.
    df["FamilySize"] = df["SibSp"] + df["Parch"] + 1

    # Drop columns that are not used as features.
    df.drop(["Name", "Ticket", "Cabin", "PassengerId"], axis=1, inplace=True)

    # Binary-encode Sex; one-hot encode remaining categoricals (Embarked).
    df["Sex"] = df["Sex"].replace({"male": 0, "female": 1})
    df = pd.get_dummies(df)

    return df


# Predict the test data and write a submission file.
def predict_df(train_x, train_y, test_x, df_test_raw, path_output="result.csv"):
    """Fit XGBoost on the full training data and save a submission CSV.

    df_test_raw supplies the PassengerId column that preprocess_df drops.
    """
    # Hyperparameters hard-coded from a previous tuning run.
    params = {'learning_rate': 0.008306052798923729, 'max_depth': 7, 'min_child_weight': 3, 'colsample_bytree': 0.8210307463506532, 'colsample_bylevel': 0.8061816543590015}
    model = xgb.XGBClassifier(**params)
    model.fit(train_x, train_y)

    submission = pd.DataFrame()
    submission["PassengerId"] = df_test_raw["PassengerId"]
    submission["Survived"] = model.predict(test_x)
    submission.to_csv(path_output, index=False)


def main():
    """Validate with default hyperparameters, then build a submission file."""
    df_train = pd.read_csv("train.csv")

    # Split the label off before preprocessing the features.
    train_y = df_train["Survived"]
    train_x = preprocess_df(df_train.drop("Survived", axis=1))

    # {} -> XGBoost defaults; validate() also prints the mean accuracy.
    accuracies, feature_importances = validate(train_x, train_y, {})
    print(np.mean(accuracies))
    plot_feature_importances(feature_importances, train_x.columns)

    flag_product = True
    if flag_product:
        df_test = pd.read_csv("test.csv")
        df_test_raw = df_test.copy()  # keep PassengerId before it is dropped
        test_x = preprocess_df(df_test)
        predict_df(train_x, train_y, test_x, df_test_raw, "result.csv")

# main文を書き換えているので、別関数として定義
def main_parametersearch():
    """Grid-search all_params with 3-fold CV and print the best combination."""
    df_train = pd.read_csv("train.csv")
    train_y = df_train["Survived"]
    train_x = preprocess_df(df_train.drop("Survived", axis=1))

    # Same preparation as main(); tqdm makes the (long) grid-search
    # progress visible in the notebook.
    best_score = 0
    best_params = {}
    for candidate in tqdm(ParameterGrid(all_params)):
        accuracies, _ = validate(train_x, train_y, candidate)
        score = np.mean(accuracies)
        # Keep the combination with the highest mean CV accuracy seen so far.
        if score > best_score:
            best_score = score
            best_params = candidate
    print(best_score, best_params)

# Entry point calls the parameter search instead of main().
if __name__ == '__main__':
    main_parametersearch()

ベイズ最適化によるパラメーターサーチ
ベイズ最適化はハイパーパラメーターをより効率的に探してくれるためのアルゴリズム（参考: https://datachemeng.com/bayesianoptimization/ ）
イメージとしては
1,よい精度を出したところを深く探索する
2,たまにハイパーパラメーターを変えてもっと深いところがないか探索する
のふたつを組み合わせることで最適なパラメーターを探索する。
今回はhttps://optuna.org/を利用する

!pip install optuna # ライブラリーのインストールコマンド
import optuna
import numpy as np
import pandas as pd
import xgboost as xgb

from tqdm import tqdm_notebook as tqdm
from IPython.display import display
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold


# Suppress Optuna's per-trial log messages.
# https://optuna.readthedocs.io/en/stable/faq.html#how-to-suppress-log-messages-of-optuna
optuna.logging.set_verbosity(optuna.logging.WARNING)

def objective(trial):
    """Optuna objective: return 1 - mean CV accuracy.

    Optuna minimizes the objective by default, so a smaller return value
    means a more accurate model. Reads the module-level train_x / train_y
    globals prepared below.
    """
    params = {
        'seed': 0,
        'learning_rate': trial.suggest_loguniform('learning_rate', 1e-3, 1e-2),
        'max_depth': trial.suggest_int('max_depth', 3, 7),
        'min_child_weight': trial.suggest_int('min_child_weight', 3, 10),
        'colsample_bytree': trial.suggest_loguniform('colsample_bytree', 0.8, 1.0),
        'colsample_bylevel': trial.suggest_loguniform('colsample_bylevel', 0.8, 1.0),
    }

    # 3-fold stratified cross-validation with the sampled hyperparameters.
    scores = []
    splitter = StratifiedKFold(n_splits=3, shuffle=True, random_state=0)
    for fit_idx, eval_idx in splitter.split(train_x, train_y):
        fit_x, eval_x = train_x.iloc[fit_idx, :], train_x.iloc[eval_idx, :]
        fit_y, eval_y = train_y.iloc[fit_idx], train_y.iloc[eval_idx]

        model = xgb.XGBClassifier(**params)
        model.fit(fit_x, fit_y)
        scores.append(accuracy_score(eval_y, model.predict(eval_x)))

    return 1.0 - np.mean(scores)


def preprocess_df(df):
    """Fill missing values, engineer features, and encode categoricals.

    Note: mutates *df* in place (column drops), but callers should use the
    returned frame, which additionally has the one-hot encoded columns.
    """
    # Fill missing ages with the column mean.
    df["Age"] = df["Age"].fillna(df["Age"].mean())
    # BUG FIX: Series.mode() returns a Series, and fillna(Series) aligns on
    # the index — so only a NaN at index 0 would ever be filled. Use the
    # scalar mode value so every missing embarkation port is filled.
    df["Embarked"] = df["Embarked"].fillna(df["Embarked"].mode().iloc[0])
    # Family size = siblings/spouses + parents/children + the passenger.
    df["FamilySize"] = df["SibSp"] + df["Parch"] + 1

    # Drop columns that are not used as features.
    df.drop(["Name", "Ticket", "Cabin", "PassengerId"], axis=1, inplace=True)

    # Binary-encode Sex; one-hot encode remaining categoricals (Embarked).
    df["Sex"] = df["Sex"].replace({"male": 0, "female": 1})
    df = pd.get_dummies(df)

    return df


# main: prepare the data, then run the Optuna study over objective().
df_train = pd.read_csv("train.csv")
train_y = df_train["Survived"]
train_x = df_train.drop("Survived", axis=1)
train_x = preprocess_df(train_x)

# Fix the sampler's random seed so the search is reproducible.
# Not needed in practice; included here because this is a tutorial.
# https://optuna.readthedocs.io/en/stable/faq.html#how-can-i-obtain-reproducible-optimization-results
sampler = optuna.samplers.TPESampler(seed=100) # Make the sampler behave in a deterministic way.
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=100, n_jobs=1)
print(study.best_trial.value)
print(study.best_trial.params)

基本的には
1,精度を返す関数をつくる(objective関数)
2,その関数をOptunaに投げる(study.optimizeの引数にとる)
の2工程で対応できる

実際にtest.csvも予測してみる

# Reuse the best hyperparameters found by the Optuna study.
params = study.best_trial.params

def main():
    """Validate with the tuned hyperparameters, then build a submission file."""
    df_train = pd.read_csv("train.csv")

    train_y = df_train["Survived"]
    train_x = preprocess_df(df_train.drop("Survived", axis=1))

    # Pass the tuned params (previously this call used the defaults).
    accuracies, feature_importances = validate(train_x, train_y, params)
    print(np.mean(accuracies))
    plot_feature_importances(feature_importances, train_x.columns)

    flag_product = True
    if flag_product:
        df_test = pd.read_csv("test.csv")
        df_test_raw = df_test.copy()  # keep PassengerId before it is dropped
        test_x = preprocess_df(df_test)
        predict_df(train_x, train_y, test_x, df_test_raw, "result.csv")

if __name__ == "__main__":
    main()

結果

0.8249158249158249

image.png

アンサンブルによる精度向上
機械学習において単一のモデルをそのまま使うのではなく複数のモデルを組みあわせることで精度を上げる手法をアンサンブル学習という。

ここではLightGBMとXGBoostを組み合わせて精度がよくなるかをみる

# 今回はクロスバリデーションで精度を出す以外のところは削っています。
import warnings
import numpy as np
import pandas as pd
import xgboost as xgb
import lightgbm as lgb

from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold

warnings.filterwarnings('ignore')

# main: ensemble XGBoost and LightGBM by averaging their probabilities.
df_train = pd.read_csv("train.csv")
train_y = df_train["Survived"]
train_x = df_train.drop("Survived", axis=1)

train_x = preprocess_df(train_x)

accuracies = []

cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=0)
for fit_idx, eval_idx in cv.split(train_x, train_y):
    fit_x, eval_x = train_x.iloc[fit_idx, :], train_x.iloc[eval_idx, :]
    fit_y, eval_y = train_y.iloc[fit_idx], train_y.iloc[eval_idx]

    # Train both models on the same fold with the tuned params.
    model_xgb = xgb.XGBClassifier(**params)
    model_lgb = lgb.LGBMClassifier(**params)
    model_xgb.fit(fit_x, fit_y)
    model_lgb.fit(fit_x, fit_y)

    # Use probabilities (not hard labels) so they can be averaged.
    proba_xgb = model_xgb.predict_proba(eval_x)[:, 1]
    proba_lgb = model_lgb.predict_proba(eval_x)[:, 1]
    mean_proba = pd.DataFrame({"xgb": proba_xgb, "lgb": proba_lgb}).mean(axis=1)

    # Predict 1 when the averaged probability exceeds 0.50, else 0.
    pred_y = [int(p > 0.50) for p in mean_proba]
    accuracies.append(accuracy_score(eval_y, pred_y))

print(np.mean(accuracies))

結果

0.8215488215488215

今回は下がってしまった。
今回は単純に平均値をとっただけだが、実際は複数の判別モデルから出力された値をさらに機械学習に入れ込むStackingなど様々な手法がとられている

1
1
0

Register as a new user and use Qiita more conveniently

  1. You get articles that match your needs
  2. You can efficiently read back useful information
  3. You can use dark theme
What you can do with signing up
1
1

Delete article

Deleted articles cannot be recovered.

Draft of this article would be also deleted.

Are you sure you want to delete this article?