Python + MeCab: extracting parts of the sentences in a list for machine learning

Posted at 2022-08-10

Importing modules

Since machine learning will be used in the end, we import more modules than we need right away.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set() 
%matplotlib inline
import codecs
import os
import datetime
import requests
import re
import MeCab
import unidic
mecab = MeCab.Tagger()
from sklearn.model_selection import train_test_split

Loading the CSV data

The CSV data is read into a pandas DataFrame called Work_Report2.

def open_send_data(image_file_path_v):
    # Read the Shift-JIS encoded CSV (decoding errors are ignored)
    with codecs.open(image_file_path_v, "r", "Shift-JIS", "ignore") as file:
        dfpp = pd.read_table(file, delimiter=",")
    return dfpp

image_file_path2 = './input/data/Work_report_simulation_data.csv'
Work_Report=open_send_data(image_file_path2)
Work_Report2=Work_Report.fillna('missing')
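A quick way to confirm the load worked (illustration only, not part of the original article):

# Quick check of the loaded data (illustration)
print(Work_Report2.shape)
Work_Report2.head()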

This time, the work-report sentences are the target of the extraction.

From pandas to a list

tagger = MeCab.Tagger('')
# Parsing an empty string once beforehand keeps the tagger stable
tagger.parse('')
list_data = []
text_news = Work_Report2
# The column holding the work-report text ("原因 処置")
list_of_single_column = text_news['原因 処置'].tolist()
Work_report_Action_list = text_news['原因 処置'].tolist()

Creating a function that extracts parts of each sentence in the list

From the work-report sentences we extract verbs, adjectives, nouns, particles, and auxiliary verbs ('動詞', '形容詞', '名詞', '助詞', '助動詞')!
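Before defining the full function, here is a minimal sketch of the idea (the sample sentence is made up for illustration): parseToNode walks the tokens one by one, node.surface is the surface form, and with the default dictionary the first comma-separated field of node.feature is the part of speech.

# Minimal POS-filtering sketch (the sentence is a hypothetical work-report example)
sample_tagger = MeCab.Tagger('')
sample_tagger.parse('')
node = sample_tagger.parseToNode('ポンプが停止したため交換した')
while node:
    pos = node.feature.split(',')[0]
    if pos in ['動詞', '形容詞', '名詞', '助詞', '助動詞']:
        print(node.surface, pos)
    node = node.next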

def retrieve_text(Work_report_Action_list):
    # Clean up the text
    def clean_text(text):
        replaced_text = text.lower()
        replaced_text = re.sub(r'[【】]', ' ', replaced_text)     # remove 【】
        replaced_text = re.sub(r'[（）()]', ' ', replaced_text)   # remove parentheses (full- and half-width)
        replaced_text = re.sub(r'[\[\]]', ' ', replaced_text)    # remove []
        replaced_text = re.sub(r'[@＠]\w+', '', replaced_text)   # remove mentions
        replaced_text = re.sub(
            r'https?:\/\/.*?[\r\n ]', '', replaced_text)          # remove URLs
        replaced_text = re.sub(r'　', ' ', replaced_text)         # replace full-width spaces
        return replaced_text

    clean_text_list = []

    # Run the preprocessing
    for i in Work_report_Action_list:
        cleaned = clean_text(str(i))
        clean_text_list.append(cleaned)

    # Parts of speech to keep
    select_conditions = ['動詞', '形容詞', '名詞', '助詞', '助動詞']
    # Store the cleaned text on the DataFrame (overwritten later with the POS-filtered text)
    Work_Report2["clean_text"] = clean_text_list
    noun_verb = []

    def wakati_text(text):
        # Tokenize and walk the result node by node
        node = tagger.parseToNode(text)
        terms = []
        while node:
            # Surface form of the token
            term = node.surface
            # Part of speech (first field of the feature string)
            pos = node.feature.split(',')[0]
            # Keep the token if its POS matches the conditions
            if pos in select_conditions:
                terms.append(term)
                list_data.append(term)
            node = node.next
        # Join the kept tokens with spaces
        text_result = ' '.join(terms)
        return text_result

    for text in clean_text_list:
        noun_verb.append(wakati_text(text))
        
    return noun_verb

noun_verb = retrieve_text(Work_report_Action_list)

Adding a column and preprocessing

We use pycaret for the preprocessing.

Enter the name of the column you want to predict in place of 目的変数 (the target variable).

# Add the column
Work_Report2["clean_text"]=noun_verb
refining_df=Work_Report2.drop(['原因 処置'], axis=1)
refining_dummies_df=pd.get_dummies(refining_df['作業報告種類'])
refining_dummies_df["clean_text"]=refining_df["clean_text"]
refining_dummies_df1=refining_dummies_df
# Use pycaret for preprocessing
from pycaret.classification import *
exp_name = setup(refining_dummies_df1, target = "目的変数",session_id=42)
best = compare_models()
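compare_models returns the highest-scoring model. As a quick follow-up (a minimal sketch, assuming a recent pycaret version), it can be inspected and evaluated like this:

# Inspect and evaluate the model chosen by compare_models (illustration)
print(best)
evaluate_model(best)                # interactive evaluation plots
holdout_pred = predict_model(best)  # predictions on the hold-out set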

Retrieving the data processed by pycaret

You can retrieve it with get_config.

X = get_config('X')
y = get_config('y')
X_train = get_config('X_train') # explanatory variables for training, after the split
X_train.head()
y_train = get_config('y_train') # target variable for training, after the split
y_train.head()
X_test = get_config('X_test') # explanatory variables for testing, after the split
X_test.head()
y_test = get_config('y_test') # target variable for testing, after the split
y_test.head()

Splitting the training data into train and validation sets

# Define the function for the stratified split
from sklearn.model_selection import StratifiedShuffleSplit
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
columns_name = X_train.columns  # keep the original column names for rebuilding the DataFrames

def data_split(X, y):
    for train_index, test_index in sss.split(X, y):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

    X_train = pd.DataFrame(X_train, columns=columns_name)
    X_test = pd.DataFrame(X_test, columns=columns_name)

    return X_train, y_train, X_test, y_test

# Split the training data into train and validation
X_train, y_train, X_val, y_val = data_split(X_train.values, y_train.values)
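As a quick check (illustration only, not part of the original article), the stratified split keeps the class proportions roughly the same in both parts:

# Compare the class proportions of the train and validation splits (illustration)
print(pd.Series(y_train).value_counts(normalize=True))
print(pd.Series(y_val).value_counts(normalize=True))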

Using machine learning (LightGBM)

import lightgbm as lgb

# Create the datasets
train = lgb.Dataset(X_train, label=y_train)
valid = lgb.Dataset(X_val, label=y_val)

# Set the model parameters
params = {
    'reg_lambda': 0.2,
    'objective': 'multiclass',
    'metric': 'multi_logloss',
    'num_class': 2,
    'reg_alpha': 0.1,
    'min_data_in_leaf': 100,
    'learning_rate': 0.025,
#     'feature_fraction': 0.8,
#     'bagging_fraction': 0.8
}
# Train the model
# (on LightGBM >= 4.0, pass callbacks=[lgb.early_stopping(500)] instead of early_stopping_rounds)
model = lgb.train(params,
                  train,
                  valid_sets=valid,
                  num_boost_round=5000,
                  early_stopping_rounds=500)

# Predict
y_pred = model.predict(X_test, num_iteration=model.best_iteration)
#y_pred = (lgb.predict_proba(X_test)[:, 0] < 0.1).astype(int)
y_pred = np.argmax(y_pred, axis=1)
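# Illustration (not from the original article): with objective='multiclass', model.predict
# returns an (n_samples, num_class) matrix of class probabilities, and argmax picks the
# most probable class per row, e.g.
#   np.argmax(np.array([[0.8, 0.2], [0.3, 0.7]]), axis=1)  ->  array([0, 1])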

#-------------------------- Model evaluation -----------------------------------------------
from sklearn.metrics import confusion_matrix
from sklearn.metrics import cohen_kappa_score

# Build the confusion matrix
result_matrix = pd.DataFrame(confusion_matrix(y_test, y_pred))
# Per-class accuracy (diagonal element divided by the column sum)
class_accuracy = [result_matrix[i][i] / result_matrix[i].sum() for i in range(len(result_matrix))]
# Add the per-class accuracy as a new column
result_matrix[2] = class_accuracy
# Compute the kappa coefficient
kappa = cohen_kappa_score(y_test, y_pred)
print("kappa score:", kappa)

Running the evaluation

from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
confusion_matrix_display=confusion_matrix(y_test, y_pred)
print(confusion_matrix_display)
from sklearn.metrics import accuracy_score
accuracy_score_display=accuracy_score(y_test, y_pred)
print("accuracy_score_display",accuracy_score_display)
from sklearn.metrics import precision_score
precision_score_display=precision_score(y_test, y_pred)
print("precision_score_display",precision_score_display)
from sklearn.metrics import recall_score
recall_score_display=recall_score(y_test, y_pred)
print("recall_score_display",recall_score_display)
from sklearn.metrics import f1_score
f1_score_display=f1_score(y_test, y_pred)
print("f1_score_display",f1_score_display)


# Spearman's rank correlation measures how strongly two variables are related by rank order (a monotonic relationship)
from scipy.stats import spearmanr
correlation, pvalue = spearmanr(y_test,y_pred)
print("correlation",correlation) #0.6727272727272726

df_Df = pd.DataFrame({'y_test_':y_test,'y_pred_':y_pred})
df_Df.to_csv("./output/output.csv", encoding='shift-jis')

importance = pd.DataFrame(model.feature_importance(importance_type='gain'), columns=['importance'])
#display(importance)
importance["columns"] = list(X_train.columns)  # feature names the model was trained on
importance_sort = importance.sort_values(by='importance', ascending=False)
importance_sort.to_csv("./output/importance" + "output" + '.csv', encoding='shift-jis')
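To get a quick visual overview (illustration only, using the matplotlib/seaborn already imported), the top gain importances can be plotted:

# Plot the top 10 features by gain importance (illustration)
top10 = importance_sort.head(10)
plt.figure(figsize=(8, 4))
sns.barplot(x='importance', y='columns', data=top10)
plt.title('LightGBM feature importance (gain)')
plt.tight_layout()
plt.show()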

For more advanced uses of pycaret, the following site is recommended.
