続・わかりやすいパターン認識 教師なし学習入門 (Continued: Easy-to-Understand Pattern Recognition, An Introduction to Unsupervised Learning) [Experiments with the Forward and Backward Algorithms]

Posted at 2016-05-21

I implemented a hidden Markov model together with the forward and backward algorithms in Python.

わかりやすいパターン認識

Hidden Markov models, the forward algorithm, and the backward algorithm

A simpler implementation using NumPy is given at the bottom.
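For reference (notation added here, not taken from the article), write \pi_i for the initial probabilities p, a_{ij} for the entries of A, and b_i(v) for the entries of B. The recursions that the code below implements are the standard ones:

\alpha_1(i) = \pi_i \, b_i(x_1), \qquad
\alpha_t(i) = \Bigl[\sum_{j=1}^{c} \alpha_{t-1}(j)\, a_{ji}\Bigr] b_i(x_t), \qquad
P(\boldsymbol{x}) = \sum_{i=1}^{c} \alpha_n(i)

\beta_n(i) = 1, \qquad
\beta_t(i) = \sum_{j=1}^{c} a_{ij}\, b_j(x_{t+1})\, \beta_{t+1}(j), \qquad
P(\boldsymbol{x}) = \sum_{i=1}^{c} \pi_i\, b_i(x_1)\, \beta_1(i)

Both algorithms compute the same total probability P(x) of the observed sequence, which is what makes them useful as a cross-check of each other.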

c = 3                    # number of states
s = [ "w1", "w2", "w3" ] # states
m = 2                    # number of output symbols
v = [ "v1", "v2" ]       # output symbols

# initial probability of each state
p = [ 1/3, 1/3, 1/3 ]    
# state transition probability matrix
A = [ [ 0.1, 0.7, 0.2 ],
      [ 0.2, 0.1, 0.7 ],
      [ 0.7, 0.2, 0.1 ] ]
# emission (observation) probabilities for each state
B = [ [ 0.9, 0.1 ],
      [ 0.6, 0.4 ],
      [ 0.1, 0.9 ] ]

# In the following, p1 is written as p[0], p2 as p[1], p3 as p[2],
# v1 as v[0], v2 as v[1],
# and x1 as x[0], x2 as x[1], x3 as x[2].
# observation sequence
x = [ 0, 1, 0 ] # i.e. v1, v2, v1
n = len(x)      # number of observations

class HMM:
    t = 1  # current time step (1-indexed)
    def __init__(self, s, v, p, A, B): 
        self.outputs = v
        self.states = s
        self.state_probabilities = p
        self.transition_probabilities = A
        self.emission_probabilities = B

    def forward(self, observations):
        self.t = 1
        alpha = []
        alpha.append({})

        # Step 1: initialization (t = 1)
        # alpha_1(i) = pi_i * b_i(x_1)
        for index, state in enumerate(self.states):
            alpha[0][index] = round(self.state_probabilities[index] * self.emission_probabilities[index][observations[0]], 3)

        # Step 2: recursion
        # alpha_t(i) = [ sum_j alpha_{t-1}(j) * a_{ji} ] * b_i(x_t)
        def calc_probability(i):
            prob = 0
            for j, state in enumerate(self.states):
                # the transition goes from state j at time t-1 into state i at time t,
                # so the factor is a_{ji} = transition_probabilities[j][i]
                prob = prob + alpha[self.t - 2][j] * self.transition_probabilities[j][i]
            return round(prob, 4)

        for observation in observations[1:]:
            alpha.append({})
            self.t = self.t + 1
            observation_index = observations[self.t - 1]
            for i, state in enumerate(self.states):
                b = self.emission_probabilities[i][observation_index]
                alpha[self.t - 1][i] = round(calc_probability(i) * b, 4)
        # Step 3: termination -- P(x) is the sum of alpha_n(i) over all states
        results = 0
        for index in range(len(self.states)):
            results = results + alpha[-1][index]
        return round(results, 3)

    def backward(self, observations):
        self.t = 1
        # Step 1: initialization -- beta_n(i) = 1 for every state
        beta = []
        for index in range(len(observations)):
            beta.append([0 for x in range(len(self.states))])
        for index in range(len(beta[-1])):
            beta[-1][index] = 1

        # Step 2: recursion
        # beta_t(i) = sum_j a_{ij} * b_j(x_{t+1}) * beta_{t+1}(j)
        def calc_probability(i, observations):
            prob = 0
            for j, state_j in enumerate(self.states):
                t_index = self.t
                a = self.transition_probabilities[i][j]
                b = self.emission_probabilities[j][observations[t_index]]
                prob = prob + (a * b * beta[t_index][j])
            return round(prob, 4)

        # apply the Step 2 recursion backwards, from t = n-1 down to t = 1
        for t in range(len(observations) - 1, 0, -1):
            self.t = t
            for i, state_i in enumerate(self.states):
                beta[t - 1][i] = calc_probability(i, observations)

        # Step 3: termination -- P(x) = sum_i pi_i * b_i(x_1) * beta_1(i)
        result = 0
        for i, state_i in enumerate(self.states):
            result = result + self.state_probabilities[i] * self.emission_probabilities[i][observations[0]] * beta[0][i]

        return round(result, 3)

if __name__=="__main__":
    hmm = HMM(s, v, p, A, B)
    print(hmm.backward(x))
    print(hmm.forward(x))
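Since the forward and backward algorithms both compute the total probability P(x) of the observed sequence, the two printed values should agree. As an extra sanity check (not part of the original article), P(x) can also be obtained by brute-force enumeration of all c**n = 27 state sequences; a minimal sketch using the module-level x, p, A, B defined above:

from itertools import product

def brute_force_probability(x, p, A, B):
    # Sum the joint probability of the observations over every possible state path.
    total = 0.0
    for path in product(range(len(p)), repeat=len(x)):
        prob = p[path[0]] * B[path[0]][x[0]]
        for t in range(1, len(x)):
            prob *= A[path[t - 1]][path[t]] * B[path[t]][x[t]]
        total += prob
    return total

# Should agree with hmm.forward(x) and hmm.backward(x), up to the rounding used above.
print(round(brute_force_probability(x, p, A, B), 3))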

Implementation using NumPy

import numpy as np

s_symbol = [ "w1", "w2", "w3" ] # states
c = len(s_symbol)               # number of states
v_symbol = [ "v1", "v2" ]       # output symbols
m = len(v_symbol)               # number of output symbols

# state transition probability matrix
A = np.array([[0.1, 0.7, 0.2], [0.2, 0.1, 0.7], [0.7, 0.2, 0.1]])
# emission (output) probability matrix
B = np.array([[0.9, 0.1], [0.6, 0.4], [0.1, 0.9]])
# initial probability vector
p = np.array([1/3, 1/3, 1/3])

class HMM:

    def __init__(self, p, A, B): 
        self.p = p
        self.A = A
        self.B = B

    def forward(self, x, c):
        n = len(x) # number of observations

        # forward algorithm
        alpha = np.zeros((n, c))

        # Step1: alpha_1(i) = pi_i * b_i(x_1)
        alpha[0, :] = self.p * self.B[:, x[0]]

        # Step2: alpha_t = (alpha_{t-1} A) * b(x_t), elementwise over the states
        for t in range(1, n):
            alpha[t, :] = np.dot(alpha[t-1, :], self.A) * self.B[:, x[t]]

        # Step3: P(x) = sum_i alpha_n(i)
        return round(np.sum(alpha[-1]), 3)

    def backward(self, x, c):
        n = len(x) # number of observations

        # backward algorithm
        # Step1: beta_n(i) = 1
        beta = np.zeros((n, c))
        beta[-1, :] = 1

        # Step2: beta_{t-1} = A (b(x_t) * beta_t)
        for t in range((n-1), 0, -1):
            beta[t-1, :] = np.dot(self.A, (self.B[:, x[t]] * beta[t, :]))

        # Step3: P(x) = sum_i pi_i * b_i(x_1) * beta_1(i)
        return round(np.sum(self.p * self.B[:, x[0]] * beta[0, :]), 3)

if __name__=="__main__":
    # observation sequence
    x = [ 0, 1, 0 ] # i.e. v1, v2, v1

    hmm = HMM(p, A, B)
    print(hmm.forward(x, c))
    print(hmm.backward(x, c))
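For a three-symbol observation sequence this is fine, but for long sequences the raw alpha and beta values underflow toward zero. A common remedy (not covered in the article) is to rescale alpha at every step and accumulate the log-likelihood instead; a minimal sketch reusing the numpy import and the p, A, B, x defined above:

def forward_log_likelihood(x, p, A, B):
    # Scaled forward pass: returns log P(x) without numerical underflow.
    alpha = p * B[:, x[0]]
    scale = np.sum(alpha)
    alpha = alpha / scale
    log_prob = np.log(scale)
    for t in range(1, len(x)):
        alpha = np.dot(alpha, A) * B[:, x[t]]
        scale = np.sum(alpha)
        alpha = alpha / scale
        log_prob += np.log(scale)
    return log_prob

# np.exp of the result should match hmm.forward(x, c) up to rounding.
print(np.exp(forward_log_likelihood(x, p, A, B)))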