動作環境
GeForce GTX 1070 (8GB)
ASRock Z170M Pro4S [Intel Z170chipset]
Ubuntu 16.04 LTS desktop amd64
TensorFlow v1.1.0
cuDNN v5.1 for Linux
CUDA v8.0
Python 3.5.2
IPython 6.0.0 -- An enhanced Interactive Python.
gcc (Ubuntu 5.4.0-6ubuntu1~16.04.4) 5.4.0 20160609
GNU bash, version 4.3.48(1)-release (x86_64-pc-linux-gnu)
TFRecords読込み版sine curveの学習を行う過程で関連技術を学ぶ。
v0.1: http://qiita.com/7of9/items/ab27432caedbfb301650
前回: http://qiita.com/7of9/items/ab27432caedbfb301650
v0.3 初期化処理API変更
TensorFlowのinitialize_all_variablesがDeprecatedになった件によりますと
- initialize_local_variables()
- initialize_all_variables()
はDeprecatedになったため、それぞれ以下を使用するように変更しました。
- tf.local_variables_initializer()
- tf.global_variables_initializer()
# init_op = [tf.initialize_all_variables(), tf.initialize_local_variables()]
init_op = [tf.global_variables_initializer(), tf.local_variables_initializer()]
情報感謝です。
v0.4 training実装中 > エラー対処
TFRecordsファイル生成プログラム
こちらから変更なし
学習プログラム v0.4
trainingを追加。
placeholderを使うので良いのか未消化。
learn_sineCurve_170708.py
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
"""
v0.4 Jul. 09, 2017
- change parameters for shuffle_batch()
- implement training
v0.3 Jul. 09, 2017
- fix warning > use tf.local_variables_initializer() instead of initialize_local_variables()
- fix warning > use tf.global_variables_initializer() instead of initialize_all_variables()
v0.2 Jul. 08, 2017
- fix bug > OutOfRangeError (current size 0)
+ use [tf.initialize_local_variables()]
v0.1 Jul. 08, 2017
- only read [.tfrecords]
+ add inputs_xy()
+ add read_and_decode()
"""
# codingrule: PEP8
IN_FILE = 'input_170708.tfrecords'
def read_and_decode(filename_queue):
    """Read one serialized Example from the queue and decode it.

    Returns a pair of float32 tensors (x, y), each reshaped to shape [1].
    """
    _, record = tf.TFRecordReader().read(filename_queue)
    parsed = tf.parse_single_example(
        record,
        features={
            'x_raw': tf.FixedLenFeature([], tf.string),
            'y_raw': tf.FixedLenFeature([], tf.string),
        })
    # Each feature is a raw byte string holding a single float32 value;
    # decode it and wrap it as a length-1 tensor.
    decoded = [tf.reshape(tf.decode_raw(parsed[key], tf.float32), [1])
               for key in ('x_raw', 'y_raw')]
    return decoded[0], decoded[1]
def inputs_xy():
    """Build the input pipeline: yield one decoded (x, y) pair from IN_FILE."""
    filename = IN_FILE
    # NOTE(review): num_epochs=1 makes the queue raise OutOfRangeError after a
    # single pass over the file -- this is exactly the bug demonstrated later
    # in the article; the v0.4 listing below changes this to num_epochs=None.
    filequeue = tf.train.string_input_producer(
        [filename], num_epochs=1)
    x_org, y_org = read_and_decode(filequeue)
    return x_org, y_org
# Graph construction: one (x, y) sample per read, shuffled into mini-batches.
x_orgs, y_orgs = inputs_xy()
batch_size = 1  # 4
x_batch, y_batch = tf.train.shuffle_batch([x_orgs, y_orgs],
                                          batch_size,
                                          capacity=40,
                                          min_after_dequeue=batch_size)

# init_op = [tf.initialize_all_variables(), tf.initialize_local_variables()]
init_op = [tf.global_variables_initializer(), tf.local_variables_initializer()]

# Placeholders for feeding the network from the dequeued batches.
input_ph = tf.placeholder("float", [None,1])
output_ph = tf.placeholder("float",[None,1])

## network
# Three hidden layers of 7 sigmoid units each, then a linear output layer.
hiddens = slim.stack(input_ph, slim.fully_connected, [7,7,7],
                     activation_fn=tf.nn.sigmoid, scope="hidden")
#prediction = slim.fully_connected(hiddens, 1, activation_fn=tf.nn.sigmoid, scope="output")
prediction = slim.fully_connected(hiddens, 1, activation_fn=None, scope="output")
loss = tf.contrib.losses.mean_squared_error(prediction, output_ph)
# NOTE(review): train_op is built but never run in the loop below -- training
# is still work-in-progress at this version.
train_op = slim.learning.create_train_op(loss, tf.train.AdamOptimizer(0.001))

with tf.Session() as sess:
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        # With num_epochs=1 in inputs_xy() this loop aborts with
        # OutOfRangeError once the file's records are exhausted --
        # the error output shown just below in the article.
        for idx in range(30000):
            print(idx, end=', ')
            inpbt, outpb = sess.run([x_batch, y_batch])
    finally:
        coord.request_stop()
実行とエラー > 1 epochで停止する
実行すると以下のように100個のデータ読込み後にエラーになる。
...
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, Traceback (most recent call last):
...
以下の部分が問題だった。
filequeue = tf.train.string_input_producer(
[filename], num_epochs=1)
num_epochs=None
にすることで、エラー停止しなくなった。
修正後コード v0.4
learn_sineCurve_170708.py
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
"""
v0.4 Jul. 09, 2017
- fix bug > stops only for one epoch
+ set [num_epochs=None] for string_input_producer()
- change parameters for shuffle_batch()
- implement training
v0.3 Jul. 09, 2017
- fix warning > use tf.local_variables_initializer() instead of initialize_local_variables()
- fix warning > use tf.global_variables_initializer() instead of initialize_all_variables()
v0.2 Jul. 08, 2017
- fix bug > OutOfRangeError (current size 0)
+ use [tf.initialize_local_variables()]
v0.1 Jul. 08, 2017
- only read [.tfrecords]
+ add inputs_xy()
+ add read_and_decode()
"""
# codingrule: PEP8
IN_FILE = 'input_170708.tfrecords'
def read_and_decode(filename_queue):
    """Deserialize a single Example from the queue.

    Returns (x, y): two float32 tensors, each of shape [1].
    """
    reader = tf.TFRecordReader()
    _, serialized = reader.read(filename_queue)
    feature_spec = {
        'x_raw': tf.FixedLenFeature([], tf.string),
        'y_raw': tf.FixedLenFeature([], tf.string),
    }
    example = tf.parse_single_example(serialized, features=feature_spec)
    # The stored byte strings each encode one float32; decode and give the
    # result an explicit [1] shape so downstream batching knows the shape.
    x_val = tf.reshape(tf.decode_raw(example['x_raw'], tf.float32), [1])
    y_val = tf.reshape(tf.decode_raw(example['y_raw'], tf.float32), [1])
    return x_val, y_val
def inputs_xy():
    """Return one decoded (x, y) pair read from IN_FILE.

    The filename queue uses num_epochs=None so it cycles over the file
    indefinitely -- this is the v0.4 fix for the one-epoch stop.
    """
    queue = tf.train.string_input_producer([IN_FILE], num_epochs=None)
    return read_and_decode(queue)
# Graph construction: one (x, y) sample per read, shuffled into mini-batches.
x_orgs, y_orgs = inputs_xy()
batch_size = 1  # 4
x_batch, y_batch = tf.train.shuffle_batch([x_orgs, y_orgs],
                                          batch_size,
                                          capacity=40,
                                          min_after_dequeue=batch_size)

# init_op = [tf.initialize_all_variables(), tf.initialize_local_variables()]
init_op = [tf.global_variables_initializer(), tf.local_variables_initializer()]

# Placeholders for feeding the network from the dequeued batches.
input_ph = tf.placeholder("float", [None, 1])
output_ph = tf.placeholder("float", [None, 1])

## network
# Three hidden layers of 7 sigmoid units each, then a linear output layer
# (linear chosen over sigmoid so the prediction can cover the sine range).
hiddens = slim.stack(input_ph, slim.fully_connected, [7, 7, 7],
                     activation_fn=tf.nn.sigmoid, scope="hidden")
prediction = slim.fully_connected(hiddens, 1, activation_fn=None, scope="output")
loss = tf.contrib.losses.mean_squared_error(prediction, output_ph)
train_op = slim.learning.create_train_op(loss, tf.train.AdamOptimizer(0.001))

with tf.Session() as sess:
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for idx in range(30000):
            print(idx, end=', ')
            inpbt, outpb = sess.run([x_batch, y_batch])
            # BUGFIX: actually execute the training step. Previously the
            # batch was fetched but train_op was never run, so the network
            # was never updated despite the "implement training" changelog.
            sess.run(train_op, feed_dict={input_ph: inpbt,
                                          output_ph: outpb})
    finally:
        coord.request_stop()
        # Wait for the queue-runner threads to shut down cleanly.
        coord.join(threads)