Operating environment
GeForce GTX 1070 (8GB)
ASRock Z170M Pro4S [Intel Z170chipset]
Ubuntu 14.04 LTS desktop amd64
TensorFlow v0.11
cuDNN v5.1 for Linux
CUDA v8.0
Python 2.7.6
IPython 5.1.0 -- An enhanced Interactive Python.
gcc (Ubuntu 4.8.4-2ubuntu1~14.04.3) 4.8.4
GNU bash, version 4.3.8(1)-release (x86_64-pc-linux-gnu)
Learning in Multidimensional Spaces — Neural Networks. Matrix Formulation by Andrew P Paplinski
March 17, 2017
Based on "5.4 (Simple) example of function approximation" in the above, I implemented the mapping from a 2D input (real-valued, R^2) to a 2D output (R^2).
The relation between input and output is given at the link below (or see the PDF).
http://qiita.com/7of9/items/54ec092a91880df9dc64
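For reference, the relation as read off the func2d() implementation below (where the variable rho holds x1^2 + x2^2) is:

rho = x1^2 + x2^2
y1 = x1 * exp(-rho^2)
y2 = sin(2 * rho^2) / (4 * rho^2), with (y1, y2) = (0, 0) when rho is (near) zero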
Data generation code v0.1
Using a Halton sequence makes the R^2 inputs more uniformly distributed than plain pseudo-random numbers (a scatter-plot check is sketched after the CSV output below).
UtilHaltonSequence.py
'''
v0.3 Mar. 20, 2017
  - change to a static method in a class
v0.2 Oct. 22, 2016
  - implemented in Python
v0.1 Mar., 2005 or so
  - implemented in C
'''
# codingrule: PEP8


class CHaltonSequence:
    @staticmethod
    def calc_Halton_sequence(index):
        XBASE = 2
        YBASE = 3
        inv_xbase = 1.0 / XBASE
        fac_x = 1.0 / XBASE
        inv_ybase = 1.0 / YBASE
        fac_y = 1.0 / YBASE

        # x: radical inverse of [index] in base 2
        inp = index
        xwrk = 0.0
        while inp > 0:
            xwrk = xwrk + (inp % XBASE) * inv_xbase
            inp = inp // XBASE  # integer division (works on Python 2 and 3)
            inv_xbase = inv_xbase * fac_x

        # y: radical inverse of [index] in base 3
        inp = index
        ywrk = 0.0
        while inp > 0:
            ywrk = ywrk + (inp % YBASE) * inv_ybase
            inp = inp // YBASE  # integer division (works on Python 2 and 3)
            inv_ybase = inv_ybase * fac_y

        return xwrk, ywrk

'''
Usage example:

from UtilHaltonSequence import CHaltonSequence

for idx in range(0, 11):
    x0, y0 = CHaltonSequence.calc_Halton_sequence(index=idx)
    print(x0, y0)
'''
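Each axis of calc_Halton_sequence() is a base-b radical inverse: the base-b digits of the index are reversed and placed after the radix point (base 2 for x, base 3 for y). A minimal standalone sketch of that per-axis computation (the helper name radical_inverse is my own, not part of the code above):

def radical_inverse(index, base):
    # reverse the base-[base] digits of [index] behind the radix point
    result = 0.0
    frac = 1.0 / base
    while index > 0:
        result += (index % base) * frac
        index //= base
        frac /= base
    return result

# radical_inverse(idx, 2), radical_inverse(idx, 3) should reproduce
# CHaltonSequence.calc_Halton_sequence(index=idx)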
prep_data_170321.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import math
import sys
from UtilHaltonSequence import CHaltonSequence

'''
on Python 2.7.6

v0.1 Mar. 21, 2017
  - output to STD
  - add func2d()
  - add get_2d_QMC()
  - add get_2d_random_nums()
'''
# codingrule: PEP8


def get_2d_random_nums(xsize, ysize):
    # pseudo-random samples in [0, 1)
    MAXVAL_PLUS_ONE = 65536
    ints = np.random.randint(MAXVAL_PLUS_ONE, size=(xsize, ysize))
    # print(ints)
    flts = ints / float(MAXVAL_PLUS_ONE)
    # print(flts)
    return flts


def get_2d_QMC(xsize):
    # quasi-Monte Carlo samples in [0, 1) via the Halton sequence
    alist = []
    for idx in range(0, xsize):
        xwrk, ywrk = CHaltonSequence.calc_Halton_sequence(index=idx)
        alist.append([xwrk, ywrk])
    # print(alist)
    return np.array(alist)


def func2d(x1, x2):
    # rho holds x1^2 + x2^2
    rho = x1 ** 2 + x2 ** 2
    epsilon = 10.**(-10)
    if abs(rho) < epsilon:
        return (0.0, 0.0)
    y1 = x1 * math.exp(-rho ** 2)
    try:
        # print(rho)  # debug
        y2 = math.sin(2.0 * rho ** 2) / 4.0 / rho ** 2
    except ZeroDivisionError:
        y2 = 0.0
    return (y1, y2)


NUM_SAMPLES = 100

# 1. get random numbers in 2D
#
# a. np.random.randint()
# arr_2d = get_2d_random_nums(NUM_SAMPLES, 2)  # [0, 1)
#
# b. QMC using Halton Sequence
arr_2d = get_2d_QMC(NUM_SAMPLES)  # [0, 1)

for x1, x2 in arr_2d:
    y1, y2 = func2d(x1, x2)
    print('%.7f, %.7f, %.7f, %.7f' % (x1, x2, y1, y2))
    # sys.exit()  # debug
$ python prep_data_170321.py > input.csv
(tensorflow-GPU)yasokada@voyager01:~/WORK/TensorFlow/170319_xxyyFunc$ head input.csv
0.0000000, 0.0000000, 0.0000000, 0.0000000
0.5000000, 0.3333333, 0.4388716, 0.4943511
0.2500000, 0.6666667, 0.1933435, 0.4782739
0.7500000, 0.1111111, 0.5389515, 0.4643882
0.1250000, 0.4444444, 0.1194477, 0.4993122
0.6250000, 0.7777778, 0.2319694, 0.2311777
0.3750000, 0.2222222, 0.3617029, 0.4995656
0.8750000, 0.5555556, 0.2759375, 0.1603667
0.0625000, 0.8888889, 0.0332709, 0.3776411
0.5625000, 0.0370370, 0.5084711, 0.4966077
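To see the more uniform coverage from the Halton sequence (e.g. compared with switching to get_2d_random_nums()), a quick scatter plot of the first two columns of input.csv helps; a minimal sketch, my addition:

import numpy as np
import matplotlib.pyplot as plt

data = np.loadtxt('input.csv', delimiter=',')
plt.scatter(data[:, 0], data[:, 1])  # the (x1, x2) inputs in [0, 1)
plt.xlabel('x1')
plt.ylabel('x2')
plt.show()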
Learning code v0.1
Based on the sine curve learning code.
learn_xxyyfunc_170321.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np

'''
v0.1 Mar. 22, 2017
  - learn mapping of R^2 input to R^2 output
    + using data prepared by [prep_data_170321.py]
  - branched from sine curve learning at
    http://qiita.com/7of9/items/ce58e66b040a0795b2ae
'''
# codingrule: PEP8

filename_queue = tf.train.string_input_producer(["input.csv"])

# parse CSV
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
input1, input2, output1, output2 = tf.decode_csv(
    value, record_defaults=[[0.], [0.], [0.], [0.]])
inputs = tf.pack([input1, input2])
output = tf.pack([output1, output2])

batch_size = 4  # [4]
inputs_batch, output_batch = tf.train.shuffle_batch(
    [inputs, output], batch_size, capacity=40, min_after_dequeue=batch_size)

input_ph = tf.placeholder("float", [None, 2])
output_ph = tf.placeholder("float", [None, 2])

# network: three hidden layers of 7 sigmoid units, linear output layer
hiddens = slim.stack(input_ph, slim.fully_connected, [7, 7, 7],
                     activation_fn=tf.nn.sigmoid, scope="hidden")
# prediction = slim.fully_connected(
#     hiddens, 2, activation_fn=tf.nn.sigmoid, scope="output")
prediction = slim.fully_connected(
    hiddens, 2, activation_fn=None, scope="output")

loss = tf.contrib.losses.mean_squared_error(prediction, output_ph)
train_op = slim.learning.create_train_op(loss, tf.train.AdamOptimizer(0.001))
init_op = tf.initialize_all_variables()

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        sess.run(init_op)
        for i in range(30000):  # [10000]
            inpbt, outbt = sess.run([inputs_batch, output_batch])
            _, t_loss = sess.run([train_op, loss],
                                 feed_dict={input_ph: inpbt, output_ph: outbt})
            if (i + 1) % 100 == 0:
                print("%d,%f" % (i + 1, t_loss))
    finally:
        coord.request_stop()
        coord.join(threads)
$ python learn_xxyyfunc_170321.py > res.2in2out_170322
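The script only logs the loss every 100 steps. To eyeball a few predictions against the ground truth, the prediction node can be run on one more batch inside the same session after the training loop; a hedged sketch, not part of the original script:

# after the training loop, still inside [with tf.Session() as sess:]
inpbt, outbt = sess.run([inputs_batch, output_batch])
pred = sess.run(prediction, feed_dict={input_ph: inpbt})
for inp, out, prd in zip(inpbt, outbt, pred):
    print(inp, out, prd)  # input, ground truth, network output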
Visualizing the learning progress
Jupyter code
check_result_170322.ipynb
%matplotlib inline
# mapping of R^2 to R^2
# Mar. 22, 2017

import numpy as np
import matplotlib.pyplot as plt

data1 = np.loadtxt('res.2in2out_170322', delimiter=',')
steps = data1[:, 0]   # training step
losses = data1[:, 1]  # loss at that step

fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1)
ax1.plot(steps, losses)
ax1.set_xlabel('step')
ax1.set_ylabel('loss')
ax1.set_ylim([0, 0.02])
ax1.grid(True)
fig.show()
The loss came down to about 0.000344 (at step 29800).
Spikes still appear from time to time.
Whether the mapping has actually succeeded will have to be confirmed with images.
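As a baseline for that image check, the ground-truth outputs in input.csv (columns: x1, x2, y1, y2) can already be visualized; a minimal sketch, my addition:

import numpy as np
import matplotlib.pyplot as plt

data = np.loadtxt('input.csv', delimiter=',')
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(data[:, 0], data[:, 1], c=data[:, 2])  # y1 over the (x1, x2) plane
ax1.set_title('y1')
ax2.scatter(data[:, 0], data[:, 1], c=data[:, 3])  # y2 over the (x1, x2) plane
ax2.set_title('y2')
plt.show()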