inputデータは,np.arrayで用意してください
trainデータは、毎回変わるようにするか、バッチで抽出するようにして、学習させる方が良いです。
隠れ層の数は何が適切かわからないので、もう1段for文を加えて比較するとかが良いかと思います
# x個の素性からy個に分類するdeeplearning
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
# inputデータは,np.arrayで用意してください
# Fixed hyperparameters, decided in advance.
NUM_IMPUT = 45      # number of input features (sic: "IMPUT" typo, kept because other lines reference this name)
NUM_HIDDEN1 = 15    # units in the single hidden layer
NUM_OUTPUT = 3      # number of output classes (one-hot labels)
TRAIN_TIMES = 101   # number of training iterations
# 関数
def inference(x_placeholder):
    """Build the forward pass: input -> ReLU hidden layer -> softmax output.

    Args:
        x_placeholder: float placeholder of shape [None, NUM_IMPUT].
    Returns:
        Softmax probability tensor of shape [None, NUM_OUTPUT].
    """
    with tf.name_scope('hidden1') as scope:
        # Small random weights + small positive bias: standard init for ReLU units.
        hidden1_weight = tf.Variable(tf.truncated_normal([NUM_IMPUT, NUM_HIDDEN1], stddev=0.1), name="hidden1_weight")
        hidden1_bias = tf.Variable(tf.constant(0.1, shape=[NUM_HIDDEN1]), name="hidden1_bias")
        hidden1_output = tf.nn.relu(tf.matmul(x_placeholder, hidden1_weight) + hidden1_bias)
    with tf.name_scope('output') as scope:
        output_weight = tf.Variable(tf.truncated_normal([NUM_HIDDEN1, NUM_OUTPUT], stddev=0.1), name="output_weight")
        output_bias = tf.Variable(tf.constant(0.1, shape=[NUM_OUTPUT]), name="output_bias")
        # NOTE(review): softmax here combined with log() in loss() is numerically
        # fragile; consider returning logits and using
        # tf.nn.softmax_cross_entropy_with_logits in the loss instead.
        output = tf.nn.softmax(tf.matmul(hidden1_output, output_weight) + output_bias)
    return output
def loss(output, y_placeholder, loss_label_placeholder):
    """Mean cross-entropy between softmax `output` and one-hot `y_placeholder`.

    Args:
        output: softmax probabilities, shape [None, NUM_OUTPUT].
        y_placeholder: one-hot labels, shape [None, NUM_OUTPUT].
        loss_label_placeholder: unused here; kept for interface compatibility
            (callers feed it a label string such as "loss_train"/"loss_test").
    Returns:
        Scalar loss tensor.
    """
    with tf.name_scope('loss') as scope:
        # Clip probabilities away from 0 so tf.log never produces -inf/NaN
        # when the softmax saturates.
        clipped_output = tf.clip_by_value(output, 1e-10, 1.0)
        loss = tf.reduce_mean(-tf.reduce_sum(y_placeholder * tf.log(clipped_output), reduction_indices=[1]))
    return loss
def training(loss, learning_rate=0.5):
    """Return a gradient-descent train op that minimizes `loss`.

    Args:
        loss: scalar loss tensor to minimize.
        learning_rate: step size; defaults to the original hard-coded 0.5.
    Returns:
        Training op to run in a session.
    """
    with tf.name_scope('training') as scope:
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    return train_step
# Graph inputs.
y_placeholder = tf.placeholder("float", [None, NUM_OUTPUT], name="y_placeholder")
x_placeholder = tf.placeholder("float", [None, NUM_IMPUT], name="x_placeholder")
loss_label_placeholder = tf.placeholder("string", name="loss_label_placeholder")

output = inference(x_placeholder)
# Bind the result to a distinct name so the loss() function itself is not
# shadowed (the original `loss = loss(...)` made loss() uncallable afterwards).
loss_op = loss(output, y_placeholder, loss_label_placeholder)
training_op = training(loss_op)

# Accuracy: fraction of rows whose argmax prediction matches the one-hot label.
correct_prediction = tf.equal(tf.argmax(output, 1), tf.argmax(y_placeholder, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

best_loss = float("inf")  # NOTE(review): never updated below — dead unless extended

sess = tf.InteractiveSession()
# initialize_all_variables() is the pre-TF-1.0 initializer; on TF >= 1.0 use
# tf.global_variables_initializer() instead.
sess.run(tf.initialize_all_variables())
# 実行
for step in range(TRAIN_TIMES):
x_train,y_train=xxxxxxxxxxxxxx
feed_dict_train={y_placeholder: y_train,x_placeholder: x_train,loss_label_placeholder: "loss_train"}
if step%20 == 0:print "step %d, train accuracy %g"%(step,accuracy.eval(feed_dict=feed_dict_train))
sess.run(training_op, feed_dict=feed_dict_train)
x_test,y_test=xxxxxxxxxxxxxxxxxxx
feed_dict_test={y_placeholder: y_test,x_placeholder: x_test,loss_label_placeholder: "loss_test"}
print "test accuracy %g"%(accuracy.eval(feed_dict=feed_dict_test))
y_pred = sess.run(output, feed_dict=feed_dict_test)
print y_pred
# a=[y_pred[:,0],y_pred[:,1],y_pred[:,2],y_test[:,0],y_test[:,1],y_test[:,2]]
# with open('pred.csv', 'w') as f:
# writer = csv.writer(f)
# writer.writerows(a)
これは参考になる資料:
http://dev.classmethod.jp/machine-learning/tensorflow-math/
パラメータチューニングのコツ
http://www.atmarkit.co.jp/ait/articles/1512/16/news020.html