LoginSignup
0
0

More than 5 years have passed since last update.

質問です! tensorboardについて

Posted at

私は研究でNNの勉強をしています.
フレームワークでtensorflowを用いていますが,ログへの書き込みのところで上手くいきません.おそらく
if i % 100 == 0:
loss_val, acc_val = sess.run([loss, accuracy], feed_dict={x:mnist.test.images, t:mnist.test.labels, keep_prob:1.0})
print ('Step: %d, Loss: %f, Accuracy: %f' % (i, loss_val, acc_val))
a = acc_val
#ログを取る処理を実行する(出力はログ情報が書かれた)
summary_str = sess.run(summary_op, feed_dict={x:mnist.test.images, t:mnist.test.labels, keep_prob:1.0})
#ログ情報のプロトコルバッファを書き込む
summary_writer.add_summary(summary_str, i)
fp.write("{0}\n" .format(a))
で間違えているのですが,私の知識不足で解決できません.是非,お答え願います.
以下に,全コードを記しています.

import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import time

# Wall-clock timestamp taken at startup; elapsed time is reported at exit.
start_time = time.time()
print("開始時刻: " + str(start_time))

# Network / training hyperparameters.
H = 100              # units per hidden layer
BATCH_SIZE = 100     # minibatch size
DROP_OUT_RATE = 0.5  # probability of dropping a unit during training

# --- Training data ---------------------------------------------------------
# Flattened 28x28 MNIST images (784 pixels per example).
x = tf.placeholder(tf.float32, [None, 784])

# Log the input images to TensorBoard (up to 10 images per evaluation).
img = tf.reshape(x, [-1, 28, 28, 1])
tf.summary.image("input_data", img, 10)

# --- Labels ----------------------------------------------------------------
# One-hot teacher signal over the 10 digit classes.
t = tf.placeholder(tf.float32, [None, 10])

# --- Input layer -> hidden layer 1 -----------------------------------------
with tf.name_scope("hidden1"):
    w1 = tf.Variable(tf.random_normal([784, H], mean=0.0, stddev=0.05))
    b1 = tf.Variable(tf.zeros([H]))
    a1 = tf.sigmoid(tf.matmul(x, w1) + b1)
    # Log the weight distribution of hidden layer 1 to TensorBoard.
    tf.summary.histogram('w1', w1)

# --- Hidden layer 1 -> hidden layer 2 --------------------------------------
with tf.name_scope("hidden2"):
    w2 = tf.Variable(tf.random_normal([H, H], mean=0.0, stddev=0.05))
    b2 = tf.Variable(tf.zeros([H]))
    a2 = tf.sigmoid(tf.matmul(a1, w2) + b2)
    # Log the weight distribution of hidden layer 2 to TensorBoard.
    tf.summary.histogram('w2', w2)

# --- Hidden layer 2 -> output layer ----------------------------------------
with tf.name_scope("output"):
    w3 = tf.Variable(tf.random_normal([H, 10], mean=0.0, stddev=0.05))
    b3 = tf.Variable(tf.zeros([10]))
    # Dropout applied to the last hidden activations; keep_prob is fed
    # as (1 - DROP_OUT_RATE) during training and 1.0 during evaluation.
    keep_prob = tf.placeholder(tf.float32)
    drop = tf.nn.dropout(a2, keep_prob)
    # NOTE(review): ReLU on the output layer is unusual for classification —
    # softmax with cross-entropy is the standard choice. Confirm intent.
    y = tf.nn.relu(tf.matmul(drop, w3) + b3)

# --- Loss ------------------------------------------------------------------
# L2 loss between the network output and the one-hot targets,
# normalized by the batch size.
loss = tf.nn.l2_loss(y - t) / BATCH_SIZE

# Log the loss scalar to TensorBoard.
tf.summary.scalar("loss", loss)

# --- Training op -----------------------------------------------------------
with tf.name_scope("train"):
    # Adam with a fixed learning rate of 1e-4.
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)

# --- Evaluation ------------------------------------------------------------
with tf.name_scope("accuracy"):
    # Fraction of examples whose argmax prediction matches the label.
    correct = tf.equal(tf.argmax(y, 1), tf.argmax(t, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

# Log the accuracy scalar to TensorBoard.
tf.summary.scalar("accuracy", accuracy)

# --- Merge summaries -------------------------------------------------------
# BUG FIX: the original `merge = tf.summary.merge([a, b1, c])` referenced
# undefined names (`a`, `c`) and a tf.Variable (`b1`) instead of summary
# ops, which raises NameError/TypeError at graph-build time. To merge only
# a subset of summaries, keep the handles returned by the tf.summary.*
# calls and pass those, e.g.:
#   loss_summary = tf.summary.scalar("loss", loss)
#   merge = tf.summary.merge([loss_summary, ...])
#
# BUG FIX: the original also overwrote this with
# `summary_op = tf.merge_all_summaries()`, an API removed in TensorFlow 1.0
# — the likely cause of the reported failure when writing logs. Only the
# current merge-all form is kept.
summary_op = tf.summary.merge_all()

# Accuracy values are appended to this text file, one per evaluation step.
fp = open("MLP.txt", "w")

with tf.Session() as sess:
    # FileWriter for TensorBoard logs. After the run, inspect with:
    #   tensorboard --logdir='abc'
    # then open http://localhost:6006 in a browser.
    summary_writer = tf.summary.FileWriter('abc', sess.graph)
    # NOTE(review): dtype=tf.uint8 keeps pixel values in 0..255; the default
    # dtype=tf.float32 rescales them to [0, 1], which usually trains better
    # with sigmoid units — confirm this choice.
    mnist = input_data.read_data_sets('.\mnist', one_hot=True, dtype=tf.uint8)
    sess.run(tf.global_variables_initializer())

    # 1000 minibatch steps; evaluate and log every 100 steps.
    for i in range(1, 1001):
        bx, bt = mnist.train.next_batch(BATCH_SIZE)
        sess.run([train_step],
                 feed_dict={x: bx, t: bt, keep_prob: (1 - DROP_OUT_RATE)})

        if i % 100 == 0:
            loss_val, acc_val = sess.run(
                [loss, accuracy],
                feed_dict={x: mnist.test.images, t: mnist.test.labels,
                           keep_prob: 1.0})
            print ('Step: %d, Loss: %f, Accuracy: %f' % (i, loss_val, acc_val))
            # Evaluate the merged summary op on the test set.
            summary_str = sess.run(
                summary_op,
                feed_dict={x: mnist.test.images, t: mnist.test.labels,
                           keep_prob: 1.0})
            # Write the summary protocol buffer for global step i.
            summary_writer.add_summary(summary_str, i)
            fp.write("{0}\n".format(acc_val))

fp.close()

end_time = time.time()
print("終了時刻: ", end_time)
print ("かかった時間: ", (end_time - start_time))

0
0
1

Register as a new user and use Qiita more conveniently

  1. You get articles that match your needs
  2. You can efficiently read back useful information
  3. You can use dark theme
What you can do with signing up
0
0