Visualization notes

Posted at 2018-01-24

A memo of visualization techniques I often use with these libraries.

(Some of these no longer work as-is; possibly due to library version differences?)

Building a model that cuts off the output (softmax) layer of a trained model and exposes the last fully connected layer as its output

# cutting softmax layer: expose the last fully connected layer as the model output
from keras.models import Model

# the layer name ('dense_1' here) must match what model.summary() reports for your session
res = Model(inputs=model.input,
            outputs=model.get_layer('dense_1').output)

# dense-layer features for the training set
mel = res.predict(x_train)
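
As a quick sanity check (a minimal sketch, assuming the dense layer has 128 units as in the model summary further down), the extracted features should be 128-dimensional:

print(mel.shape)  # e.g. (60000, 128) for the full MNIST training set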

PCA

import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

# project the dense-layer features of the first 1000 training samples onto 2 components
X_reduced = PCA(n_components=2).fit_transform(mel[:1000])
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], cmap=plt.cm.RdYlGn,
            c=[np.argmax(i) for i in y_train[:1000]])
plt.colorbar()

[Figure mnist_pca.png: PCA projection of the dense-layer features, colored by digit label]
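
To see how much variance those two principal components actually retain, a minimal check (assuming mel from above) is:

pca = PCA(n_components=2)
X_reduced = pca.fit_transform(mel[:1000])
print(pca.explained_variance_ratio_)  # fraction of the variance captured by each component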

t-SNE

from sklearn.manifold import TSNE

# t-SNE embedding of the same 1000 feature vectors (slower than PCA)
X_reduced = TSNE(n_components=2, random_state=0).fit_transform(mel[:1000])
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], cmap=plt.cm.RdYlGn,
            c=[np.argmax(i) for i in y_train[:1000]])
plt.colorbar()

[Figure mnist_tsne.png: t-SNE embedding of the dense-layer features, colored by digit label]
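
t-SNE output is quite sensitive to its hyperparameters; the call below is only an illustration (perplexity=30 is scikit-learn's default), not a recommendation:

X_reduced = TSNE(n_components=2, perplexity=30, random_state=0).fit_transform(mel[:1000])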

Visualization with TensorBoard

[Screenshot スクリーンショット 2017-11-19 1.42.33.png: TensorBoard Embedding Projector]

import os
import numpy as np
from PIL import Image

ROOT_DIR = 'logs'
os.makedirs(ROOT_DIR, exist_ok=True)
mnist_sprites_path = os.path.join(ROOT_DIR, 'images.jpg')
mnist_metadata_path = os.path.join(ROOT_DIR, 'metadata.tsv')

# Save the sprite image: tile the 10000 test images into a 100x100 grid
img_array = x_test.reshape(100, 100, 28, 28)
img_array_flat = np.concatenate([np.concatenate([x for x in row], axis=1) for row in img_array])
img = Image.fromarray(np.uint8(255 * (1. - img_array_flat)))  # invert so digits are dark on white
img.save(mnist_sprites_path)
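
The Projector expects the sprite to be one square image of equally sized tiles; here it should come out as 2800×2800 pixels (100×100 tiles of 28×28 each). A quick check, assuming the variables above:

print(img.size)  # expected (2800, 2800)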

import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(figsize=(12,12))
plt.imshow(Image.open('logs/images.jpg'),cmap='gray')

# Save the metadata (index and integer label for each test image)
with open(mnist_metadata_path, 'w') as f:
    f.write("Index\tLabel\n")
    # y_test is one-hot encoded, so np.where(y_test)[1] recovers the integer labels
    for index, label in enumerate(np.where(y_test)[1]):
        f.write("%d\t%d\n" % (index, label))

!head logs/metadata.tsv
Index   Label
0   7
1   2
2   1
3   0
4   4
5   1
6   4
7   9
8   5

[Image images.jpg: the 100×100 MNIST sprite saved above]

import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector

# Creating the embeddings
from keras.models import Model
# again, the layer name must match what model.summary() reports ('dense_15' in the summary below)
res = Model(inputs=model.input,
            outputs=model.get_layer('dense_15').output)

emb = res.predict(x_test)
embedding_var = tf.Variable(emb, name='mnist_embedding')

writer = tf.summary.FileWriter(ROOT_DIR)

# Create the embedding projector
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name

# Specify where to find the metadata (the path is relative to the log directory)
#embedding.metadata_path = mnist_metadata_path  # logs/metadata.tsv
embedding.metadata_path = 'metadata.tsv'

# Specify where to find the sprite (also relative to the log directory)
#embedding.sprite.image_path = mnist_sprites_path  # logs/images.jpg
embedding.sprite.image_path = 'images.jpg'
embedding.sprite.single_image_dim.extend([28, 28])

projector.visualize_embeddings(writer, config)

# Saving the data
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.save(sess, os.path.join(ROOT_DIR, 'model.ckpt'), 1)

Command to run

tensorboard --logdir=logs
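
Then open http://localhost:6006 in a browser and switch to the PROJECTOR tab; the embeddings appear with the sprite thumbnails and the metadata labels.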

Below is the full code: the Keras CNN-MNIST sample.

import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0" 

import tensorflow as tf
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K

batch_size = 128
num_classes = 10
epochs = 12

# input image dimensions
img_rows, img_cols = 28, 28

# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
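
The original Keras mnist_cnn example also evaluates the trained model on the test set; that step is omitted in this memo, but it would look like:

score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])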

#for layer in model.layers:
#    print(layer.output)
model.summary()

# cutting softmax layer
from keras.models import Model
# use the name of the last fully connected hidden layer as reported by model.summary()
# ('dense_15' in the summary below; in a fresh session it may be 'dense_1')
res = Model(inputs=model.input,
            outputs=model.get_layer('dense_1').output)

# vis: dense-layer features for the training set
mel = res.predict(x_train)

model.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_19 (Conv2D)           (None, 26, 26, 32)        320       
_________________________________________________________________
conv2d_20 (Conv2D)           (None, 24, 24, 64)        18496     
_________________________________________________________________
max_pooling2d_10 (MaxPooling (None, 12, 12, 64)        0         
_________________________________________________________________
dropout_18 (Dropout)         (None, 12, 12, 64)        0         
_________________________________________________________________
flatten_10 (Flatten)         (None, 9216)              0         
_________________________________________________________________
dense_15 (Dense)             (None, 128)               1179776   
_________________________________________________________________
dropout_19 (Dropout)         (None, 128)               0         
_________________________________________________________________
dense_16 (Dense)             (None, 10)                1290      
=================================================================
Total params: 1,199,882
Trainable params: 1,199,882
Non-trainable params: 0
_________________________________________________________________

%ls logs
checkpoint  metadata.tsv                      model.ckpt-1.index  projector_config.pbtxt
images.jpg  model.ckpt-1.data-00000-of-00001  model.ckpt-1.meta
