Transfer learning (VGG16)

Reference

Keras Documentation

Pattern 1

Use VGG16 (ImageNet weights, include_top=False) as a frozen feature extractor and attach a small fully connected head for binary classification.

from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Input, Dense, Flatten, Dropout

# Load VGG16 pre-trained on ImageNet, without the fully connected classifier on top
base_model = VGG16(weights='imagenet', include_top=False)

img_width = 512
img_height = 512

x_input = Input(shape=(img_width, img_height, 3))

# Pass the input through the VGG16 convolutional base, then add a small classification head
x = base_model(x_input)
x = Flatten()(x)
x = Dense(32, activation='relu')(x)
x = Dense(32, activation='relu')(x)
x = Dropout(0.3)(x)
out = Dense(1, activation='sigmoid')(x)

model = Model(inputs=x_input, outputs=out)

# Freeze all VGG16 layers so only the new head is trained
for layer in base_model.layers:
    layer.trainable = False

model.summary()

(Screenshot of the model.summary() output omitted.)
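To train this classifier, compile it with a binary cross-entropy loss and fit it on image/label arrays. A minimal sketch, assuming hypothetical NumPy arrays x_train with shape (N, 512, 512, 3) and binary labels y_train with shape (N,) (names, batch size, and learning rate are placeholders, not from the original post):

from keras.optimizers import Adam

# x_train: (N, 512, 512, 3) images, y_train: (N,) 0/1 labels -- hypothetical placeholders
model.compile(optimizer=Adam(lr=1e-4),
              loss='binary_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, batch_size=8, epochs=10, validation_split=0.1)

In practice the images should also be preprocessed with keras.applications.vgg16.preprocess_input before being fed to the network, since the ImageNet weights expect that input scaling.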

Pattern 2 (U-Net)

Use the VGG16 convolutional blocks as the encoder of a U-Net: the decoder upsamples with Conv2DTranspose and concatenates the matching encoder feature maps as skip connections.

from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Input, Conv2D, Conv2DTranspose, Dropout
from keras.layers import concatenate

img_width = 128
img_height = 128

input_tensor = Input(shape=(img_width, img_height, 3))

# Build VGG16 on the fixed 128x128 input so its intermediate layers can serve as the U-Net encoder
vgg_model = VGG16(weights='imagenet', include_top=False, input_tensor=input_tensor)

vgg_model.summary()

(Screenshots of the vgg_model.summary() output omitted; they list the block*_conv* layer names reused below.)
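Instead of reading the summary screenshots, you can also print just the layer names and output shapes; these are the names picked up with get_layer below (a small helper snippet, not from the original post):

# Print encoder layer names and output shapes to locate the skip-connection layers
for layer in vgg_model.layers:
    print(layer.name, layer.output_shape)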

# Deepest encoder feature map used as the starting point of the decoder (8x8 for 128x128 input)
vgg_top = vgg_model.get_layer('block5_conv2').output

# Encoder feature maps reused as U-Net skip connections
block1_conv2 = vgg_model.get_layer('block1_conv2').output   # 128x128
block2_conv2 = vgg_model.get_layer('block2_conv2').output   # 64x64
block3_conv3 = vgg_model.get_layer('block3_conv3').output   # 32x32
block4_conv3 = vgg_model.get_layer('block4_conv3').output   # 16x16
start_neurons = 64

# 8x8 -> 16x16: upsample and merge with the block4_conv3 skip connection
deconv4 = Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding='same')(vgg_top)
uconv4 = concatenate([deconv4, block4_conv3])
uconv4 = Dropout(0.5)(uconv4)
uconv4 = Conv2D(start_neurons * 8, (3, 3), activation='relu', padding='same')(uconv4)
uconv4 = Conv2D(start_neurons * 8, (3, 3), activation='relu', padding='same')(uconv4)

# 16x16 -> 32x32: merge with block3_conv3
deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(uconv4)
uconv3 = concatenate([deconv3, block3_conv3])
uconv3 = Dropout(0.5)(uconv3)
uconv3 = Conv2D(start_neurons * 4, (3, 3), activation="relu", padding="same")(uconv3)
uconv3 = Conv2D(start_neurons * 4, (3, 3), activation="relu", padding="same")(uconv3)

# 32x32 -> 64x64: merge with block2_conv2
deconv2 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv3)
uconv2 = concatenate([deconv2, block2_conv2])
uconv2 = Dropout(0.5)(uconv2)
uconv2 = Conv2D(start_neurons * 2, (3, 3), activation="relu", padding="same")(uconv2)
uconv2 = Conv2D(start_neurons * 2, (3, 3), activation="relu", padding="same")(uconv2)

# 64x64 -> 128x128: merge with block1_conv2
deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv2)
uconv1 = concatenate([deconv1, block1_conv2])
uconv1 = Dropout(0.5)(uconv1)
uconv1 = Conv2D(start_neurons * 1, (3, 3), activation="relu", padding="same")(uconv1)
uconv1 = Conv2D(start_neurons * 1, (3, 3), activation="relu", padding="same")(uconv1)

out = Conv2D(1, (1,1), padding="same", activation="sigmoid")(uconv1)

model = Model(inputs=input_tensor, outputs=out)

# Alternative: freeze only the first 17 layers of the full model
#for layer in model.layers[:17]:
#    layer.trainable = False

# Freeze the whole VGG16 encoder; only the decoder is trained
for layer in vgg_model.layers:
    layer.trainable = False

model.summary()

(Screenshots of the model.summary() output omitted.)
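Training works the same way as in Pattern 1, except that the targets are per-pixel masks rather than single labels. A minimal sketch, assuming hypothetical arrays x_train of shape (N, 128, 128, 3) and binary masks y_train of shape (N, 128, 128, 1):

from keras.optimizers import Adam

# x_train: (N, 128, 128, 3) images, y_train: (N, 128, 128, 1) 0/1 masks -- hypothetical placeholders
model.compile(optimizer=Adam(lr=1e-4),
              loss='binary_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, batch_size=16, epochs=30, validation_split=0.1)

Once the decoder has converged, the commented-out partial-freeze loop above is one way to unfreeze part of the encoder and continue fine-tuning with a lower learning rate.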
