# vgg16 model used for transfer learning
import sys
from matplotlib import pyplot
from keras.utils import to_categorical
from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Dense
from keras.layers import Flatten
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
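# note: these imports use the standalone Keras namespace; on recent TensorFlow
# installs the same classes are typically imported from tensorflow.keras instead
# (e.g. from tensorflow.keras.applications.vgg16 import VGG16)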
# ----
import keras
from keras.callbacks import TensorBoard
import tensorflow as tf
import numpy as np
from random import seed
import time
import datetime
import os
import matplotlib.pyplot as plt
import io
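# force the pure-Python protobuf implementation (commonly used as a workaround for
# protobuf C-extension compatibility errors when writing TensorBoard summaries)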
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'
# ----|
# define cnn model
def define_model():
    # load the VGG16 convolutional base without its classifier head
    model = VGG16(include_top=False, input_shape=(224, 224, 3))
    # mark loaded layers as not trainable
    for layer in model.layers:
        layer.trainable = False
    # add new classifier layers
    flat1 = Flatten()(model.layers[-1].output)
    class1 = Dense(128, activation='relu', kernel_initializer='he_uniform')(flat1)
    output = Dense(1, activation='sigmoid')(class1)
    # define new model
    model = Model(inputs=model.inputs, outputs=output)
    # compile model
    opt = SGD(learning_rate=0.001, momentum=0.9)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
    return model
# # plot diagnostic learning curves
# def summarize_diagnostics(history):
#     # plot loss
#     pyplot.subplot(211)
#     pyplot.title('Cross Entropy Loss')
#     pyplot.plot(history.history['loss'], color='blue', label='train')
#     pyplot.plot(history.history['val_loss'], color='orange', label='test')
#     # plot accuracy
#     pyplot.subplot(212)
#     pyplot.title('Classification Accuracy')
#     pyplot.plot(history.history['accuracy'], color='blue', label='train')
#     pyplot.plot(history.history['val_accuracy'], color='orange', label='test')
#     # save plot to file
#     filename = sys.argv[0].split('/')[-1]
#     pyplot.savefig(filename + '_plot.png')
#     pyplot.close()
# ----
def plot_to_image(figure):
    """Converts a Matplotlib figure to a PNG-encoded TensorFlow image tensor."""
    buf = io.BytesIO()
    figure.savefig(buf, format='png')
    plt.close(figure)
    buf.seek(0)
    image = tf.image.decode_png(buf.getvalue(), channels=4)
    image = tf.expand_dims(image, 0)
    return image
# ----|
# run the test harness for evaluating a model
def run_test_harness():
    # define model
    model = define_model()
    # ----
    # generate a unique log subdirectory name for each run
    logs_vgg16 = "logs_vgg16/fit/vgg1_" + datetime.datetime.now().strftime("%H%M%S")
    tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logs_vgg16, histogram_freq=1)
    # ----|
    # create data generator
    datagen = ImageDataGenerator(featurewise_center=True)
    # specify the ImageNet mean (RGB) pixel values for centering, matching VGG16 preprocessing
    datagen.mean = [123.68, 116.779, 103.939]
    # prepare iterators
    train_it = datagen.flow_from_directory('dataset_rats_vs_squirrels/train/', class_mode='binary', batch_size=64, target_size=(224, 224))
    test_it = datagen.flow_from_directory('dataset_rats_vs_squirrels/test/', class_mode='binary', batch_size=64, target_size=(224, 224))
    # fit model
    # ----
    start_time = time.time()
    history = model.fit(train_it, steps_per_epoch=len(train_it), validation_data=test_it, validation_steps=len(test_it), epochs=10, verbose=2, callbacks=[tensorboard_callback])
    end_time = time.time()
    training_time = end_time - start_time
    print("Training time: ", training_time)
    print("Training loss: ", history.history['loss'][-1])
    print("Training accuracy: ", history.history['accuracy'][-1] * 100.0)
    # ----|
    # evaluate model
    _, acc = model.evaluate(test_it, steps=len(test_it), verbose=2)
    print('Test accuracy: > %.3f' % (acc * 100.0))
    # ----
    # visualize test images and predictions
    test_it.reset()
    test_images, test_labels = next(test_it)
    test_preds = model.predict(test_images, verbose=0)
    # convert predictions from probabilities to class labels
    test_preds_classes = [1 if x > 0.5 else 0 for x in test_preds]
    # log test images and predictions on tensorboard
    logdir = "logs_vgg16/images"
    file_writer = tf.summary.create_file_writer(logdir)
    with file_writer.as_default():
        figure, axes = plt.subplots(nrows=5, ncols=8, figsize=(40, 25))
        for i, ax in enumerate(axes.flat):
            if i < len(test_images):
                # images are mean-centred, so the rendered colours are approximate
                ax.imshow(test_images[i])
                ax.set_title("True: {} Pred: {}".format(test_labels[i], test_preds_classes[i]))
                ax.axis("off")
        plt.tight_layout()
        tf.summary.image("Test Images", plot_to_image(figure), step=0)
        tf.summary.scalar("Test Accuracy", acc, step=0)
    print()
    print("Number of model parameters: ", model.count_params())
    # ----|
    # # learning curves
    # summarize_diagnostics(history)

# entry point, run the test harness
if __name__ == '__main__':
    run_test_harness()
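# To view the logged metrics, weight histograms, and the test-image grid, launch
# TensorBoard pointed at the log root written above (assuming TensorBoard is
# installed alongside TensorFlow):
#   tensorboard --logdir logs_vgg16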