-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathInceptionV3_training.py
123 lines (94 loc) · 3.95 KB
/
InceptionV3_training.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
from __future__ import division
import numpy as np
from keras import backend as K
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from experiments.utils import HistoryLog
from keras.layers import Dense
from keras.layers import GlobalAveragePooling2D
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model
from keras.layers import Dropout
import experiments as exp
from keras.preprocessing import image
import os
import datetime
import time
from keras.models import Model
from keras.layers import Flatten, Dense, Input, BatchNormalization, merge
from keras.layers import Convolution2D, MaxPooling2D, AveragePooling2D
from keras.layers import Dropout
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
from keras import backend as K
def get_session(gpu_fraction=0.8):
    """Build a TensorFlow session capped at a fraction of GPU memory.

    Parameters
    ----------
    gpu_fraction : float
        Fraction of the GPU's total memory this process may allocate (0-1].

    Returns
    -------
    tf.Session
        Session with the GPU memory cap applied and, when the
        ``OMP_NUM_THREADS`` environment variable is set, a matching
        intra-op thread-pool size.
    """
    # Local import: TensorFlow is only required when a session is created.
    import tensorflow as tf

    num_threads = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    if num_threads:
        # BUG FIX: environment variables are strings; ConfigProto expects an
        # integer for intra_op_parallelism_threads, so convert explicitly.
        return tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options,
            intra_op_parallelism_threads=int(num_threads)))
    return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
def train_net(num_classes=21, epochs=10, batch_size=1,
              steps_per_epoch=36095, validation_steps=6225):
    """Fine-tune an ImageNet-pretrained InceptionV3 on a directory dataset.

    Reads images from ``data/training`` and ``data/validation`` (one
    sub-directory per class), trains the top two inception blocks plus a new
    classifier head, checkpoints weights every epoch, and writes loss/epoch
    logs via ``HistoryLog``.

    Parameters
    ----------
    num_classes : int
        Number of output classes for the softmax head (default 21, matching
        the original hard-coded value).
    epochs : int
        Number of training epochs.
    batch_size : int
        Batch size for both generators.
    steps_per_epoch : int
        Training batches per epoch (original: 36095 samples at batch size 1).
    validation_steps : int
        Validation batches per epoch (original: 6225 samples at batch size 1).
    """
    img_width, img_height = 299, 299  # InceptionV3's native input resolution
    seed = 42
    np.random.seed(seed)

    # ImageNet-pretrained convolutional base without its classifier head,
    # plus a new dropout-regularised dense head for this dataset.
    base_model = InceptionV3(include_top=False, weights='imagenet')
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(num_classes, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)

    # Freeze everything below layer 249 — the conventional InceptionV3
    # fine-tuning cut that leaves the top two inception blocks trainable.
    for layer in model.layers[:249]:
        layer.trainable = False
    for layer in model.layers[249:]:
        layer.trainable = True
    model.summary()

    # Low learning rate so fine-tuning does not destroy pretrained features.
    sgd = SGD(lr=0.0001, momentum=0.9)
    model.compile(loss='categorical_crossentropy', optimizer=sgd,
                  metrics=['accuracy'])

    # Data augmentation for training; validation only gets rescaling.
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=40,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    val_datagen = ImageDataGenerator(rescale=1. / 255)
    train_generator = train_datagen.flow_from_directory(
        'data/training',
        target_size=(img_height, img_width),
        class_mode='categorical',
        batch_size=batch_size)
    validation_generator = val_datagen.flow_from_directory(
        'data/validation',
        target_size=(img_height, img_width),
        class_mode='categorical',
        batch_size=batch_size)

    # Checkpoint weights every epoch.
    # NOTE(review): monitor='val_acc' has no effect without
    # save_best_only=True — every epoch is saved regardless; confirm intent.
    weights_filepath = "weights.InceptionV3.{epoch:02d}.hdf5"
    checkpoint = ModelCheckpoint(weights_filepath, monitor='val_acc',
                                 verbose=1)
    history = HistoryLog()

    # Fine-tune the model.
    model.fit_generator(
        train_generator,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        callbacks=[checkpoint, history],
        validation_data=validation_generator,
        validation_steps=validation_steps)

    # BUG FIX: the timestamp previously used '%H:%M:%S'; ':' is illegal in
    # Windows filenames and awkward in shell globs, so use '-' separators.
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S')
    loss_filepath = "InceptionV3.first_phase.loss." + st + ".log"
    history.log_training_loss(loss_filepath)
    epoch_filepath = "InceptionV3.first_phase.epoch." + st + ".log"
    history.log_epoch(epoch_filepath)

    # Release the TF graph/session so repeated runs don't leak GPU memory.
    if K.backend() == 'tensorflow':
        K.clear_session()
if __name__ == '__main__':
    # When running on the TensorFlow backend, install a session that caps
    # GPU memory usage at 80% before any model is built.
    if K.backend() == 'tensorflow':
        import keras.backend.tensorflow_backend as KTF
        KTF.set_session(get_session(gpu_fraction=0.8))
    train_net()