-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathModel_training&Heatmap.py
111 lines (81 loc) · 3.27 KB
/
Model_training&Heatmap.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
# -*- coding: utf-8 -*-
"""Project Training.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Kr15oI3Jz958EB30I3We9V278wZnGd83
"""
#!pip install tensorflow-gpu
import sklearn as sk
from sklearn.metrics import confusion_matrix
import seaborn as sns
import sys
import numpy
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential # For constructing model
from tensorflow.keras.layers import Dense, Dropout, Flatten # Layer cores
from tensorflow.keras.layers import Conv2D, MaxPooling2D # CNN layers
from tensorflow.keras.utils import to_categorical # Extra utilities
import pickle
from sklearn.model_selection import train_test_split
import os
# Mount Google Drive so the pickled dataset under /content/drive is readable.
# (The original cell mounted the drive twice back-to-back; the second call was
# a redundant no-op and has been removed.)
from google.colab import drive
drive.mount('/content/drive')
# GPU test code: fail fast unless Colab allocated a GPU runtime, since
# training on CPU would be impractically slow.
device_name = tf.test.gpu_device_name()
if device_name == '/device:GPU:0':
    print('Found GPU at: {}'.format(device_name))
else:
    raise SystemError('GPU device not found')
def loadData(fileName, size=0.2, data_dir='/content/drive/My Drive/'):
    """Load the pickled (X, Y) dataset and split it into train/test sets.

    Args:
        fileName: Name of the pickle file inside ``data_dir`` containing the
            ``(X, Y)`` tuple. Bug fix: the original implementation ignored
            this argument and hard-coded the full path; it is now honoured.
        size: Fraction of samples reserved for the test split (default 0.2).
        data_dir: Directory holding the pickle file. The default reproduces
            the previously hard-coded location, so the existing call site
            ``loadData('X_Y_Data.pickle')`` opens exactly the same file.

    Returns:
        ``X_train, X_test, y_train, y_test`` with each X sample reshaped to a
        (45, 45, 1) single-channel image.
    """
    with open(os.path.join(data_dir, fileName), 'rb') as f:
        X, Y = pickle.load(f)
    # Flat feature vectors become 45x45 grayscale images with one channel.
    X = X.reshape(-1, 45, 45, 1)
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=size)
    return X_train, X_test, y_train, y_test
def createModel(size):
    """Build and compile the character-classification CNN.

    Args:
        size: Shape of one input sample, e.g. (45, 45, 1) as produced by
            ``loadData``.

    Returns:
        A compiled Keras ``Sequential`` model with 66 softmax outputs
        (English letters, digits, and symbols), using the Adam optimizer
        and sparse categorical cross-entropy (labels are integer class ids).
    """
    # NOTE: spatial sizes below assume 45x45 inputs (see loadData's reshape);
    # the original comments claimed 48x48, which did not match the data.
    cnn_layers = [
        Conv2D(32, (3, 3), activation='relu', input_shape=size),  # 45 -> 43
        MaxPooling2D(),                                           # 43 -> 21
        Conv2D(64, (3, 3), activation='relu'),                    # 21 -> 19
        MaxPooling2D(),                                           # 19 -> 9
        Conv2D(128, (3, 3), activation='relu'),                   # 9 -> 7
        MaxPooling2D(),                                           # 7 -> 3
        Dropout(rate=0.15),
        Flatten(),                          # 3 * 3 * 128 = 1152 features
        Dense(500, activation='relu'),
        Dropout(0.2),
        Dense(250, activation='relu'),
        Dropout(0.2),
        Dense(125, activation='relu'),
        Dropout(0.2),
        Dense(66, activation='softmax'),    # 66 output classes
    ]
    model = Sequential()
    for layer in cnn_layers:
        model.add(layer)
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
# Load the dataset (path handling lives inside loadData) and size the CNN's
# input layer from the training images.
X_train, X_test, y_train, y_test = loadData('X_Y_Data.pickle')
model = createModel(X_train.shape[1:])
# Checkpoint weights-only snapshots into ./training/, one file per save,
# named by epoch number. `period=2` saves every 2 epochs (NOTE: `period` is
# deprecated in newer TF in favour of `save_freq` — confirm against the
# pinned TF version before upgrading).
checkpoint_path = "training/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, verbose=1, save_weights_only=True, period = 2)
# Save the untrained weights as "epoch 0" so a baseline checkpoint exists.
model.save_weights(checkpoint_path.format(epoch=0))
# Train for 16 epochs, holding out 20% of the training data for validation.
model.fit(X_train, y_train, epochs=16, callbacks=[cp_callback], validation_split = .2)
# Final generalization check on the held-out test split.
loss, acc = model.evaluate(X_test, y_test)
# Get Confusion matrix: rows are true labels, columns are predicted labels.
# model.predict returns per-class probabilities; argmax picks the class id.
con_mat = confusion_matrix(y_test, model.predict(X_test).argmax(axis=1))
con_mat = con_mat.astype(float)
# Normalize each row to sum to 1 (per-class distribution of predictions).
# This vectorized form replaces the original hand-written double loop, which
# hard-coded 66 classes (crashing if fewer appear in y_test) and raised
# ZeroDivisionError for any class with no test samples; all-zero rows are
# now left as zeros instead.
row_sums = con_mat.sum(axis=1, keepdims=True)
con_mat = numpy.divide(con_mat, row_sums,
                       out=numpy.zeros_like(con_mat),
                       where=row_sums != 0)
# Create heat map of confusion matrix
plt.figure(figsize=(18, 14))
sns.heatmap(con_mat, square=True, linewidths=0.01, linecolor='#A9A9A9')