# -*- coding: utf-8 -*-
"""cwp_project1_gui.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bkOag2ypY8fm2dFDPIQDOyw5vm8VjdQ0
"""
# importing the necessary packages
import os
import tkinter
from tkinter import *  # LEFT, RIGHT, BOTTOM, Button
import cv2
import numpy as np
from PIL import Image, ImageTk
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.optimizers import Adam  # only used in the training sketch below
from tensorflow.keras.preprocessing.image import ImageDataGenerator  # likewise
# os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
# building the model with Sequential and adding layers
emotion_model = Sequential()
emotion_model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48, 48, 1)))
emotion_model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Dropout(0.25))
emotion_model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Dropout(0.25))
emotion_model.add(Flatten())
emotion_model.add(Dense(1024, activation='relu'))
emotion_model.add(Dropout(0.5))
emotion_model.add(Dense(7, activation='softmax'))  # one output per emotion class
# load the pre-trained weights; calling save_weights() here would overwrite
# the file with untrained, randomly initialised weights
emotion_model.load_weights('model4.h5')
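# load_weights() assumes 'model4.h5' already contains trained weights. As a
# rough illustration only (an assumption, not part of this project): such a
# file could be produced with the ImageDataGenerator import above, given a
# hypothetical 'data/train' directory of 48x48 grayscale face images sorted
# into one subfolder per emotion:
#
#   train_gen = ImageDataGenerator(rescale=1./255).flow_from_directory(
#       'data/train', target_size=(48, 48), color_mode='grayscale',
#       batch_size=64, class_mode='categorical')
#   emotion_model.compile(loss='categorical_crossentropy',
#                         optimizer=Adam(learning_rate=0.0001),
#                         metrics=['accuracy'])
#   emotion_model.fit(train_gen, epochs=50)
#   emotion_model.save_weights('model4.h5')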
cv2.ocl.setUseOpenCL(False)
# index -> emotion label, matching the order of the model's 7 softmax outputs
emotion_dict = {0: " Angry ", 1: " Disgusted ", 2: " Fearful ", 3: " Happy ",
                4: " Neutral ", 5: " Sad ", 6: " Surprised "}
cur_path = os.path.dirname(os.path.abspath(__file__))
# index -> emoji image path (file names must match those on disk, including
# the misspelled 'surpriced.png')
emoji_dist = {0: cur_path + "/emojis/angry.png", 1: cur_path + "/emojis/disgusted.png",
              2: cur_path + "/emojis/fearful.png", 3: cur_path + "/emojis/happy.png",
              4: cur_path + "/emojis/neutral.png", 5: cur_path + "/emojis/sad.png",
              6: cur_path + "/emojis/surpriced.png"}
last_frame1 = np.zeros((480, 640, 3), dtype=np.uint8)
cap1 = None          # video capture handle, opened once in show_subject()
show_text = [0]      # index of the most recently detected emotion
frame_number = 0
# video processing function: grabs a frame, detects faces, classifies the
# emotion and displays the annotated frame in the GUI
def show_subject():
    global cap1, frame_number, last_frame1
    if cap1 is None:
        cap1 = cv2.VideoCapture(0)  # open the webcam/video once, not on every call
        if not cap1.isOpened():
            print("can't open the camera")
            return
    # frame seeking only makes sense for a video file; for a live camera
    # CAP_PROP_FRAME_COUNT is not positive and this block is skipped
    length = int(cap1.get(cv2.CAP_PROP_FRAME_COUNT))
    if length > 0:
        frame_number += 1
        if frame_number >= length:
            exit()
        cap1.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
    # flag1 tells us whether a frame was read; frame1 is the image array
    flag1, frame1 = cap1.read()
    if flag1 is None:
        print("Major error!")
    elif flag1:
        frame1 = cv2.resize(frame1, (600, 500))  # resize for display
        bounding_box = cv2.CascadeClassifier('./data/haarcascade_frontalface_default.xml')
        gray_frame = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
        num_faces = bounding_box.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)
        for (x, y, w, h) in num_faces:
            cv2.rectangle(frame1, (x, y - 50), (x + w, y + h + 10), (255, 0, 0), 2)
            roi_gray_frame = gray_frame[y:y + h, x:x + w]
            # the model expects a (1, 48, 48, 1) batch of grayscale images
            cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame, (48, 48)), -1), 0)
            prediction = emotion_model.predict(cropped_img)
            maxindex = int(np.argmax(prediction))
            cv2.putText(frame1, emotion_dict[maxindex], (x + 20, y - 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
            show_text[0] = maxindex
        last_frame1 = frame1.copy()
        pic = cv2.cvtColor(last_frame1, cv2.COLOR_BGR2RGB)  # OpenCV is BGR, Tk wants RGB
        img = Image.fromarray(pic)
        imgtk = ImageTk.PhotoImage(image=img)
        lmain.imgtk = imgtk  # keep a reference so the image isn't garbage-collected
        lmain.configure(image=imgtk)
        root.update()
    lmain.after(10, show_subject)  # schedule the next frame on the Tk event loop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        exit()
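# Note: the cascade file is expected at ./data/haarcascade_frontalface_default.xml.
# If it is missing, the copy bundled with the opencv-python package could be
# used instead (an alternative, not what this script does):
#   bounding_box = cv2.CascadeClassifier(
#       cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')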
# avatar/emoji placement: show the emoji matching the last detected emotion
def show_avatar():
    frame2 = cv2.imread(emoji_dist[show_text[0]])
    pic2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)  # convert BGR to RGB for display
    img2 = Image.fromarray(pic2)
    imgtk2 = ImageTk.PhotoImage(image=img2)
    lmain2.imgtk2 = imgtk2  # keep a reference so the image isn't garbage-collected
    lmain3.configure(text=emotion_dict[show_text[0]], font=('arial', 45, 'bold'))
    lmain2.configure(image=imgtk2)
    root.update()
    lmain2.after(10, show_avatar)
if __name__ == '__main__':
    root = tkinter.Tk()
    # labels that hold the video frame, the emoji image and the emotion text
    lmain = tkinter.Label(master=root, padx=50, bd=10)
    lmain2 = tkinter.Label(master=root, bd=10)
    lmain3 = tkinter.Label(master=root, bd=10, fg="#CDCDCD", bg='grey')
    lmain.pack(side=LEFT)
    lmain.place(x=50, y=250)
    lmain3.pack()
    lmain3.place(x=960, y=250)
    lmain2.pack(side=RIGHT)
    lmain2.place(x=900, y=350)
    root.title("Photo of Emoji")  # window title
    root.geometry("1400x900+100+10")
    root['bg'] = 'black'  # GUI background
    # pack() returns None, so build the button first and pack it separately
    exitButton = Button(root, text='Quit', fg="red", command=root.destroy,
                        font=('arial', 25, 'bold'))
    exitButton.pack(side=BOTTOM)
    # start the two update loops; each reschedules itself with after(), so
    # everything runs on the Tk event loop (Tkinter is not thread-safe)
    root.after(10, show_subject)
    root.after(10, show_avatar)
    root.mainloop()
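# To run (assuming a connected webcam plus the files listed at the top of
# this script):
#   python cwp_project1_gui.py
# Quit with the Quit button, or press 'q' while a frame is being processed.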