# predictemt.py (100 lines, 3.44 KB)
# Forked from Nirmalvekariya/Video-Sentiment-Analysis
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
from tensorflow.keras.models import model_from_json
import numpy as np
import cv2
import math
import tensorflow as tf
from tensorflow.keras.preprocessing import image
from matplotlib import pyplot as plt
import os
import shutil
from skimage.measure import compare_ssim

# Haar cascade used by pred() to locate faces in a frame.
# Assumes the XML file sits next to this script — TODO confirm deployment layout.
facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Load the saved model: architecture from JSON, weights from HDF5.
with open("model.json", "r") as json_file:
    loaded_model_json = json_file.read()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights("model_weights.h5")
# _make_predict_function() is a TF1-era private Keras API that no longer
# exists in TF2 (calling it there raises AttributeError). Guard so the
# script runs under both TF1 and TF2.
if hasattr(loaded_model, "_make_predict_function"):
    loaded_model._make_predict_function()

# Index-to-emotion mapping matching the model's 5 output classes.
label_to_text = {0:'angry', 1:'disgust', 2:'fear', 3:'happy', 4: 'sad'}
def pred(img_path):
    """Predict the emotion shown on the first face found in an image file.

    Parameters:
        img_path: path to an image readable by cv2.imread.

    Returns:
        (label, face_batch) where label is one of the module-level
        label_to_text values and face_batch is the preprocessed
        (1, 48, 48, 1) float array fed to the model; or (0, 0) when no
        face is detected (callers filter these sentinels out).
    """
    frame = cv2.imread(img_path)                       # BGR image
    gray_fr = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # cascade works on grayscale
    # Module-level Haar cascade classifier locates candidate faces.
    faces_rects = facec.detectMultiScale(gray_fr, scaleFactor=1.2, minNeighbors=5)
    for (x, y, w, h) in faces_rects:
        fc = gray_fr[y:y + h, x:x + w]        # crop just the face region
        roi = cv2.resize(fc, (48, 48))        # model expects 48x48 input
        arr = image.img_to_array(roi) / 255   # scale pixels to [0, 1]
        arr = np.expand_dims(arr, axis=0)     # add batch dimension
        # Deliberately returns on the FIRST face; extra faces are ignored
        # (same behavior as the original early return inside the loop).
        return label_to_text[np.argmax(loaded_model.predict(arr))], arr
    return 0, 0  # sentinel: no face found
def removeout():
    """Delete the temporary 'output/' frame directory and everything in it."""
    shutil.rmtree('output/')
def vidframe(vidname):
    """Sample roughly one frame per second from a video and classify each one.

    Parameters:
        vidname: path to a video file, or 0 to record from the webcam first
            (press 'q' to stop; the recording is saved as 'output.mp4' and
            then processed like a regular file).

    Returns:
        (result, face): parallel lists of emotion labels and preprocessed
        face arrays for every sampled frame in which a face was detected.
    """
    if vidname == 0:
        cap = cv2.VideoCapture(0)
        # Define the codec and record webcam frames until the user quits.
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (640, 480))
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            out.write(frame)
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        # Release everything once recording is finished.
        cap.release()
        out.release()
        cv2.destroyAllWindows()
        vidname = "output.mp4"

    if os.path.exists('output'):  # start with a clean frame directory
        removeout()
    os.mkdir('output')

    cap = cv2.VideoCapture(vidname)
    # Original used the magic index cap.get(5); this is CAP_PROP_FPS.
    frame_rate = cap.get(cv2.CAP_PROP_FPS)
    # Guard against FPS of 0 (missing/bad metadata), which would make the
    # modulo below raise ZeroDivisionError.
    step = math.floor(frame_rate) or 1
    count = 0
    frame_files = []  # written in chronological order
    while cap.isOpened():
        # Original used the magic index cap.get(1); this is CAP_PROP_POS_FRAMES.
        frame_id = cap.get(cv2.CAP_PROP_POS_FRAMES)
        ret, frame = cap.read()
        if ret != True:
            break
        if frame_id % step == 0:  # keep ~1 frame per second of video
            filename = "output/frame%d.jpg" % count
            count += 1
            cv2.imwrite(filename, frame)
            frame_files.append(filename)
    cap.release()

    result = []  # predicted emotion per sampled frame
    face = []    # preprocessed face array per sampled frame
    # The original iterated os.listdir("output"), whose order is arbitrary,
    # so predictions could come back out of chronological order; iterating
    # the filenames in the order they were written fixes that.
    for filename in frame_files:
        a, b = pred(filename)  # (label, face_array) or (0, 0) when faceless
        result.append(a)
        face.append(b)
    removeout()
    result = [x for x in result if x != 0]        # drop "no face" sentinels
    face = [x for x in face if len(str(x)) > 1]   # drop the matching 0 entries
    return result, face
def ssimscore1(im1, im2):
    """Return the structural-similarity (SSIM) score between two face images.

    Parameters:
        im1, im2: flattened 48*48 arrays (e.g. from pred); each is reshaped
            to (48, 48, 1) float32 before comparison.

    Returns:
        The scalar SSIM score (the full difference image is discarded).
    """
    im1 = im1.reshape(48, 48, 1).astype('float32')  # restore image shape
    im2 = im2.reshape(48, 48, 1).astype('float32')
    # compare_ssim was removed from skimage.measure in scikit-image 0.18;
    # prefer the modern skimage.metrics.structural_similarity and fall back
    # to the legacy name so older installs keep working.
    try:
        from skimage.metrics import structural_similarity as _ssim
    except ImportError:
        _ssim = compare_ssim
    try:
        # channel_axis replaced multichannel in scikit-image >= 0.19.
        score, _ = _ssim(im1, im2, full=True, channel_axis=-1)
    except TypeError:
        score, _ = _ssim(im1, im2, full=True, multichannel=True)
    return score