facial_req.py
forked from carolinedunn/facial_recognition
#! /usr/bin/python
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
import face_recognition
import imutils
import pickle
import time
import cv2
import RPi.GPIO as GPIO
# set up the Raspberry Pi GPIO input for the PIR motion
# detector on GPIO 23 (BCM numbering, physical pin 16)
GPIO.setmode(GPIO.BCM)
PIR_PIN = 23
GPIO.setup(PIR_PIN, GPIO.IN)
lastMotion = time.time()
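# lastMotion records the time of the most recent PIR trigger; the main
# loop below keeps processing frames for 15 seconds after motion stops.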
# Initialize 'currentname' to trigger only when a new person is identified.
currentname = "unknown"
# Determine faces from the encodings.pickle model created by train_model.py
encodingsP = "encodings.pickle"
# load the known faces and embeddings
print("[INFO] loading encodings...")
data = pickle.loads(open(encodingsP, "rb").read())
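# Note: encodings.pickle, as produced by train_model.py, is expected to be
# a dict holding two parallel lists: data["encodings"], the 128-d face
# embeddings, and data["names"], the person label for each embedding.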
# initialize the video stream and allow the camera sensor to warm up
# Set src to one of the following:
# src = 0 : the built-in webcam, e.g. a laptop's own camera
# src = 2 : what I needed in order to use the USB webcam attached to my laptop
vs = VideoStream(src=0).start()
#vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)
# start the FPS counter
fps = FPS().start()
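# (imutils' FPS helper simply records start/stop timestamps and a frame
# count; fps.fps() reports frames divided by elapsed seconds.)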
# loop over frames from the video file stream
while True:
    # process frames while motion is detected, and for 15 seconds after
    if GPIO.input(PIR_PIN) or time.time() < lastMotion + 15:
        # if there is motion input, restart the timer
        if GPIO.input(PIR_PIN):
            lastMotion = time.time()
        # grab the frame from the threaded video stream and resize it
        # to 500px (to speed up processing)
        frame = vs.read()
        frame = imutils.resize(frame, width=500)
        # detect the face boxes
        boxes = face_recognition.face_locations(frame)
        # compute the facial embeddings for each face bounding box
        encodings = face_recognition.face_encodings(frame, boxes)
        names = []
        # loop over the facial embeddings
        start = time.time()
        truecounter = 0
        SimilarityScore = 1
        for encoding in encodings:
            # attempt to match each face in the input image to our known
            # encodings
            matches = face_recognition.compare_faces(data["encodings"], encoding)
            # if the face is not recognized, the name stays "Unknown"
            name = "Unknown"
            # count how many of the known encodings matched this face
            for j in matches:
                if j:
                    truecounter = truecounter + 1
            mat = len(matches)
            SimilarityScore = int(float(truecounter) / mat * 100)
            SS = str(SimilarityScore) + "%"
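            # For example, if 12 of 30 stored encodings match this face,
            # SimilarityScore = int(12 / 30 * 100) = 40 and SS is "40%".
            # Note this is the fraction of stored encodings that matched,
            # not a calibrated probability.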
            # check to see if we have found a match
            if True in matches:
                # find the indexes of all matched faces, then initialize a
                # dictionary to count the total number of times each face
                # was matched
                matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                counts = {}
                # loop over the matched indexes and maintain a count for
                # each recognized face
                for i in matchedIdxs:
                    name = data["names"][i]
                    counts[name] = counts.get(name, 0) + 1
                # determine the recognized face with the largest number
                # of votes (note: in the event of an unlikely tie, Python
                # will select the first entry in the dictionary)
                name = max(counts, key=counts.get)
                # If someone in your dataset is identified, print their name on the screen
                if currentname != name:
                    currentname = name
                    print("------ Name ------")
                    print(currentname)
                    end = time.time()
                    print("------ Similarity Score ------")
                    print(SS)
                    print("------ Time taken ------")
                    print(end - start)
                    # face_landmarks returns one dict per detected face,
                    # keyed by feature name; [0] is the first face found
                    landmarks = face_recognition.face_landmarks(frame)
                    print("------ Landmarks ------")
                    if landmarks:  # guard against an empty result
                        print("Chin:", len(landmarks[0]["chin"]), "points")
                        print("Left eyebrow:", len(landmarks[0]["left_eyebrow"]), "points")
                        print("Right eyebrow:", len(landmarks[0]["right_eyebrow"]), "points")
                        print("Nose bridge:", len(landmarks[0]["nose_bridge"]), "points")
                        print("Nose tip:", len(landmarks[0]["nose_tip"]), "points")
                        print("Left eye:", len(landmarks[0]["left_eye"]), "points")
                        print("Right eye:", len(landmarks[0]["right_eye"]), "points")
                        print("Top lip:", len(landmarks[0]["top_lip"]), "points")
                        print("Bottom lip:", len(landmarks[0]["bottom_lip"]), "points")
            # update the list of names
            names.append(name)
        # loop over the recognized faces
        for ((top, right, bottom, left), name) in zip(boxes, names):
            # draw the predicted face name on the image - color is in BGR
            cv2.rectangle(frame, (left, top), (right, bottom),
                (0, 255, 225), 2)
            y = top - 15 if top - 15 > 15 else top + 15
            cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
                .8, (0, 255, 255), 2)
        # display the image to our screen
        cv2.imshow("Facial Recognition is Running", frame)
        key = cv2.waitKey(1) & 0xFF
        # quit when 'q' key is pressed
        if key == ord("q"):
            break
        # update the FPS counter
        fps.update()
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
GPIO.cleanup()  # release the PIR pin
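# To run (assuming encodings.pickle is in the working directory and a PIR
# sensor is wired to BCM pin 23):
#   python3 facial_req.py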