# -*- coding: utf-8 -*-
"""
Created on Sat Jun 29 18:28:30 2019
@author: Sahil
"""
import numpy as np
import os
import cv2
import pyautogui
import imutils.paths as paths
import face_recognition
import pickle
import imutils
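# Overall flow of this script:
#   1. data()  - capture face samples from the webcam for one user (stops after 15)
#   2. train() - encode every dataset image with face_recognition and pickle the result
#   3. main()  - recognize faces live from the webcam using the pickled encodings
# Note: the E:\Project\... paths below are hardcoded; the dataset and encodings
# folders must already exist on your machine.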
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
path = "E:\\Project\\dataset\\"  # path where you want to store the dataset
def data():
    # Collect face samples from the webcam and save them as grayscale crops
    # under dataset/<username>/.
    id = pyautogui.prompt(text="""Enter Username >""", title='Recognition', default='none')
    os.mkdir(path + str(id))
    sampleN = 0
    while True:
        ret, img = cap.read()
        frame = img.copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            sampleN = sampleN + 1
            # Save only the detected face region of the grayscale frame.
            cv2.imwrite(path + str(id) + "\\" + str(sampleN) + ".jpg", gray[y:y + h, x:x + w])
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            cv2.waitKey(100)
        cv2.imshow('img', img)
        cv2.waitKey(1)
        if sampleN > 14:
            break
    cap.release()
    cv2.destroyAllWindows()
def train():
    # Build a 128-d face encoding for every image in the dataset and serialize
    # the encodings together with the person names to a pickle file.
    dataset = "E:\\Project\\dataset\\"  # path of the dataset
    module = "E:\\Project\\encodings\\encoding1.pickle"  # where you want to store the pickle file
    imagepaths = list(paths.list_images(dataset))
    knownEncodings = []
    knownNames = []
    for (i, imagePath) in enumerate(imagepaths):
        print("[INFO] processing image {}/{}".format(i + 1, len(imagepaths)))
        # The folder name (dataset/<name>/xx.jpg) is used as the person's label.
        name = imagePath.split(os.path.sep)[-2]
        image = cv2.imread(imagePath)
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        boxes = face_recognition.face_locations(rgb, model="hog")
        encodings = face_recognition.face_encodings(rgb, boxes)
        for encoding in encodings:
            knownEncodings.append(encoding)
            knownNames.append(name)
    print("[INFO] serializing encodings...")
    data = {"encodings": knownEncodings, "names": knownNames}
    output = open(module, "wb")
    pickle.dump(data, output)
    output.close()
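# Optional helper (a minimal sketch, assuming the same encoding path used in
# train() above) for inspecting what was serialized; it is not called by the
# menu at the bottom of this file.
def inspect_encodings(pickle_path="E:\\Project\\encodings\\encoding1.pickle"):
    with open(pickle_path, "rb") as f:
        d = pickle.load(f)
    print("{} encodings for names: {}".format(len(d["encodings"]), sorted(set(d["names"]))))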
def main():
    # Recognize faces from the webcam using the previously trained encodings.
    encoding = "E:\\Project\\encodings\\encoding1.pickle"
    data = pickle.loads(open(encoding, "rb").read())
    print(data)
    cap = cv2.VideoCapture(0)
    if cap.isOpened():
        ret, frame = cap.read()
    else:
        ret = False
    while ret:
        ret, frame = cap.read()
        if not ret:
            break
        # Work on a smaller RGB copy for speed; keep the ratio r so the boxes
        # can be scaled back to the full-size frame for drawing.
        rgb = imutils.resize(frame, width=400)
        rgb = cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB)
        r = frame.shape[1] / float(rgb.shape[1])
        boxes = face_recognition.face_locations(rgb, model="hog")
        encodings = face_recognition.face_encodings(rgb, boxes)
        names = []
        for encoding in encodings:
            matches = face_recognition.compare_faces(data["encodings"], encoding)
            name = "Unknown"
            if True in matches:
                # Vote among all matching encodings and keep the most common name.
                matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                counts = {}
                for i in matchedIdxs:
                    name = data["names"][i]
                    counts[name] = counts.get(name, 0) + 1
                name = max(counts, key=counts.get)
            names.append(name)
        for ((top, right, bottom, left), name) in zip(boxes, names):
            # Rescale the box coordinates from the resized frame to the original.
            top = int(top * r)
            right = int(right * r)
            bottom = int(bottom * r)
            left = int(left * r)
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
            y = top - 15 if top - 15 > 15 else top + 15
            cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
        cv2.imshow("Frame", frame)
        k = cv2.waitKey(30)
        stop = ord("S")  # to stop, press capital S while the output window is focused
        if k == stop:
            break
    cv2.destroyAllWindows()
    cap.release()
# Options checking
opt = pyautogui.confirm(text='Choose an option', title='Recognition', buttons=['Detection', 'Recognize', 'Exit'])
if opt == 'Detection':
    # 'Detection' captures a new dataset for one user, retrains the encodings,
    # and then offers to start recognition.
    opt = pyautogui.confirm(text="""
Please look at the Webcam.\nTurn your head a little while capturing.""", title='Recognition', buttons=['Ready'])
    if opt == 'Ready':
        data()
        train()
        opt = pyautogui.confirm(text='Choose an option', title='Recognition', buttons=['Recognize', 'Exit'])
        if opt == 'Recognize':
            main()
        elif opt == 'Exit':
            print("Quit the app")
elif opt == 'Recognize':
    main()
elif opt == 'Exit':
    print("Quit the app")