-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathAttendanceProjects.py
116 lines (89 loc) · 4.02 KB
/
AttendanceProjects.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
#Importing required libraries
import cv2
import numpy as np
import face_recognition
import os
from datetime import datetime
# Path to the directory of reference images (one image file per known person;
# the filename without extension is used as the person's name).
path = 'ImagesAttendance'
# Initialize some variables
images = []      # loaded reference images (BGR, as returned by cv2.imread)
classNames = []  # person names derived from the image filenames
myList = os.listdir(path)
print(myList)
for cl in myList:
    curImg = cv2.imread(f'{path}/{cl}')
    if curImg is None:
        # cv2.imread returns None for unreadable or non-image files
        # (e.g. hidden/system files); skip them so that images and
        # classNames stay index-aligned.
        print(f'Warning: could not read {cl}, skipping')
        continue
    images.append(curImg)
    # splitext drops the extension: 'Elon Musk.jpg' -> 'Elon Musk'
    classNames.append(os.path.splitext(cl)[0])
print(classNames)
#we will first convert it into RGB and then find its encoding using the face_encodings() function.
#Then we will append each encoding to our list.
def findEncodings(images):
    """Return a list of 128-d face encodings, one per input image.

    Each image is converted BGR -> RGB (face_recognition expects RGB),
    and the first detected face in it is encoded.

    Args:
        images: list of BGR images as loaded by cv2.imread.

    Raises:
        ValueError: if no face can be detected in one of the images,
            instead of the original opaque IndexError from ``[0]``.
    """
    encodeList = []
    for idx, img in enumerate(images):
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        encodings = face_recognition.face_encodings(rgb)
        if not encodings:
            # Fail loudly: silently skipping would desynchronize
            # encodeList from the module-level classNames list.
            raise ValueError(f'No face detected in image at index {idx}')
        encodeList.append(encodings[0])
    return encodeList
#markAttendance is a function that requires only one input which is the name of the user.
# We can use the datetime class in the date time package to get the current time.
def markAttendance(name):
    """Record *name* with the current time in Attendance.csv, once per name.

    The file is opened in 'a+' mode so it is created automatically on the
    first run (the original 'r+' mode raised FileNotFoundError when
    Attendance.csv did not exist yet). If the name is already present in
    the file, nothing is written.

    Args:
        name: the person's name (first CSV column) to record.
    """
    with open('Attendance.csv', 'a+') as f:
        f.seek(0)  # 'a+' opens positioned at EOF; rewind to read existing rows
        # First column of every non-blank line; strip() guards against
        # stray whitespace around previously written names.
        nameList = [line.split(',')[0].strip()
                    for line in f.readlines() if line.strip()]
        if name not in nameList:
            now = datetime.now()
            dtString = now.strftime('%H:%M:%S')
            # Leading newline keeps each record on its own line.
            f.write(f'\n{name},{dtString}')
#### FOR CAPTURING SCREEN RATHER THAN WEBCAM
# def captureScreen(bbox=(300,300,690+300,530+300)):
# capScr = np.array(ImageGrab.grab(bbox))
# capScr = cv2.cvtColor(capScr, cv2.COLOR_RGB2BGR)
# return capScr
# Precompute the encodings of all known reference images once at startup;
# each webcam face is later compared against this list.
encodeListKnown = findEncodings(images)
print('Encoding Complete')
def loopfunction():
    """Run the live webcam attendance loop until the user presses 'q'.

    Grabs frames from the default webcam, detects faces on a downscaled
    copy, matches them against the module-level encodeListKnown, marks
    attendance for recognized people, and draws labelled boxes on the
    live feed.
    """
    cap = cv2.VideoCapture(0)
    while True:
        success, img = cap.read()
        if not success:
            # Webcam unavailable or stream ended; a None frame would
            # crash cv2.resize below, so stop cleanly instead.
            break
        # Detect on a quarter-size RGB copy for speed; coordinates are
        # scaled back up by 4 before drawing on the full-size frame.
        imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
        imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
        facesCurFrame = face_recognition.face_locations(imgS)
        encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
        for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
            # Distance to every known encoding; the smallest distance is
            # the best candidate match. (The original also called
            # compare_faces but never used the result — removed.)
            faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
            matchIndex = np.argmin(faceDis)
            if faceDis[matchIndex] < 0.50:
                name = classNames[matchIndex].upper()
                markAttendance(name)
            else:
                name = 'Unknown'
            print(name)
            # face_locations returns (top, right, bottom, left) on the
            # quarter-size frame; scale back to full-frame coordinates.
            y1, x2, y2, x1 = faceLoc
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
            # Box around the face plus a filled label bar below it.
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
            cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
        cv2.imshow('Webcam', img)
        # Hit 'q' on the keyboard to quit.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Release handle to the webcam and close the preview window.
    cap.release()
    cv2.destroyAllWindows()