efghi7890 committed on
Commit d4483ef · 1 Parent(s): a1cbbc7

Update app.py

Files changed (1)
  1. app.py +38 -36
app.py CHANGED
@@ -5,19 +5,20 @@ import face_recognition
 import os
 from datetime import datetime
 
+
 def greet(video):
     path = "ImagesAttendance"
     images = []
     classNames = []
     myList = os.listdir(path)
     print(myList)
-    for cl in myList
+    for cl in myList:
         curImg = cv2.imread(f'{path}/{cl}')
         images.append(curImg)
         classNames.append(os.path.splitext(cl)[0])
-    print(classNames)
-
-    def findEncoding(images):
+    print(classNames)
+
+    def findEncodings(images):
         encodeList = []
         for img in images:
             img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
@@ -26,7 +27,7 @@ def greet(video):
         return encodeList
 
     def markAttendance(name):
-        with open('Attendance.csv','r+') as f:
+        with open('Attendance.csv', 'r+') as f:
             myDataList = f.readlines()
             nameList = []
             for line in myDataList:
@@ -38,40 +39,41 @@ def greet(video):
                 f.writelinbes(f'\n{name},{dtString}')
     encodeListKnown = findEncodings(images)
     print('Encoding Complete')
-
+
     cap = cv2.VideoCapture(video)
-
+
     while True:
-        succes, img = cap.read()
-        imgS = cv2.resize(img,(0,0),None,0.25,0.25)
-        imgS = cv2.cvtColor(imgs, cv2.COLOR_BGR2GRAY)
-
-        facesCurFrame = face:recognition.face_locations(imgS)
-        encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
-
-        for encodeFace_faceLoc in zip(encodesCurFrame, facesCurFrame):
-            matches = face_recognition.compare_faces(encodeListKnown,encodeFace)
-            faceDis = face_recognition.face_distance(encodeListKnown,encodeFace)
-            matchIndex = np.argmin(faceDis)
-
-            if matches[matchIndex]:
-                name = classNames[matchIndex].upper()
-                y1,x2,y2,x1 = faceLoc
-                y1,x2,y2,x1 = y1*4,x2*4,y2*4,x1*4
-                cv2.rectangle(img,(x1,y1),(x2,y2),(0,255,0),2)
-                cv2.rectangle(img,(x1,y2-35),(x2,y2),(0,255,0),cv2.FILED)
-                cv2.putText(img,name,(x1+6,y2-6),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,255),2)
-                markAttendance(name)
-        cv2.imshow('Webcam',img)
-        cv2.waitKey(1)
-
-
-    return gray
-
+        succes, img = cap.read()
+        imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
+        imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2GRAY)
+
+        facesCurFrame = face_recognition.face_locations(imgS)
+        encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
+
+        for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
+            matches = face_recognition.compare_faces(
+                encodeListKnown, encodeFace)
+            faceDis = face_recognition.face_distance(
+                encodeListKnown, encodeFace)
+            matchIndex = np.argmin(faceDis)
+
+            if matches[matchIndex]:
+                name = classNames[matchIndex].upper()
+                y1, x2, y2, x1 = faceLoc
+                y1, x2, y2, x1 = y1*4, x2*4, y2*4, x1*4
+                cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
+                cv2.rectangle(img, (x1, y2-35), (x2, y2), (0, 255, 0), cv2.FILED)
+                cv2.putText(img, name, (x1+6, y2-6),
+                            cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
+                markAttendance(name)
+        cv2.imshow('Webcam', img)
+        cv2.waitKey(1)
+
+
 iface = gr.Interface(
-    fn=greet,
-    inputs=gr.Video(source = "webcam", format = "mp4", streaming = "True"),
-    outputs="image"
+    fn=greet,
+    inputs=gr.Video(source="webcam", format="mp4", streaming="True"),
+    outputs="image"
 )
 
 iface.launch()