Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -4,74 +4,113 @@ import numpy as np
 import csv
 from datetime import datetime

-vikas_image = face_recognition.load_image_file("photos/vikas.jpg")
-vikas_encoding = face_recognition.face_encodings(vikas_image)[0]
-face_locations = []
-face_encodings = []
-face_names = []
-s = True
-current_date = now.strftime("%Y-%m-%d")
-f = open(current_date + '.csv', 'w+', newline='')
-lnwriter = csv.writer(f)
-import io
-import streamlit as st
-run = st.checkbox('Run')
-FRAME_WINDOW = st.image([])
-camera = cv2.VideoCapture(0)
+###########################
+from flask import Flask,render_template
+from flask_socketio import SocketIO,emit
+import base64
+
+app = Flask(__name__)
+app.config['SECRET_KEY'] = 'secret!'
+socket = SocketIO(app,async_mode="eventlet")
+
+###################################
+
+video_capture=cv2.VideoCapture(1)
+if not video_capture.isOpened():
+    print("Failed to open the video capture.")
+    exit()
+
+
+sir_image=face_recognition.load_image_file("photos/sir.jpeg")
+sir_encoding=face_recognition.face_encodings(sir_image)[0]
+
+vikas_image=face_recognition.load_image_file("photos/vikas.jpg")
+vikas_encoding=face_recognition.face_encodings(vikas_image)[0]
+
+known_face_encoding=[sir_encoding,vikas_encoding]
+
+known_faces_names=["Sarwan Sir","Vikas"]
+
+students=known_faces_names.copy()
+
+face_locations=[]
+face_encodings=[]
+face_names=[]
+s=True
+
+
+now=datetime.now()
+current_date=now.strftime("%Y-%m-%d")
+
+
+f=open(current_date+'.csv','w+',newline='')
+lnwriter=csv.writer(f)
+
+############################
+def base64_to_image(base64_string):
+    # Extract the base64 encoded binary data from the input string
+    base64_data = base64_string.split(",")[1]
+    # Decode the base64 data to bytes
+    image_bytes = base64.b64decode(base64_data)
+    # Convert the bytes to numpy array
+    image_array = np.frombuffer(image_bytes, dtype=np.uint8)
+    # Decode the numpy array as an image using OpenCV
+    image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
+    return image
+
+@socket.on("connect")
+def test_connect():
+    print("Connected")
+    emit("my response", {"data": "Connected"})
+
+@socket.on("image")
+def receive_image(image):
+    return render_template("index.html" , myimage = image , cname = class_name )
+    while True:
+        # _,frame=camera.read()
+        frame=base64_to_image(image)
+
+        small_frame=cv2.resize(frame,(0,0),fx=0.25,fy=0.25)
+        rgb_small_frame=small_frame[:,:,::-1]
+        if s:
+            face_locations=face_recognition.face_locations(rgb_small_frame)
+            face_encodings = face_recognition.face_encodings(small_frame, face_locations)
+            face_names=[]
+            for face_encoding in face_encodings:
+                matches=face_recognition.compare_faces(known_face_encoding,face_encoding)
+                name=""
+                face_distance=face_recognition.face_distance(known_face_encoding,face_encoding)
+                best_match_index=np.argmin(face_distance)
+                if matches[best_match_index]:
+                    name=known_faces_names[best_match_index]
+
+                face_names.append(name)
+                if name in known_faces_names:
+                    if name in students:
+                        students.remove(name)
+                        print(students)
+                        current_time=now.strftime("%H-%M-%S")
+                        lnwriter.writerow([name,current_time,"Present"])
+        cv2.imshow("attendence system",frame)
+        if cv2.waitKey(1) & 0xFF==ord('q'):
+            break
+
+    f.close()
+
+
+#######################################
+@app.route("/")
+def home():
+    return render_template("index.html")
+
+if __name__ == '__main__':
+    # app.run(debug=True)
+    socket.run(app,host="0.0.0.0", port=7860)
+#######################################
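
As committed, the new "image" handler cannot do any recognition work: `receive_image` returns on its first line (and references an undefined `class_name`), so the `while True:` block below it is dead code, and `cv2.imshow`/`cv2.waitKey` have no display to draw on inside a server process. The `cv2.VideoCapture(1)` check near the top is also the likely source of the Space's runtime error, since the container has no camera, so `exit()` stops the app before `socket.run` is ever reached. Below is a minimal sketch, not the committed code, of how the handler could be reworked to process one frame per event. It assumes the module-level objects defined above (`base64_to_image`, `known_face_encoding`, `known_faces_names`, `students`, `now`, `lnwriter`, `f`) and an assumed client-side event name `"processed_image"` that index.html would have to listen for.

# Sketch only: process one frame per "image" event and emit the result back,
# instead of looping with cv2.imshow on the server.
import base64

import cv2
import numpy as np
import face_recognition
from flask_socketio import emit


@socket.on("image")
def receive_image(image):
    # base64_to_image, known_face_encoding, known_faces_names, students,
    # now, lnwriter and f are the module-level objects defined in app.py above.
    frame = base64_to_image(image)

    # Same downscaling as the committed code, plus an explicit BGR -> RGB
    # conversion (face_recognition expects RGB and a contiguous array).
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)

    face_locations = face_recognition.face_locations(rgb_small_frame)
    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

    for face_encoding in face_encodings:
        matches = face_recognition.compare_faces(known_face_encoding, face_encoding)
        face_distance = face_recognition.face_distance(known_face_encoding, face_encoding)
        best_match_index = np.argmin(face_distance)
        name = known_faces_names[best_match_index] if matches[best_match_index] else ""

        if name in students:
            # First time this person is seen: log them as present once.
            students.remove(name)
            current_time = now.strftime("%H-%M-%S")
            lnwriter.writerow([name, current_time, "Present"])
            f.flush()

    # Return the frame to the browser as a JPEG data URL on an assumed
    # "processed_image" event (the client must listen for it).
    ok, buffer = cv2.imencode(".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, 80])
    if ok:
        payload = base64.b64encode(buffer).decode("utf-8")
        emit("processed_image", "data:image/jpeg;base64," + payload)

With a handler like this, the client would emit each webcam frame as a base64 data URL on the `"image"` event and draw whatever arrives on `"processed_image"`, and the unused `video_capture` block at the top of app.py could be dropped entirely.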