Shafeek Saleem commited on
Commit
16eecd4
·
1 Parent(s): 682e7c7

Fix bug in image paths

Browse files
.idea/ascii_demo.iml CHANGED
@@ -1,7 +1,9 @@
1
  <?xml version="1.0" encoding="UTF-8"?>
2
  <module type="PYTHON_MODULE" version="4">
3
  <component name="NewModuleRootManager">
4
- <content url="file://$MODULE_DIR$" />
 
 
5
  <orderEntry type="inheritedJdk" />
6
  <orderEntry type="sourceFolder" forTests="false" />
7
  </component>
 
1
  <?xml version="1.0" encoding="UTF-8"?>
2
  <module type="PYTHON_MODULE" version="4">
3
  <component name="NewModuleRootManager">
4
+ <content url="file://$MODULE_DIR$">
5
+ <sourceFolder url="file://$MODULE_DIR$/pages" isTestSource="false" />
6
+ </content>
7
  <orderEntry type="inheritedJdk" />
8
  <orderEntry type="sourceFolder" forTests="false" />
9
  </component>
pages/3_Training the Model.py CHANGED
@@ -3,6 +3,7 @@ from PIL import Image
3
  from utils.levels import complete_level, render_page, initialize_level
4
  from utils.login import get_login, initialize_login
5
  from utils.inference import query
 
6
  import os
7
  import time
8
  import face_recognition
@@ -18,15 +19,6 @@ LEVEL = 3
18
 
19
  PKL_PATH = 'dataset/database.pkl'
20
 
21
- def get_database():
22
- database = {}
23
-
24
- if os.path.getsize(PKL_PATH) > 0:
25
- with open(PKL_PATH, "rb") as f:
26
- unpickler = pickle.Unpickler(f)
27
- database = unpickler.load()
28
- return database
29
-
30
  def step3_page():
31
  st.header("Training the Model")
32
  st.subheader("Face encoding")
@@ -71,7 +63,7 @@ def step3_page():
71
  if st.button("Train Model"):
72
  my_bar = st.progress(0, text="Training....")
73
  if len(images) > 0:
74
- database = get_database()
75
  for i in range(100):
76
  my_bar.progress(i, text="Training....")
77
  my_bar.progress(100, text="Successfully Trained!")
@@ -85,23 +77,25 @@ def step3_page():
85
  face_id = img.split(".")[0]
86
 
87
  # check if id already exists
88
- existing_id = [i for i in database.keys()]
89
  if face_id in existing_id:
90
  st.error(f"Encoding already created for : {face_id}")
91
  else:
92
- database[face_id] = {'name': face_name,
93
- 'encoding': my_face_encoding}
 
94
  with open(PKL_PATH, 'wb') as f:
95
  pkl.dump(database, f)
96
 
97
  my_bar.progress(int((i + 1) / len(images) * 100), text="Generating face encodings...")
98
  my_bar.progress(100, text="Successfully encoded all the known faces!")
99
  st.success("Face encoding completed successfully!")
100
- if st.button("Complete"):
101
- complete_level(LEVEL)
102
  else:
103
  my_bar.empty()
104
  st.error("You have not taken any images yet! Do the previous steps first!")
 
 
105
 
106
 
107
  render_page(step3_page, LEVEL)
 
3
  from utils.levels import complete_level, render_page, initialize_level
4
  from utils.login import get_login, initialize_login
5
  from utils.inference import query
6
+ from utils.database import get_database
7
  import os
8
  import time
9
  import face_recognition
 
19
 
20
  PKL_PATH = 'dataset/database.pkl'
21
 
 
 
 
 
 
 
 
 
 
22
  def step3_page():
23
  st.header("Training the Model")
24
  st.subheader("Face encoding")
 
63
  if st.button("Train Model"):
64
  my_bar = st.progress(0, text="Training....")
65
  if len(images) > 0:
66
+ database = get_database(PKL_PATH)
67
  for i in range(100):
68
  my_bar.progress(i, text="Training....")
69
  my_bar.progress(100, text="Successfully Trained!")
 
77
  face_id = img.split(".")[0]
78
 
79
  # check if id already exists
80
+ existing_id = [database[i]["face_id"] for i in database.keys()]
81
  if face_id in existing_id:
82
  st.error(f"Encoding already created for : {face_id}")
83
  else:
84
+ database[i] = {'face_id': face_id,
85
+ 'name': face_name,
86
+ 'encoding': my_face_encoding}
87
  with open(PKL_PATH, 'wb') as f:
88
  pkl.dump(database, f)
89
 
90
  my_bar.progress(int((i + 1) / len(images) * 100), text="Generating face encodings...")
91
  my_bar.progress(100, text="Successfully encoded all the known faces!")
92
  st.success("Face encoding completed successfully!")
93
+
 
94
  else:
95
  my_bar.empty()
96
  st.error("You have not taken any images yet! Do the previous steps first!")
97
+ if st.button("Complete"):
98
+ complete_level(LEVEL)
99
 
100
 
101
  render_page(step3_page, LEVEL)
pages/4_Trying It Out.py CHANGED
@@ -1,7 +1,8 @@
1
  import streamlit as st
2
  from utils.levels import complete_level, render_page, initialize_level
3
  from utils.login import get_login, initialize_login
4
- from utils.inference import query
 
5
  import os
6
  import time
7
  import face_recognition
@@ -28,56 +29,43 @@ def step4_page():
28
  st.info(
29
  "Now that we know how our face recognition application works, let's try it out!"
30
  )
31
- face_encodings_dir = os.path.join(".sessions", get_login()["username"], "face_encodings")
32
- face_encodings = os.listdir(face_encodings_dir)
33
- known_face_encodings = []
34
- known_face_names = []
35
- if len(face_encodings) > 0:
36
- for i, face_encoding in enumerate(face_encodings):
37
- known_face_encoding = np.load(os.path.join(face_encodings_dir, face_encoding))
38
- face_name = face_encoding.split(".")[0]
39
- known_face_encodings.append(known_face_encoding)
40
- known_face_names.append(face_name)
41
-
42
- st.info("Select an image to analyze!")
43
  input_type = st.radio("Select the Input Type", ["Image", "Camera"])
 
 
 
 
44
 
45
- if input_type == "Camera":
46
- picture = st.camera_input("Take a picture")
 
 
 
 
 
 
 
 
 
47
  else:
48
- picture = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
49
- if picture:
50
- image = face_recognition.load_image_file(picture)
51
- face_locations = face_recognition.face_locations(image)
52
- face_encodings = face_recognition.face_encodings(image, face_locations)
53
-
54
- st.image(image)
55
- # Loop through each face in this image
56
- cols = st.columns(len(face_encodings))
57
- i = 0
58
- # st.info("Select the tolerance level you want for your model! (How much distance between faces to consider it a match. "
59
- # "Lower is more strict. 0.6 is typical best performance.)")
60
- # tolerance = st.slider('Select tolerance level', 0.0, 1.0, 0.3, 0.1)
61
- # if tolerance:
62
- for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
63
- # See if the face is a match for the known face(s)
64
- # matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
65
-
66
- name = "Unknown"
67
- # If a match was found in known_face_encodings, just use the first one.
68
- face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
69
-
70
- # Calculate the row sums
71
- row_sums = np.sum(face_distances, axis=1)
72
- best_match_index = np.argmin(row_sums)
73
- if best_match_index is not None:
74
- name = known_face_names[best_match_index]
75
 
76
- face_image = image[top:bottom, left:right]
77
- pil_image = Image.fromarray(face_image)
78
- cols[i].image(pil_image, use_column_width=True)
79
- cols[i].write("Person name: " +name)
80
- i+=1
 
 
 
 
 
81
 
82
  st.info("Click on the button below to complete this level!")
83
  if st.button("Complete Level"):
 
1
  import streamlit as st
2
  from utils.levels import complete_level, render_page, initialize_level
3
  from utils.login import get_login, initialize_login
4
+ from utils.inference import recognize
5
+ from utils.database import get_database
6
  import os
7
  import time
8
  import face_recognition
 
29
  st.info(
30
  "Now that we know how our face recognition application works, let's try it out!"
31
  )
32
+ # Select input type
33
+ st.info("Select your input type to analyze!")
 
 
 
 
 
 
 
 
 
 
34
  input_type = st.radio("Select the Input Type", ["Image", "Camera"])
35
+ # Put slide to adjust tolerance
36
+ tolerance = st.slider("Tolerance", 0.0, 1.0, 0.5, 0.01)
37
+ st.info(
38
+ "Tolerance is the threshold for face recognition. The lower the tolerance, the more strict the face recognition. The higher the tolerance, the more loose the face recognition.")
39
 
40
+ if input_type == "Image":
41
+ st.title("Face Recognition App")
42
+ uploaded_images = st.file_uploader("Upload", type=['jpg', 'png', 'jpeg'], accept_multiple_files=True)
43
+ if len(uploaded_images) != 0:
44
+ # Read uploaded image with face_recognition
45
+ for image in uploaded_images:
46
+ image = face_recognition.load_image_file(image)
47
+ image, name, face_id = recognize(image, tolerance)
48
+ st.image(image)
49
+ else:
50
+ st.info("Please upload an image")
51
  else:
52
+ st.title("Face Recognition App")
53
+ # Camera Settings
54
+ cam = cv2.VideoCapture(0)
55
+ cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
56
+ cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
57
+ FRAME_WINDOW = st.image([])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
 
59
+ while True:
60
+ ret, frame = cam.read()
61
+ if not ret:
62
+ st.error("Failed to capture frame from camera")
63
+ st.info("Please turn off the other app that is using the camera and restart app")
64
+ st.stop()
65
+ image, name, face_id = recognize(frame, tolerance)
66
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
67
+ # Display name and ID of the person
68
+ FRAME_WINDOW.image(image)
69
 
70
  st.info("Click on the button below to complete this level!")
71
  if st.button("Complete Level"):
utils/database.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pickle as pkl
3
+
4
+ def get_database(PKL_PATH):
5
+ database = {}
6
+
7
+ if os.path.getsize(PKL_PATH) > 0:
8
+ with open(PKL_PATH, "rb") as f:
9
+ unpickler = pickle.Unpickler(f)
10
+ database = unpickler.load()
11
+ return database
utils/inference.py CHANGED
@@ -1,10 +1,28 @@
1
- import requests
 
 
 
 
 
2
 
3
- API_URL = "https://api-inference.huggingface.co/models/CynthiaCR/emotions_classifier"
4
- headers = {"Authorization": "Bearer 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"}
5
-
6
- def query(filename):
7
- with open(filename, "rb") as f:
8
- data = f.read()
9
- response = requests.post(API_URL, headers=headers, data=data)
10
- return response.json()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from utils.database import get_database
2
+ import os
3
+ import face_recognition
4
+ import cv2
5
+ import numpy as np
6
+ from PIL import Image
7
 
8
+ def recognize(image,tolerance):
9
+ database = get_databse()
10
+ known_encoding = [database[id]['encoding'] for id in database.keys()]
11
+ name = 'Unknown'
12
+ face_id = 'Unknown'
13
+ face_locations = face_recognition.face_locations(image)
14
+ face_encodings = face_recognition.face_encodings(image,face_locations)
15
+ for (top,right,bottom,left), face_encoding in zip(face_locations,face_encodings):
16
+ matches = face_recognition.compare_faces(known_encoding,face_encoding,tolerance=tolerance)
17
+ distance = face_recognition.face_distance(known_encoding,face_encoding)
18
+ name = 'Unknown'
19
+ face_id = 'Unknown'
20
+ if True in matches:
21
+ match_index = matches.index(True)
22
+ name = database[match_index]['name']
23
+ face_id = database[match_index]['face_id'].split("_")[1]
24
+ distance = round(distance[match_index],2)
25
+ cv2.putText(image,str(distance),(left,top-30),cv2.FONT_HERSHEY_SIMPLEX,0.75,(0,255,0),2)
26
+ cv2.rectangle(image,(left,top),(right,bottom),(0,255,0),2)
27
+ cv2.putText(image,name,(left,top-10),cv2.FONT_HERSHEY_SIMPLEX,0.75,(0,255,0),2)
28
+ return image, name, face_id