David Driscoll committed
Commit 5c7b604 · 1 Parent(s): a93273a

Remove onnx, again

Files changed (1)
  1. app.py +3 -49
app.py CHANGED
@@ -7,8 +7,7 @@ from torchvision.models.detection import FasterRCNN_ResNet50_FPN_Weights
 from PIL import Image
 import mediapipe as mp
 from fer import FER # Facial emotion recognition
-from transformers import AutoFeatureExtractor, AutoModel # (Unused now for facial recognition)
-import onnxruntime as rt # New import for ONNX Runtime
+# from transformers import AutoFeatureExtractor, AutoModel # (Unused now for facial recognition)
 
 # -----------------------------
 # Configuration
@@ -49,12 +48,6 @@ emotion_detector = FER(mtcnn=True)
 # Retrieve object categories from model weights metadata
 object_categories = FasterRCNN_ResNet50_FPN_Weights.DEFAULT.meta["categories"]
 
-# -----------------------------
-# Facial Recognition Model (Marltgap/FaceTransformerOctupletLoss ONNX)
-# (No longer used in the UI; kept here for reference)
-# -----------------------------
-facial_recognition_onnx = rt.InferenceSession("FaceTransformerOctupletLoss.onnx", providers=rt.get_available_providers())
-
 # -----------------------------
 # Overlay Drawing Functions
 # -----------------------------
@@ -151,7 +144,7 @@ def compute_facemesh_overlay(image):
     return boxes, text
 
 # -----------------------------
-# New Facemesh Functions
+# New Facemesh Functions (for Facial Recognition)
 # -----------------------------
 def compute_facemesh_overlay(image):
     """
@@ -180,39 +173,6 @@ def analyze_facemesh(image):
     annotated_image, text = compute_facemesh_overlay(image)
     return annotated_image, f"<div style='color: lime !important;'>Facemesh Analysis: {text}</div>"
 
-# -----------------------------
-# (Retained) Facial Recognition Function (Not used in UI anymore)
-# -----------------------------
-def compute_facial_recognition_vector(image):
-    """
-    Detects a face using MediaPipe, crops and resizes it to 112x112, then computes its embedding
-    vector using the Marltgap FaceTransformerOctupletLoss ONNX model.
-    """
-    frame_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
-    frame_bgr_small = cv2.resize(frame_bgr, DESIRED_SIZE)
-    frame_rgb_small = cv2.cvtColor(frame_bgr_small, cv2.COLOR_BGR2RGB)
-    face_results = face_detection.process(frame_rgb_small)
-    if face_results.detections:
-        detection = face_results.detections[0]
-        bbox = detection.location_data.relative_bounding_box
-        h, w, _ = frame_rgb_small.shape
-        x = int(bbox.xmin * w)
-        y = int(bbox.ymin * h)
-        box_w = int(bbox.width * w)
-        box_h = int(bbox.height * h)
-        face_crop = frame_rgb_small[y:y+box_h, x:x+box_w]
-        # Resize the face crop to the required dimensions: 112x112
-        face_crop_resized = cv2.resize(face_crop, (112, 112))
-        # Convert image to float32 (values between 0 and 255)
-        input_image = face_crop_resized.astype(np.float32)
-        # Run inference using the ONNX model
-        outputs = facial_recognition_onnx.run(None, {"input_image": input_image})
-        embedding = outputs[0][0] # Assuming the output shape is (1, 512)
-        vector_str = np.array2string(embedding, precision=2, separator=',')
-        return face_crop, vector_str
-    else:
-        return np.array(image), "No face detected"
-
 # -----------------------------
 # Main Analysis Functions for Single Image
 # -----------------------------
@@ -266,12 +226,6 @@ def analyze_faces_current(image):
     output = draw_boxes_overlay(output, faces_cache["boxes"], (0, 0, 255))
     return output, f"<div style='color: lime !important;'>Face Detection: {faces_cache['text']}</div>"
 
-# (The old facial recognition analysis function is retained below but not linked to any UI tab)
-def analyze_facial_recognition(image):
-    # Compute and return the facial vector (and the cropped face)
-    face_crop, vector_str = compute_facial_recognition_vector(image)
-    return face_crop, f"<div style='color: lime !important;'>Facial Vector: {vector_str}</div>"
-
 def analyze_all(image):
     current_frame = np.array(image).copy()
     landmarks, posture_text = compute_posture_overlay(image)
@@ -369,7 +323,7 @@ faces_interface = gr.Interface(
 )
 
 # -----------------------------
-# New Facemesh Interface (Replaces the old Facial Recognition tab)
+# New Facemesh Interface (Used for facial recognition via facemesh)
 # -----------------------------
 facemesh_interface = gr.Interface(
     fn=analyze_facemesh,
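For reference, the embedding step removed here can be reproduced standalone. A minimal sketch, assuming the same "FaceTransformerOctupletLoss.onnx" file, the "input_image" input name, and the (1, 512) output shape noted in the removed code; the cosine-similarity helper is a typical way such embeddings are compared and is not from the original file:

import cv2
import numpy as np
import onnxruntime as rt

# Load the face-embedding model (same file and providers as the removed code)
session = rt.InferenceSession("FaceTransformerOctupletLoss.onnx",
                              providers=rt.get_available_providers())

def embed_face(face_rgb: np.ndarray) -> np.ndarray:
    """Resize an RGB face crop to 112x112 and return its embedding vector."""
    face = cv2.resize(face_rgb, (112, 112)).astype(np.float32)  # float32, values in [0, 255]
    outputs = session.run(None, {"input_image": face})
    return outputs[0][0]  # assumed output shape (1, 512), as in the removed code

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    """Higher cosine similarity between two embeddings suggests the same identity."""
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))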
 
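The facemesh path that takes over the facial-recognition tab is only referenced in these hunks; its body is unchanged by this commit. A plausible minimal shape for compute_facemesh_overlay, assuming MediaPipe's FaceMesh solution and its standard drawing utilities (hypothetical, not the committed implementation):

import mediapipe as mp
import numpy as np

mp_face_mesh = mp.solutions.face_mesh
mp_drawing = mp.solutions.drawing_utils

def compute_facemesh_overlay(image):
    """Draw the MediaPipe facemesh tesselation onto a copy of the RGB image."""
    frame = np.array(image).copy()
    with mp_face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1) as mesh:
        results = mesh.process(frame)
    if not results.multi_face_landmarks:
        return frame, "No face detected"
    for landmarks in results.multi_face_landmarks:
        mp_drawing.draw_landmarks(frame, landmarks, mp_face_mesh.FACEMESH_TESSELATION)
    return frame, "Facemesh detected"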