import gradio as gr
import cv2
import numpy as np
import torch
from torchvision import models, transforms
from torchvision.models.detection import FasterRCNN_ResNet50_FPN_Weights
from PIL import Image
import mediapipe as mp
from fer import FER # Facial emotion recognition
from transformers import AutoFeatureExtractor, AutoModel # (Unused now for facial recognition)
import onnxruntime as rt # New import for ONNX Runtime
# -----------------------------
# Configuration
# -----------------------------
SKIP_RATE = 1 # For image processing, always run the analysis
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
DESIRED_SIZE = (640, 480)
# -----------------------------
# Global caches for overlay info and frame counters
# -----------------------------
posture_cache = {"landmarks": None, "text": "Initializing...", "counter": 0}
emotion_cache = {"text": "Initializing...", "counter": 0}
objects_cache = {"boxes": None, "text": "Initializing...", "object_list_text": "", "counter": 0}
faces_cache = {"boxes": None, "text": "Initializing...", "counter": 0}
# -----------------------------
# Initialize Models and Helpers
# -----------------------------
# MediaPipe Pose, Face Detection, and Face Mesh
mp_pose = mp.solutions.pose
pose = mp_pose.Pose()
mp_drawing = mp.solutions.drawing_utils
mp_face_detection = mp.solutions.face_detection
face_detection = mp_face_detection.FaceDetection(min_detection_confidence=0.5)
# Object Detection using Faster R-CNN
object_detection_model = models.detection.fasterrcnn_resnet50_fpn(
    weights=FasterRCNN_ResNet50_FPN_Weights.DEFAULT
)
object_detection_model.eval().to(device)
obj_transform = transforms.Compose([transforms.ToTensor()])
# Initialize the FER emotion detector (using the FER package)
emotion_detector = FER(mtcnn=True)
# Retrieve object categories from model weights metadata
object_categories = FasterRCNN_ResNet50_FPN_Weights.DEFAULT.meta["categories"]
# -----------------------------
# Facial Recognition Model (Marltgap/FaceTransformerOctupletLoss ONNX)
# (No longer used in the UI; kept here for reference)
# -----------------------------
facial_recognition_onnx = rt.InferenceSession("FaceTransformerOctupletLoss.onnx", providers=rt.get_available_providers())
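# Note: the run() call further below assumes the model's input tensor is named "input_image".
# If the exported ONNX file uses a different name, it can be read from the session itself.
# A minimal helper sketch (not called anywhere in this app):
def onnx_io_names(session):
    """Return the input and output tensor names of an ONNX Runtime session."""
    return [inp.name for inp in session.get_inputs()], [out.name for out in session.get_outputs()]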
# -----------------------------
# Overlay Drawing Functions
# -----------------------------
def draw_posture_overlay(raw_frame, landmarks):
    # Draw the skeleton connections between pose landmarks
    for connection in mp_pose.POSE_CONNECTIONS:
        start_idx, end_idx = connection
        if start_idx < len(landmarks) and end_idx < len(landmarks):
            start_point = landmarks[start_idx]
            end_point = landmarks[end_idx]
            cv2.line(raw_frame, start_point, end_point, (50, 205, 50), 2)
    # Draw the landmark points themselves
    for (x, y) in landmarks:
        cv2.circle(raw_frame, (x, y), 4, (50, 205, 50), -1)
    return raw_frame

def draw_boxes_overlay(raw_frame, boxes, color):
    for (x1, y1, x2, y2) in boxes:
        cv2.rectangle(raw_frame, (x1, y1), (x2, y2), color, 2)
    return raw_frame
# -----------------------------
# Heavy (Synchronous) Detection Functions
# -----------------------------
def compute_posture_overlay(image):
    frame_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    h, w, _ = frame_bgr.shape
    frame_bgr_small = cv2.resize(frame_bgr, DESIRED_SIZE)
    frame_rgb_small = cv2.cvtColor(frame_bgr_small, cv2.COLOR_BGR2RGB)
    pose_results = pose.process(frame_rgb_small)
    if pose_results.pose_landmarks:
        landmarks = []
        for lm in pose_results.pose_landmarks.landmark:
            # Landmarks are normalized to [0, 1], so map them straight to the original resolution
            x = int(lm.x * w)
            y = int(lm.y * h)
            landmarks.append((x, y))
        text = "Posture detected"
    else:
        landmarks = []
        text = "No posture detected"
    return landmarks, text
def compute_emotion_overlay(image):
    frame_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    frame_bgr_small = cv2.resize(frame_bgr, DESIRED_SIZE)
    frame_rgb_small = cv2.cvtColor(frame_bgr_small, cv2.COLOR_BGR2RGB)
    emotions = emotion_detector.detect_emotions(frame_rgb_small)
    if emotions:
        top_emotion, score = max(emotions[0]["emotions"].items(), key=lambda x: x[1])
        text = f"{top_emotion} ({score:.2f})"
    else:
        text = "No face detected"
    return text
def compute_objects_overlay(image):
    frame_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    h, w, _ = frame_bgr.shape
    frame_bgr_small = cv2.resize(frame_bgr, DESIRED_SIZE)
    small_h, small_w, _ = frame_bgr_small.shape
    frame_rgb_small = cv2.cvtColor(frame_bgr_small, cv2.COLOR_BGR2RGB)
    image_pil = Image.fromarray(frame_rgb_small)
    img_tensor = obj_transform(image_pil).to(device)
    with torch.no_grad():
        detections = object_detection_model([img_tensor])[0]
    threshold = 0.8
    # Detections are in the resized frame's coordinates; scale them back to the original
    # resolution so the boxes line up when drawn on the full-size image.
    scale_x, scale_y = w / small_w, h / small_h
    boxes = []
    object_list = []
    for box, score, label in zip(detections["boxes"], detections["scores"], detections["labels"]):
        if score > threshold:
            x1, y1, x2, y2 = box.cpu().numpy()
            boxes.append((int(x1 * scale_x), int(y1 * scale_y), int(x2 * scale_x), int(y2 * scale_y)))
            label_idx = int(label)
            label_name = object_categories[label_idx] if label_idx < len(object_categories) else "Unknown"
            object_list.append(f"{label_name} ({score:.2f})")
    text = f"Detected {len(boxes)} object(s)" if boxes else "No objects detected"
    object_list_text = " | ".join(object_list) if object_list else "None"
    return boxes, text, object_list_text
def compute_faces_overlay(image):
    frame_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    h, w, _ = frame_bgr.shape
    frame_bgr_small = cv2.resize(frame_bgr, DESIRED_SIZE)
    frame_rgb_small = cv2.cvtColor(frame_bgr_small, cv2.COLOR_BGR2RGB)
    face_results = face_detection.process(frame_rgb_small)
    boxes = []
    if face_results.detections:
        for detection in face_results.detections:
            # The bounding box is relative, so map it directly onto the original resolution
            bbox = detection.location_data.relative_bounding_box
            x = int(bbox.xmin * w)
            y = int(bbox.ymin * h)
            box_w = int(bbox.width * w)
            box_h = int(bbox.height * h)
            boxes.append((x, y, x + box_w, y + box_h))
        text = f"Detected {len(boxes)} face(s)"
    else:
        text = "No faces detected"
    return boxes, text
# -----------------------------
# New Facemesh Functions
# -----------------------------
def compute_facemesh_overlay(image):
    """
    Uses MediaPipe Face Mesh to detect and draw facial landmarks.
    """
    frame_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    h, w, _ = frame_bgr.shape
    # Initialize Face Mesh in static mode
    face_mesh = mp.solutions.face_mesh.FaceMesh(
        static_image_mode=True, max_num_faces=1, refine_landmarks=True, min_detection_confidence=0.5
    )
    results = face_mesh.process(cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB))
    if results.multi_face_landmarks:
        for face_landmarks in results.multi_face_landmarks:
            for landmark in face_landmarks.landmark:
                x = int(landmark.x * w)
                y = int(landmark.y * h)
                cv2.circle(frame_bgr, (x, y), 1, (0, 255, 0), -1)
        text = "Facemesh detected"
    else:
        text = "No facemesh detected"
    face_mesh.close()
    # Convert back to RGB so Gradio displays the annotated frame with correct colors
    return cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB), text
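# An alternative drawing approach (a sketch only, not used above): MediaPipe's drawing
# utilities can render the whole Face Mesh tesselation in one call instead of plotting
# each landmark as a circle. Here `face_landmarks` is one entry of results.multi_face_landmarks.
def draw_facemesh_tesselation(frame_bgr, face_landmarks):
    """Draw the Face Mesh tesselation onto a BGR frame using MediaPipe's drawing utils."""
    mp_drawing.draw_landmarks(
        image=frame_bgr,
        landmark_list=face_landmarks,
        connections=mp.solutions.face_mesh.FACEMESH_TESSELATION,
        landmark_drawing_spec=None,
        connection_drawing_spec=mp.solutions.drawing_styles.get_default_face_mesh_tesselation_style(),
    )
    return frame_bgr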
def analyze_facemesh(image):
    annotated_image, text = compute_facemesh_overlay(image)
    return annotated_image, f"<div style='color: lime !important;'>Facemesh Analysis: {text}</div>"
# -----------------------------
# (Retained) Facial Recognition Function (Not used in UI anymore)
# -----------------------------
def compute_facial_recognition_vector(image):
    """
    Detects a face using MediaPipe, crops and resizes it to 112x112, then computes its embedding
    vector using the Marltgap FaceTransformerOctupletLoss ONNX model.
    """
    frame_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    frame_bgr_small = cv2.resize(frame_bgr, DESIRED_SIZE)
    frame_rgb_small = cv2.cvtColor(frame_bgr_small, cv2.COLOR_BGR2RGB)
    face_results = face_detection.process(frame_rgb_small)
    if face_results.detections:
        detection = face_results.detections[0]
        bbox = detection.location_data.relative_bounding_box
        h, w, _ = frame_rgb_small.shape
        x = int(bbox.xmin * w)
        y = int(bbox.ymin * h)
        box_w = int(bbox.width * w)
        box_h = int(bbox.height * h)
        face_crop = frame_rgb_small[y:y+box_h, x:x+box_w]
        # Resize the face crop to the required dimensions: 112x112
        face_crop_resized = cv2.resize(face_crop, (112, 112))
        # Convert image to float32 (values between 0 and 255)
        input_image = face_crop_resized.astype(np.float32)
        # Run inference using the ONNX model
        outputs = facial_recognition_onnx.run(None, {"input_image": input_image})
        embedding = outputs[0][0]  # Assuming the output shape is (1, 512)
        vector_str = np.array2string(embedding, precision=2, separator=',')
        return face_crop, vector_str
    else:
        return np.array(image), "No face detected"
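# The embedding above is only rendered as a string in the UI. As a sketch of how two such
# embeddings could actually be compared (assuming 1-D float vectors of equal length, e.g.
# the 512-dimensional output noted above), a plain cosine similarity:
def cosine_similarity(embedding_a, embedding_b):
    """Cosine similarity between two 1-D embedding vectors; values near 1.0 suggest the same identity."""
    a = np.asarray(embedding_a, dtype=np.float32)
    b = np.asarray(embedding_b, dtype=np.float32)
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-8))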
# -----------------------------
# Main Analysis Functions for Single Image
# -----------------------------
def analyze_posture_current(image):
    global posture_cache
    posture_cache["counter"] += 1
    current_frame = np.array(image)
    if posture_cache["counter"] % SKIP_RATE == 0 or posture_cache["landmarks"] is None:
        landmarks, text = compute_posture_overlay(image)
        posture_cache["landmarks"] = landmarks
        posture_cache["text"] = text
    output = current_frame.copy()
    if posture_cache["landmarks"]:
        output = draw_posture_overlay(output, posture_cache["landmarks"])
    return output, f"<div style='color: lime !important;'>Posture Analysis: {posture_cache['text']}</div>"

def analyze_emotion_current(image):
    global emotion_cache
    emotion_cache["counter"] += 1
    current_frame = np.array(image)
    if emotion_cache["counter"] % SKIP_RATE == 0 or emotion_cache["text"] is None:
        text = compute_emotion_overlay(image)
        emotion_cache["text"] = text
    return current_frame, f"<div style='color: lime !important;'>Emotion Analysis: {emotion_cache['text']}</div>"
def analyze_objects_current(image):
    global objects_cache
    objects_cache["counter"] += 1
    current_frame = np.array(image)
    if objects_cache["counter"] % SKIP_RATE == 0 or objects_cache["boxes"] is None:
        boxes, text, object_list_text = compute_objects_overlay(image)
        objects_cache["boxes"] = boxes
        objects_cache["text"] = text
        objects_cache["object_list_text"] = object_list_text
    output = current_frame.copy()
    if objects_cache["boxes"]:
        output = draw_boxes_overlay(output, objects_cache["boxes"], (255, 255, 0))
    combined_text = f"Object Detection: {objects_cache['text']}<br>Details: {objects_cache['object_list_text']}"
    return output, f"<div style='color: lime !important;'>{combined_text}</div>"

def analyze_faces_current(image):
    global faces_cache
    faces_cache["counter"] += 1
    current_frame = np.array(image)
    if faces_cache["counter"] % SKIP_RATE == 0 or faces_cache["boxes"] is None:
        boxes, text = compute_faces_overlay(image)
        faces_cache["boxes"] = boxes
        faces_cache["text"] = text
    output = current_frame.copy()
    if faces_cache["boxes"]:
        output = draw_boxes_overlay(output, faces_cache["boxes"], (0, 0, 255))
    return output, f"<div style='color: lime !important;'>Face Detection: {faces_cache['text']}</div>"
# (The old facial recognition analysis function is retained below but not linked to any UI tab)
def analyze_facial_recognition(image):
    # Compute and return the facial vector (and the cropped face)
    face_crop, vector_str = compute_facial_recognition_vector(image)
    return face_crop, f"<div style='color: lime !important;'>Facial Vector: {vector_str}</div>"
def analyze_all(image):
    current_frame = np.array(image).copy()
    landmarks, posture_text = compute_posture_overlay(image)
    if landmarks:
        current_frame = draw_posture_overlay(current_frame, landmarks)
    emotion_text = compute_emotion_overlay(image)
    boxes_obj, objects_text, object_list_text = compute_objects_overlay(image)
    if boxes_obj:
        current_frame = draw_boxes_overlay(current_frame, boxes_obj, (255, 255, 0))
    boxes_face, faces_text = compute_faces_overlay(image)
    if boxes_face:
        current_frame = draw_boxes_overlay(current_frame, boxes_face, (0, 0, 255))
    combined_text = (
        f"<b>Posture Analysis:</b> {posture_text}<br>"
        f"<b>Emotion Analysis:</b> {emotion_text}<br>"
        f"<b>Object Detection:</b> {objects_text}<br>"
        f"<b>Detected Objects:</b> {object_list_text}<br>"
        f"<b>Face Detection:</b> {faces_text}"
    )
    if object_list_text and object_list_text != "None":
        description_text = f"Image Description: The scene features {object_list_text}."
    else:
        description_text = "Image Description: No prominent objects detected."
    combined_text += f"<br><br><div style='border:1px solid lime; padding:10px; box-shadow: 0 0 10px lime;'><b>{description_text}</b></div>"
    combined_text_html = f"<div style='color: lime !important;'>{combined_text}</div>"
    return current_frame, combined_text_html
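# Optional offline check (a sketch, not wired into the Gradio UI): run the combined
# analysis on a local file and save the annotated frame. The file paths are placeholders.
def run_all_on_file(image_path="sample.jpg", output_path="annotated.png"):
    """Run analyze_all on an image file and write the annotated result to disk."""
    img = np.array(Image.open(image_path).convert("RGB"))
    annotated, summary_html = analyze_all(img)
    # analyze_all returns an RGB array; OpenCV expects BGR when writing to disk
    cv2.imwrite(output_path, cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))
    return summary_html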
# -----------------------------
# Custom CSS (High-Tech Neon Theme)
# -----------------------------
custom_css = """
@import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700&display=swap');
body {
background-color: #0e0e0e;
font-family: 'Orbitron', sans-serif;
color: #32CD32;
}
.gradio-container {
background: linear-gradient(135deg, #1a1a1a, #333333);
border: 2px solid #32CD32;
box-shadow: 0 0 15px #32CD32;
border-radius: 10px;
padding: 20px;
max-width: 1200px;
margin: auto;
}
.gradio-title, .gradio-description, .tab-item, .tab-item * {
color: #32CD32 !important;
text-shadow: 0 0 10px #32CD32;
}
input, button, .output {
border: 1px solid #32CD32;
box-shadow: 0 0 8px #32CD32;
color: #32CD32;
}
"""
# -----------------------------
# Create Individual Interfaces for Image Processing
# -----------------------------
posture_interface = gr.Interface(
    fn=analyze_posture_current,
    inputs=gr.Image(label="Upload an Image for Posture Analysis"),
    outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.HTML(label="Posture Analysis")],
    title="Posture",
    description="Detects your posture using MediaPipe with connector lines.",
    live=False
)

emotion_interface = gr.Interface(
    fn=analyze_emotion_current,
    inputs=gr.Image(label="Upload an Image for Emotion Analysis"),
    outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.HTML(label="Emotion Analysis")],
    title="Emotion",
    description="Detects facial emotions using FER.",
    live=False
)

objects_interface = gr.Interface(
    fn=analyze_objects_current,
    inputs=gr.Image(label="Upload an Image for Object Detection"),
    outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.HTML(label="Object Detection")],
    title="Objects",
    description="Detects objects using a pretrained Faster R-CNN.",
    live=False
)

faces_interface = gr.Interface(
    fn=analyze_faces_current,
    inputs=gr.Image(label="Upload an Image for Face Detection"),
    outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.HTML(label="Face Detection")],
    title="Faces",
    description="Detects faces using MediaPipe.",
    live=False
)
# -----------------------------
# New Facemesh Interface (Replaces the old Facial Recognition tab)
# -----------------------------
facemesh_interface = gr.Interface(
    fn=analyze_facemesh,
    inputs=gr.Image(label="Upload an Image for Facemesh"),
    outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.HTML(label="Facemesh Analysis")],
    title="Facemesh",
    description="Detects facial landmarks using MediaPipe Face Mesh.",
    live=False
)

all_interface = gr.Interface(
    fn=analyze_all,
    inputs=gr.Image(label="Upload an Image for All Inferences"),
    outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.HTML(label="Combined Analysis")],
    title="All Inferences",
    description="Runs posture, emotion, object, and face detection all at once.",
    live=False
)
tabbed_interface = gr.TabbedInterface(
    interface_list=[
        posture_interface,
        emotion_interface,
        objects_interface,
        faces_interface,
        facemesh_interface,
        all_interface
    ],
    tab_names=[
        "Posture",
        "Emotion",
        "Objects",
        "Faces",
        "Facemesh",
        "All Inferences"
    ]
)
# -----------------------------
# Wrap in a Blocks Layout and Launch
# -----------------------------
demo = gr.Blocks(css=custom_css)

with demo:
    gr.Markdown("<h1 class='gradio-title' style='color: #32CD32;'>Multi-Analysis Image App</h1>")
    gr.Markdown("<p class='gradio-description' style='color: #32CD32;'>Upload an image to run high-tech analysis for posture, emotions, objects, faces, and facemesh landmarks.</p>")
    tabbed_interface.render()

if __name__ == "__main__":
    demo.launch()