David Driscoll committed
Commit a34dddf · Parent(s): 8e73638
Update app

app.py CHANGED
@@ -37,8 +37,8 @@ emotion_detector = FER(mtcnn=True)
 
 def analyze_posture(image):
     """
-
-    and returns an annotated image
+    Processes an image (captured via the webcam) with MediaPipe Pose,
+    draws pose landmarks, and returns an annotated image plus a text summary.
     """
     # Convert from PIL (RGB) to OpenCV BGR format
     frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
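For context, the body of analyze_posture beyond the BGR conversion is outside this diff. A minimal sketch of the MediaPipe Pose flow the new docstring describes, assuming the usual process-then-draw pattern (the function name analyze_posture_sketch and the summary strings are illustrative, not from the source):

import cv2
import mediapipe as mp
import numpy as np

mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils

def analyze_posture_sketch(image):
    # PIL (RGB) -> OpenCV BGR, matching the conversion in the diff
    frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    # MediaPipe expects RGB; static_image_mode suits single webcam captures
    with mp_pose.Pose(static_image_mode=True) as pose:
        results = pose.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    if results.pose_landmarks:
        # Draw the detected skeleton onto the BGR frame in place
        mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
        summary = "Pose landmarks detected."
    else:
        summary = "No pose detected."
    # Return RGB so gr.Image(type="numpy") displays colors correctly
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), summary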
@@ -64,7 +64,6 @@ def analyze_emotion(image):
     Returns the image and a text summary.
     """
     frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
-    # FER expects an RGB image
     frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
     emotions = emotion_detector.detect_emotions(frame_rgb)
     if emotions:
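Note that the deleted comment was accurate: the fer package works on RGB input, which is why the code converts back from BGR before calling detect_emotions. Each element of the returned list describes one detected face; a minimal sketch of how the if emotions: branch might build its summary (the summary formatting is an assumption, not from the source):

# Each entry of `emotions` looks like:
#   {"box": [x, y, w, h],
#    "emotions": {"angry": 0.02, "happy": 0.91, ...}}
if emotions:
    scores = emotions[0]["emotions"]
    # Pick the highest-scoring emotion for the first detected face
    top = max(scores, key=scores.get)
    summary = f"Dominant emotion: {top} ({scores[top]:.2f})"
else:
    summary = "No face detected."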
@@ -155,10 +154,9 @@ body {
 # -----------------------------
 # Create Individual Interfaces for Each Analysis
 # -----------------------------
-# Use gr.inputs.Camera for webcam capture (legacy API)
 posture_interface = gr.Interface(
     fn=analyze_posture,
-    inputs=gr.
+    inputs=gr.Camera(label="Capture Your Posture"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Posture Analysis")],
     title="Posture Analysis",
     description="Detects your posture using MediaPipe."
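One caution on the new input: gr.Camera does not appear in Gradio's documented component set for the mainstream 3.x/4.x releases, so this line may raise an AttributeError depending on the pinned version. The documented way to capture from a webcam is an Image component restricted to the webcam source; a sketch of the same interface under that assumption (type="pil" is also an assumption, chosen to match the PIL-to-BGR conversion inside analyze_posture):

import gradio as gr

posture_interface = gr.Interface(
    fn=analyze_posture,
    # Gradio 4.x syntax; Gradio 3.x used source="webcam" instead
    inputs=gr.Image(sources=["webcam"], type="pil", label="Capture Your Posture"),
    outputs=[gr.Image(type="numpy", label="Annotated Output"),
             gr.Textbox(label="Posture Analysis")],
    title="Posture Analysis",
    description="Detects your posture using MediaPipe."
)

The same substitution would apply to the three interfaces below.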
@@ -166,7 +164,7 @@ posture_interface = gr.Interface(
 
 emotion_interface = gr.Interface(
     fn=analyze_emotion,
-    inputs=gr.
+    inputs=gr.Camera(label="Capture Your Face"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Emotion Analysis")],
     title="Emotion Analysis",
     description="Detects facial emotions using FER."
@@ -174,7 +172,7 @@ emotion_interface = gr.Interface(
 
 objects_interface = gr.Interface(
     fn=analyze_objects,
-    inputs=gr.
+    inputs=gr.Camera(label="Capture the Scene"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Object Detection")],
     title="Object Detection",
     description="Detects objects using a pretrained Faster R-CNN."
@@ -182,7 +180,7 @@ objects_interface = gr.Interface(
 
 faces_interface = gr.Interface(
     fn=analyze_faces,
-    inputs=gr.
+    inputs=gr.Camera(label="Capture Your Face"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Face Detection")],
     title="Face Detection",
     description="Detects faces using MediaPipe."
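The code that combines the four interfaces is outside this diff, but with four gr.Interface objects the usual pattern is gr.TabbedInterface; a sketch, assuming the app exposes them as tabs (the tab names are illustrative):

# Combine the four analyses into one tabbed app
demo = gr.TabbedInterface(
    [posture_interface, emotion_interface, objects_interface, faces_interface],
    ["Posture", "Emotion", "Objects", "Faces"],
)

if __name__ == "__main__":
    demo.launch()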