David Driscoll committed
Commit 2e20db1 · Parent(s): a34dddf
Update app

app.py CHANGED
@@ -37,8 +37,8 @@ emotion_detector = FER(mtcnn=True)
 
 def analyze_posture(image):
     """
-    Processes an image
-    draws pose landmarks, and returns an annotated image
+    Processes an image captured from the webcam with MediaPipe Pose,
+    draws pose landmarks, and returns an annotated image and a text summary.
     """
     # Convert from PIL (RGB) to OpenCV BGR format
     frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
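The hunk above changes only the docstring; the rest of analyze_posture is context that the diff cuts off after line 44. For readers following along, here is a minimal sketch of how such a function typically continues with MediaPipe's solutions API. The module-level Pose instance, the landmark check, and the summary strings are assumptions for illustration, not the file's actual code.

import cv2
import numpy as np
import mediapipe as mp

mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils
pose = mp_pose.Pose(static_image_mode=True)  # assumed module-level detector

def analyze_posture(image):
    """
    Processes an image captured from the webcam with MediaPipe Pose,
    draws pose landmarks, and returns an annotated image and a text summary.
    """
    # Convert from PIL (RGB) to OpenCV BGR format
    frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    # MediaPipe Pose runs inference on RGB input
    results = pose.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    if results.pose_landmarks:
        mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
        summary = "Pose landmarks detected."  # illustrative summary text
    else:
        summary = "No pose detected."
    # Gradio's numpy image output expects RGB, so convert back
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), summary

The BGR/RGB round trip matters: OpenCV draws in BGR, MediaPipe infers on RGB, and Gradio displays numpy output as RGB.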
@@ -156,7 +156,7 @@ body {
 # -----------------------------
 posture_interface = gr.Interface(
     fn=analyze_posture,
-    inputs=gr.
+    inputs=gr.Image(sources=["webcam"], streaming=True, label="Capture Your Posture"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Posture Analysis")],
     title="Posture Analysis",
     description="Detects your posture using MediaPipe."
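The removed inputs= line is cut off in this capture of the diff, so the old component configuration is not recoverable here. The added line uses the Gradio 4.x signature of gr.Image, where sources is a list of allowed input sources and streaming=True keeps webcam frames flowing to the function; Gradio 3.x spelled this as the singular source="webcam". A standalone equivalent of the new component:

import gradio as gr

# Gradio 4.x API: `sources` is a list; Gradio 3.x used `source="webcam"`.
webcam_input = gr.Image(sources=["webcam"], streaming=True, label="Capture Your Posture")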
@@ -164,7 +164,7 @@ posture_interface = gr.Interface(
 
 emotion_interface = gr.Interface(
     fn=analyze_emotion,
-    inputs=gr.
+    inputs=gr.Image(sources=["webcam"], streaming=True, label="Capture Your Face"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Emotion Analysis")],
     title="Emotion Analysis",
     description="Detects facial emotions using FER."
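analyze_emotion itself is not part of this diff. A plausible shape for it, assuming it reuses the emotion_detector = FER(mtcnn=True) instance visible in the first hunk header; the box drawing and summary strings are illustrative guesses:

import cv2
import numpy as np
from fer import FER

emotion_detector = FER(mtcnn=True)  # as in the first hunk's header line

def analyze_emotion(image):
    # FER's examples operate on OpenCV-style BGR frames
    frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    detections = emotion_detector.detect_emotions(frame)
    for face in detections:
        x, y, w, h = face["box"]
        top = max(face["emotions"], key=face["emotions"].get)  # highest-scoring label
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, top, (x, max(y - 10, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
    summary = f"{len(detections)} face(s) analyzed." if detections else "No face detected."
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), summary

detect_emotions returns one dict per face with a bounding box and a score per emotion label, so the top label is just the argmax of that dict.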
@@ -172,7 +172,7 @@ emotion_interface = gr.Interface(
 
 objects_interface = gr.Interface(
     fn=analyze_objects,
-    inputs=gr.
+    inputs=gr.Image(sources=["webcam"], streaming=True, label="Capture the Scene"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Object Detection")],
     title="Object Detection",
     description="Detects objects using a pretrained Faster R-CNN."
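analyze_objects is likewise absent from the diff. A hedged sketch with torchvision's pretrained Faster R-CNN; the to_tensor preprocessing is standard for these models, while the 0.5 score cutoff and the drawing code are assumptions:

import cv2
import numpy as np
import torch
from torchvision.models.detection import fasterrcnn_resnet50_fpn
from torchvision.transforms.functional import to_tensor

model = fasterrcnn_resnet50_fpn(weights="DEFAULT")  # pretrained COCO weights
model.eval()

def analyze_objects(image):
    rgb = np.array(image)
    tensor = to_tensor(rgb)  # HWC uint8 -> CHW float in [0, 1]
    with torch.no_grad():
        pred = model([tensor])[0]  # dict of "boxes", "labels", "scores"
    keep = pred["scores"] > 0.5  # assumed confidence cutoff
    for box in pred["boxes"][keep].round().int().tolist():
        x1, y1, x2, y2 = box
        cv2.rectangle(rgb, (x1, y1), (x2, y2), (0, 255, 0), 2)
    return rgb, f"{int(keep.sum())} object(s) detected above 0.5 confidence."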
@@ -180,7 +180,7 @@ objects_interface = gr.Interface(
 
 faces_interface = gr.Interface(
     fn=analyze_faces,
-    inputs=gr.
+    inputs=gr.Image(sources=["webcam"], streaming=True, label="Capture Your Face"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Face Detection")],
     title="Face Detection",
     description="Detects faces using MediaPipe."
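And analyze_faces, sketched with MediaPipe's face-detection solution; model_selection=0 (the short-range model) and the 0.5 confidence floor are assumed defaults, not values taken from app.py:

import cv2
import numpy as np
import mediapipe as mp

mp_face = mp.solutions.face_detection
mp_drawing = mp.solutions.drawing_utils
face_detector = mp_face.FaceDetection(model_selection=0, min_detection_confidence=0.5)

def analyze_faces(image):
    frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    # Face detection, like Pose, runs on RGB input
    results = face_detector.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    count = 0
    if results.detections:
        for detection in results.detections:
            mp_drawing.draw_detection(frame, detection)  # draws box plus key points
            count += 1
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), f"{count} face(s) detected."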
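The diff never shows how the four interfaces are mounted into one app. A common pattern for exactly this parallel structure is gr.TabbedInterface; whether app.py uses it, and the tab names below, are assumptions:

import gradio as gr

# Hypothetical assembly of the four interfaces defined above
demo = gr.TabbedInterface(
    [posture_interface, emotion_interface, objects_interface, faces_interface],
    tab_names=["Posture", "Emotion", "Objects", "Faces"],
)

if __name__ == "__main__":
    demo.launch()

demo.launch() then serves all four tabs from a single Space.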