David Driscoll committed
Commit 8e73638 · 1 Parent(s): a46f53e
Update app
app.py CHANGED
@@ -37,7 +37,7 @@ emotion_detector = FER(mtcnn=True)
 
 def analyze_posture(image):
     """
-    Takes an image
+    Takes an image captured from the webcam, processes it with MediaPipe Pose,
     and returns an annotated image and a text summary.
     """
     # Convert from PIL (RGB) to OpenCV BGR format
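The body of `analyze_posture` falls outside this hunk. For reference, a minimal sketch of the MediaPipe Pose pattern the docstring describes (annotate the frame, return an image plus a summary) could look like the following; the helper name and summary strings are illustrative, not the app's actual code:

```python
import cv2
import numpy as np
import mediapipe as mp

mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils

def annotate_pose(image):
    """Illustrative sketch: detect pose landmarks and draw them on the frame."""
    # Convert from PIL (RGB) to OpenCV BGR format, as the app does
    frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    with mp_pose.Pose(static_image_mode=True) as pose:
        # MediaPipe itself expects RGB input
        results = pose.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    if results.pose_landmarks:
        mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
        summary = "Pose landmarks detected."
    else:
        summary = "No pose detected."
    # Return RGB so gr.Image(type="numpy") renders colors correctly
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), summary
```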
@@ -61,7 +61,7 @@ def analyze_posture(image):
 def analyze_emotion(image):
     """
     Uses FER to detect facial emotions from the captured image.
-    Returns the
+    Returns the image and a text summary.
     """
     frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
     # FER expects an RGB image
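The `FER(mtcnn=True)` detector from the first hunk's context is what this function relies on; its `detect_emotions` call returns one entry per face, each carrying a bounding `"box"` and an `"emotions"` score dict. A sketch of the summary step, with the output formatting as an assumption rather than the app's code:

```python
from fer import FER

emotion_detector = FER(mtcnn=True)  # matches the app's module-level setup

def summarize_emotions(rgb_frame):
    """Illustrative sketch: reduce FER's per-face scores to a text summary."""
    results = emotion_detector.detect_emotions(rgb_frame)
    if not results:
        return "No face detected."
    parts = []
    for face in results:
        # Pick the highest-scoring emotion for each detected face
        top = max(face["emotions"], key=face["emotions"].get)
        parts.append(f"{top} ({face['emotions'][top]:.2f})")
    return ", ".join(parts)
```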
@@ -103,7 +103,7 @@ def analyze_objects(image):
 def analyze_faces(image):
     """
     Uses MediaPipe face detection to identify faces in the image.
-    Returns an annotated image with
+    Returns an annotated image with bounding boxes and a text summary.
    """
     frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
     output_frame = frame.copy()
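For the bounding boxes the updated docstring promises, MediaPipe's face-detection solution pairs with the drawing utility's `draw_detection`. A minimal sketch, with the confidence threshold an assumption rather than a value taken from the app:

```python
import cv2
import mediapipe as mp

mp_face = mp.solutions.face_detection
mp_drawing = mp.solutions.drawing_utils

def annotate_faces(bgr_frame):
    """Illustrative sketch: draw a box per detected face and count them."""
    output_frame = bgr_frame.copy()
    with mp_face.FaceDetection(min_detection_confidence=0.5) as detector:
        # MediaPipe expects RGB input
        results = detector.process(cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB))
    detections = results.detections or []
    for detection in detections:
        mp_drawing.draw_detection(output_frame, detection)
    return output_frame, f"Detected {len(detections)} face(s)."
```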
@@ -130,7 +130,6 @@ def analyze_faces(image):
 # -----------------------------
 custom_css = """
 @import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700&display=swap');
-
 body {
     background-color: #0e0e0e;
     color: #e0e0e0;
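How `custom_css` is attached is not visible in this diff. One plausible wiring, assuming Gradio's standard `css=` parameter on `gr.Interface`; the placeholder function and trimmed stylesheet below are stand-ins for the sketch only:

```python
import gradio as gr

custom_css = "body { background-color: #0e0e0e; color: #e0e0e0; }"  # trimmed stand-in

# Assumption: the stylesheet is attached via Gradio's standard css= parameter
demo = gr.Interface(
    fn=lambda text: text,               # placeholder function for the sketch
    inputs=gr.Textbox(label="Input"),
    outputs=gr.Textbox(label="Output"),
    css=custom_css,
)
```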
@@ -156,9 +155,10 @@ body {
 # -----------------------------
 # Create Individual Interfaces for Each Analysis
 # -----------------------------
+# Use gr.inputs.Camera for webcam capture (legacy API)
 posture_interface = gr.Interface(
     fn=analyze_posture,
-    inputs=gr.
+    inputs=gr.inputs.Camera(label="Capture Your Posture"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Posture Analysis")],
     title="Posture Analysis",
     description="Detects your posture using MediaPipe."
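A caution on the new input: in the legacy `gr.inputs` API, webcam capture was expressed as `gr.inputs.Image(source="webcam")`, and a `Camera` class is not part of the documented namespace, so this line may still fail at runtime. For reference, a sketch of the same interface on current (4.x) Gradio, where the parameter is `sources`; it reuses the app's `analyze_posture` from above:

```python
import gradio as gr

# Gradio 4.x style webcam-only input; 3.x used gr.Image(source="webcam")
posture_interface = gr.Interface(
    fn=analyze_posture,  # the app's function defined earlier in app.py
    inputs=gr.Image(sources=["webcam"], type="pil", label="Capture Your Posture"),
    outputs=[
        gr.Image(type="numpy", label="Annotated Output"),
        gr.Textbox(label="Posture Analysis"),
    ],
    title="Posture Analysis",
    description="Detects your posture using MediaPipe.",
)
```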
@@ -166,7 +166,7 @@ posture_interface = gr.Interface(
 
 emotion_interface = gr.Interface(
     fn=analyze_emotion,
-    inputs=gr.
+    inputs=gr.inputs.Camera(label="Capture Your Face"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Emotion Analysis")],
     title="Emotion Analysis",
     description="Detects facial emotions using FER."
@@ -174,7 +174,7 @@ emotion_interface = gr.Interface(
 
 objects_interface = gr.Interface(
     fn=analyze_objects,
-    inputs=gr.
+    inputs=gr.inputs.Camera(label="Capture the Scene"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Object Detection")],
     title="Object Detection",
     description="Detects objects using a pretrained Faster R-CNN."
@@ -182,7 +182,7 @@ objects_interface = gr.Interface(
 
 faces_interface = gr.Interface(
     fn=analyze_faces,
-    inputs=gr.
+    inputs=gr.inputs.Camera(label="Capture Your Face"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Face Detection")],
     title="Face Detection",
     description="Detects faces using MediaPipe."
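The diff ends before the launch code, so how the four interfaces are combined is not shown. A plausible assembly, assuming `gr.TabbedInterface` (available since Gradio 3.0) together with the `css=` wiring sketched earlier; the tab names are illustrative:

```python
import gradio as gr

# Assumption: the four interfaces are presented as tabs; the diff does not show this
demo = gr.TabbedInterface(
    [posture_interface, emotion_interface, objects_interface, faces_interface],
    tab_names=["Posture", "Emotion", "Objects", "Faces"],
)

if __name__ == "__main__":
    demo.launch()
```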