David Driscoll committed
Commit a46f53e · Parent(s): 5148899
Update app

app.py CHANGED
@@ -73,7 +73,6 @@ def analyze_emotion(image):
     else:
         emotion_text = "No face detected for emotion analysis"

-    # For simplicity, we return the original image
     annotated_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
     return annotated_image, f"Emotion Analysis: {emotion_text}"

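
For context, the hunk above is the tail of analyze_emotion. A minimal sketch of how the full function might look, assuming the fer package's FER detector; the module-level detector, the scoring logic, and the RGB-to-BGR conversion are guesses, and only the lines shown in the diff come from the actual file:

import cv2
from fer import FER

detector = FER()  # hypothetical module-level detector; not shown in the diff

def analyze_emotion(image):
    # Gradio hands the function an RGB numpy array; OpenCV works in BGR
    frame = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    results = detector.detect_emotions(frame)
    if results:
        # detect_emotions returns one dict per face, with per-emotion scores
        emotions = results[0]["emotions"]
        top = max(emotions, key=emotions.get)
        emotion_text = f"{top} ({emotions[top]:.2f})"
    else:
        emotion_text = "No face detected for emotion analysis"

    annotated_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    return annotated_image, f"Emotion Analysis: {emotion_text}"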
@@ -157,10 +156,9 @@ body {
 # -----------------------------
 # Create Individual Interfaces for Each Analysis
 # -----------------------------
-
 posture_interface = gr.Interface(
     fn=analyze_posture,
-    inputs=gr.
+    inputs=gr.Image(source="camera", label="Capture Your Posture"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Posture Analysis")],
     title="Posture Analysis",
     description="Detects your posture using MediaPipe."
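
A version caveat, offered as an assumption about Gradio's API rather than anything this commit states: Gradio 3.x documents the Image component's source parameter as taking "upload", "webcam", or "canvas", and Gradio 4.x replaced it with a sources list, so source="camera" may need adjusting depending on the installed version. A sketch of both spellings:

import gradio as gr

# Gradio 3.x style: `source` is a single string ("upload", "webcam", or "canvas")
webcam_input = gr.Image(source="webcam", label="Capture Your Posture")

# Gradio 4.x style: the parameter became a list named `sources`
# webcam_input = gr.Image(sources=["webcam"], label="Capture Your Posture")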
@@ -168,7 +166,7 @@ posture_interface = gr.Interface(

 emotion_interface = gr.Interface(
     fn=analyze_emotion,
-    inputs=gr.
+    inputs=gr.Image(source="camera", label="Capture Your Face"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Emotion Analysis")],
     title="Emotion Analysis",
     description="Detects facial emotions using FER."
@@ -176,7 +174,7 @@ emotion_interface = gr.Interface(

 objects_interface = gr.Interface(
     fn=analyze_objects,
-    inputs=gr.
+    inputs=gr.Image(source="camera", label="Capture the Scene"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Object Detection")],
     title="Object Detection",
     description="Detects objects using a pretrained Faster R-CNN."
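
The objects tab references a pretrained Faster R-CNN. A minimal sketch of what analyze_objects could look like with torchvision's detection model; the model choice, score threshold, and return format are all assumptions, since the implementation itself is not part of this diff:

import torch
import torchvision
from torchvision.transforms.functional import to_tensor

# Hypothetical model setup; newer torchvision prefers weights="DEFAULT"
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
model.eval()

SCORE_THRESHOLD = 0.8  # assumed confidence cutoff

def analyze_objects(image):
    # image: RGB numpy array from the Gradio Image component
    with torch.no_grad():
        predictions = model([to_tensor(image)])[0]
    # keep only detections above the confidence threshold
    keep = predictions["scores"] > SCORE_THRESHOLD
    labels = predictions["labels"][keep].tolist()
    return image, f"Object Detection: {len(labels)} objects found"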
@@ -184,7 +182,7 @@ objects_interface = gr.Interface(

 faces_interface = gr.Interface(
     fn=analyze_faces,
-    inputs=gr.
+    inputs=gr.Image(source="camera", label="Capture Your Face"),
     outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.Textbox(label="Face Detection")],
     title="Face Detection",
     description="Detects faces using MediaPipe."
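
The faces tab is described as MediaPipe-based. A minimal sketch of a compatible analyze_faces, assuming the mediapipe solutions API; the confidence value and the returned text are guesses, as the real body is not shown in the diff:

import mediapipe as mp

mp_face_detection = mp.solutions.face_detection

def analyze_faces(image):
    # image arrives as an RGB numpy array, which MediaPipe expects directly
    with mp_face_detection.FaceDetection(min_detection_confidence=0.5) as detector:
        results = detector.process(image)
    count = len(results.detections) if results.detections else 0
    return image, f"Face Detection: {count} face(s) found"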
@@ -193,7 +191,6 @@ faces_interface = gr.Interface(
 # -----------------------------
 # Create a Tabbed Interface for All Analyses
 # -----------------------------
-
 tabbed_interface = gr.TabbedInterface(
     interface_list=[posture_interface, emotion_interface, objects_interface, faces_interface],
     tab_names=["Posture", "Emotion", "Objects", "Faces"]
@@ -205,7 +202,7 @@ tabbed_interface = gr.TabbedInterface(
 demo = gr.Blocks(css=custom_css)
 with demo:
     gr.Markdown("<h1 class='gradio-title'>Real-Time Multi-Analysis App</h1>")
-    gr.Markdown("<p class='gradio-description'>Experience a high-tech
+    gr.Markdown("<p class='gradio-description'>Experience a high-tech cinematic interface for real-time analysis of your posture, emotions, objects, and faces using your webcam.</p>")
     demo_tab = tabbed_interface

 if __name__ == "__main__":
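
One thing the new Blocks section leaves ambiguous: demo_tab = tabbed_interface is a plain assignment, which by itself does not place the tabbed interface inside the Blocks layout. A minimal sketch of one way to embed it, assuming Gradio's .render() method for reusing an already-built Interface or Blocks; the launch call is also an assumption, since the diff cuts off right after if __name__ == "__main__":

demo = gr.Blocks(css=custom_css)
with demo:
    gr.Markdown("<h1 class='gradio-title'>Real-Time Multi-Analysis App</h1>")
    gr.Markdown("<p class='gradio-description'>Experience a high-tech cinematic interface for real-time analysis of your posture, emotions, objects, and faces using your webcam.</p>")
    tabbed_interface.render()  # embeds the TabbedInterface inside this Blocks context

if __name__ == "__main__":
    demo.launch()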