Update app.py
app.py
CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 import cv2
 import mediapipe as mp
 import numpy as np
+from PIL import Image
 
 # Initialize mediapipe pose class
 mp_pose = mp.solutions.pose
@@ -117,20 +118,26 @@ def classify_pose(landmarks, output_image, display=False):
     return output_image, label
 
 def detect_and_classify_pose(input_image):
-
+    # Convert input to numpy array if it's not
+    if isinstance(input_image, Image.Image):
+        input_image = np.array(input_image)
+
+    # Convert the image from RGB to BGR (OpenCV format)
+    input_image = cv2.cvtColor(input_image, cv2.COLOR_RGB2BGR)
     results = pose.process(input_image)
     pose_classification = "No pose detected"
     if results.pose_landmarks:
         mp_drawing.draw_landmarks(input_image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
         input_image, pose_classification = classify_pose(results.pose_landmarks.landmark, input_image)
-    return cv2.cvtColor(input_image, cv2.
+    return cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB), pose_classification
 
 iface = gr.Interface(
     fn=detect_and_classify_pose,
-    inputs=gr.Video(),
+    inputs=gr.Video(streaming=True),
     outputs=["image", "text"],
     title="Live Yoga Pose Detection and Classification",
     description="This app detects and classifies yoga poses from the live camera feed using MediaPipe.",
 )
 
-iface.launch()
+iface.launch(share=True)
+
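The changed hunk references names defined earlier in app.py that this diff does not show: the `pose` estimator, `mp_drawing`, and the `classify_pose` helper. Below is a minimal, runnable sketch of that assumed context, with the MediaPipe initialization guessed and `classify_pose` stubbed out purely for illustration; it mirrors the updated handler and exercises it on a synthetic frame without a camera.

import cv2
import mediapipe as mp
import numpy as np
from PIL import Image

# Assumed initialization (defined elsewhere in app.py; reproduced here as a guess).
mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils
pose = mp_pose.Pose(static_image_mode=True, min_detection_confidence=0.5)

def classify_pose(landmarks, output_image):
    # Stub standing in for the real classifier defined earlier in app.py.
    return output_image, "Unknown pose"

def detect_and_classify_pose(input_image):
    # Accept either a PIL image or a NumPy array.
    if isinstance(input_image, Image.Image):
        input_image = np.array(input_image)
    # Mirror the commit: convert the RGB frame to BGR before processing.
    input_image = cv2.cvtColor(input_image, cv2.COLOR_RGB2BGR)
    results = pose.process(input_image)
    pose_classification = "No pose detected"
    if results.pose_landmarks:
        mp_drawing.draw_landmarks(input_image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
        input_image, pose_classification = classify_pose(results.pose_landmarks.landmark, input_image)
    # Convert back to RGB so the Gradio image output renders correctly.
    return cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB), pose_classification

if __name__ == "__main__":
    # Quick local check on a blank 640x480 frame; expect "No pose detected".
    frame = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
    annotated, label = detect_and_classify_pose(frame)
    print(annotated.shape, label)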