import av
import cv2
import joblib
import mediapipe as mp
import numpy as np
import streamlit as st
from streamlit_webrtc import webrtc_streamer, VideoTransformerBase

# Load trained model and label encoder
model = joblib.load("pose_classifier.joblib")
label_encoder = joblib.load("label_encoder.joblib")
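# Both .joblib artifacts are loaded by relative path, so they are expected to sit alongside app.py in the Space repository.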
# Initialize MediaPipe Pose
mp_pose = mp.solutions.pose
pose = mp_pose.Pose()
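# MediaPipe Pose detects 33 body landmarks per frame, each with x, y, z, and visibility values.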
# Streamlit UI
st.title("Live Pose Classification on Hugging Face Spaces")
st.write("Using Streamlit WebRTC, OpenCV, and MediaPipe.")
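
# streamlit-webrtc calls transform() once for every video frame received from the browser.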
class PoseClassification(VideoTransformerBase):
    """Per-frame callback: classify the body pose and overlay the predicted label."""

    def transform(self, frame):
        img = frame.to_ndarray(format="bgr24")
        # Convert frame to RGB for MediaPipe
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Process frame with MediaPipe Pose
        results = pose.process(img_rgb)
        if results.pose_landmarks:
            # Flatten the landmarks into one feature vector: all x values, then y, z,
            # and visibility (33 landmarks x 4 = 132 features).
            landmarks = results.pose_landmarks.landmark
            pose_data = [j.x for j in landmarks] + [j.y for j in landmarks] + \
                        [j.z for j in landmarks] + [j.visibility for j in landmarks]
            pose_data = np.array(pose_data).reshape(1, -1)
            try:
                y_pred = model.predict(pose_data)
                predicted_label = label_encoder.inverse_transform(y_pred)[0]
                # Draw label on frame
                cv2.putText(img, f"Pose: {predicted_label}", (20, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)
            except Exception as e:
                # Note: this runs in the video worker thread, so depending on the
                # Streamlit version the warning may only be logged, not rendered.
                st.warning(f"⚠️ Prediction Error: {e}")
        # Always return a frame so the stream keeps playing, even when no pose is detected.
        return av.VideoFrame.from_ndarray(img, format="bgr24")

# Start WebRTC streamer
webrtc_streamer(key="pose-classification", video_transformer_factory=PoseClassification)
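# Note: newer streamlit-webrtc releases prefer video_processor_factory / VideoProcessorBase;
# the transformer API above still works but is deprecated in recent versions.
# Run locally with `streamlit run app.py` (dependencies: streamlit-webrtc, mediapipe, opencv-python, joblib).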