import streamlit as st
import cv2
import mediapipe as mp
import numpy as np


# Define the function to calculate the angle between the legs
def angle_between_the_legs(image):
    """
    Calculate the angle between the legs using MediaPipe pose estimation.

    Args:
        image: Input image in BGR format.

    Returns:
        A tuple containing:
            - The annotated image with visualization
            - Left leg angle (degrees)
            - Right leg angle (degrees)
            - Angle between legs (degrees)
    """
    # Initialize MediaPipe Pose
    mp_pose = mp.solutions.pose
    mp_drawing = mp.solutions.drawing_utils

    # Convert the image to RGB (MediaPipe requires RGB images)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Initialize the Pose model
    with mp_pose.Pose(static_image_mode=True, model_complexity=2,
                      enable_segmentation=False) as pose:
        # Process the image
        results = pose.process(image_rgb)

        # Check if pose landmarks were detected
        if not results.pose_landmarks:
            print("No pose landmarks detected.")
            return image, None, None, None

        # Create a copy of the image for annotation
        annotated_image = image.copy()

        # Get landmark coordinates
        landmarks = results.pose_landmarks.landmark

        # Mid-hip point (midpoint between the left and right hip)
        mid_hip_x = (landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].x +
                     landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value].x) / 2
        mid_hip_y = (landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].y +
                     landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value].y) / 2

        # Get image dimensions for converting normalized coordinates
        h, w, _ = annotated_image.shape
        mid_hip = (int(mid_hip_x * w), int(mid_hip_y * h))

        # Pixel coordinates of the left and right ankles
        left_ankle = (int(landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].x * w),
                      int(landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].y * h))
        right_ankle = (int(landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].x * w),
                       int(landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].y * h))

        # Draw the vertical reference line from the mid-hip down
        center_bottom = (mid_hip[0], h)
        cv2.line(annotated_image, mid_hip, center_bottom, (0, 255, 255), 2)

        # Draw lines from the mid-hip to each ankle
        cv2.line(annotated_image, mid_hip, left_ankle, (255, 0, 0), 2)   # Blue line to left ankle
        cv2.line(annotated_image, mid_hip, right_ankle, (0, 0, 255), 2)  # Red line to right ankle

        # Reference vector pointing straight down (image y grows downward)
        center_vector = np.array([0, 1])

        # Unit vector from the mid-hip to the left ankle
        left_leg_vector = np.array([left_ankle[0] - mid_hip[0],
                                    left_ankle[1] - mid_hip[1]])
        if np.linalg.norm(left_leg_vector) > 0:
            left_leg_vector = left_leg_vector / np.linalg.norm(left_leg_vector)

        # Unit vector from the mid-hip to the right ankle
        right_leg_vector = np.array([right_ankle[0] - mid_hip[0],
                                     right_ankle[1] - mid_hip[1]])
        if np.linalg.norm(right_leg_vector) > 0:
            right_leg_vector = right_leg_vector / np.linalg.norm(right_leg_vector)

        # Angle of each leg relative to the vertical: angle = arccos(dot(v1, v2)),
        # computed on the normalized vectors
        left_angle_rad = np.arccos(np.clip(np.dot(center_vector, left_leg_vector), -1.0, 1.0))
        right_angle_rad = np.arccos(np.clip(np.dot(center_vector, right_leg_vector), -1.0, 1.0))

        # Convert to degrees
        left_angle_deg = np.degrees(left_angle_rad)
        right_angle_deg = np.degrees(right_angle_rad)

        # If the left ankle is to the left of center, the angle is negative
        if left_ankle[0] < mid_hip[0]:
            left_angle_deg = -left_angle_deg

        # If the right ankle is to the left of center, the angle is negative
        if right_ankle[0] <= mid_hip[0]:
            right_angle_deg = -right_angle_deg

        # Total angle between the legs: sum of each leg's angle from the vertical
        angle_between_legs = abs(left_angle_deg) + abs(right_angle_deg)

        # Add text annotations with the angle values
        # (OpenCV's Hershey fonts are ASCII-only, so "deg" is used instead of the degree sign)
        cv2.putText(annotated_image, f"Left angle: {left_angle_deg:.1f} deg", (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
        cv2.putText(annotated_image, f"Right angle: {right_angle_deg:.1f} deg", (10, 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(annotated_image, f"Total angle: {angle_between_legs:.1f} deg", (10, 90),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

        # Mark the mid-hip point
        cv2.circle(annotated_image, mid_hip, 5, (255, 255, 0), -1)

        # Draw pose landmarks on the image
        mp_drawing.draw_landmarks(
            annotated_image,
            results.pose_landmarks,
            mp_pose.POSE_CONNECTIONS,
            landmark_drawing_spec=mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2),
            connection_drawing_spec=mp_drawing.DrawingSpec(color=(255, 0, 0), thickness=2))

        # Convert the annotated image to RGB for display
        annotated_image_rgb = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)

        return annotated_image_rgb, left_angle_deg, right_angle_deg, angle_between_legs
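
# Quick numeric check of the sign convention above (plain geometry, independent of
# MediaPipe; the coordinates are illustrative, not taken from a real detection):
# with the mid-hip at (100, 100) and an ankle at (60, 180), the leg vector is
# (-40, 80), so its angle from the vertical is degrees(arccos(80 / sqrt(40**2 + 80**2)))
# ~= 26.6 deg, reported as negative because the ankle lies left of the centerline.
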

# Define the function to calculate the tilt of the body
def tilt_of_body(image):
    """
    Calculate the tilt angle of the body by measuring the angle between a
    vertical line (y-axis) and the line connecting the shoulders.

    Args:
        image: Input image in BGR format.

    Returns:
        A tuple containing:
            - The annotated image with visualization
            - Tilt angle in degrees
    """
    # Initialize MediaPipe Pose
    mp_pose = mp.solutions.pose
    mp_drawing = mp.solutions.drawing_utils

    # Convert the image to RGB (MediaPipe requires RGB images)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Initialize the Pose model
    with mp_pose.Pose(static_image_mode=True, model_complexity=2,
                      enable_segmentation=False) as pose:
        # Process the image
        results = pose.process(image_rgb)

        # Check if pose landmarks were detected
        if not results.pose_landmarks:
            print("No pose landmarks detected.")
            return image, None

        # Create a copy of the image for annotation
        annotated_image = image.copy()

        # Get landmark coordinates
        landmarks = results.pose_landmarks.landmark

        # Get image dimensions for converting normalized coordinates
        h, w, _ = annotated_image.shape

        # Pixel coordinates of the shoulders
        left_shoulder = (int(landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].x * w),
                         int(landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y * h))
        right_shoulder = (int(landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value].x * w),
                          int(landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value].y * h))

        # Draw the vertical reference line (y-axis) through the image center
        vertical_top = (w // 2, 0)
        vertical_bottom = (w // 2, h)
        cv2.line(annotated_image, vertical_top, vertical_bottom, (0, 255, 255), 2)

        # Draw the line connecting the shoulders
        cv2.line(annotated_image, left_shoulder, right_shoulder, (255, 0, 0), 2)

        # Vectors for the angle measurement (down is positive y in image coordinates)
        vertical_vector = np.array([0, 1])
        shoulder_vector = np.array([right_shoulder[0] - left_shoulder[0],
                                    right_shoulder[1] - left_shoulder[1]])

        # Normalize the shoulder vector
        if np.linalg.norm(shoulder_vector) > 0:
            shoulder_vector = shoulder_vector / np.linalg.norm(shoulder_vector)

        # Angle between the vertical line and the shoulder line
        dot_product = np.dot(vertical_vector, shoulder_vector)
        cross_product = np.cross(np.array([vertical_vector[0], vertical_vector[1], 0]),
                                 np.array([shoulder_vector[0], shoulder_vector[1], 0]))[2]

        # Calculate the angle using arccos
        angle_rad = np.arccos(np.clip(dot_product, -1.0, 1.0))
        angle_deg = np.degrees(angle_rad)

        # Use the sign of the cross product to determine the direction of tilt
        if cross_product < 0:
            angle_deg = -angle_deg

        # Add a text annotation with the tilt angle value
        cv2.putText(annotated_image, f"Tilt angle: {angle_deg:.1f} deg", (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

        # Mark the key points
        cv2.circle(annotated_image, left_shoulder, 5, (0, 255, 0), -1)   # Green for left shoulder
        cv2.circle(annotated_image, right_shoulder, 5, (0, 255, 0), -1)  # Green for right shoulder

        # Draw pose landmarks on the image
        mp_drawing.draw_landmarks(
            annotated_image,
            results.pose_landmarks,
            mp_pose.POSE_CONNECTIONS,
            landmark_drawing_spec=mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2),
            connection_drawing_spec=mp_drawing.DrawingSpec(color=(255, 0, 0), thickness=2))

        # Convert the annotated image to RGB for display
        annotated_image_rgb = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)

        return annotated_image_rgb, angle_deg
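
# Sign-convention check for the tilt above (illustrative coordinates only): with the
# left shoulder at (80, 120) and the right shoulder at (200, 100), the shoulder vector
# is (120, -20); arccos of its normalized dot product with (0, 1) gives ~= 99.5 deg, and
# the z-component of the cross product is negative, so the tilt is reported as about
# -99.5 deg. Note that level shoulders read as +/-90 deg, because the reference line
# here is vertical rather than horizontal.
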

# Streamlit app
st.title("Body and Leg Pose Analysis")

# Upload image
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    # Decode the uploaded bytes into a BGR image with OpenCV
    image = np.array(bytearray(uploaded_file.read()), dtype=np.uint8)
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)

    # Analyze the angle between the legs and the tilt of the body
    annotated_image_legs, left_angle, right_angle, total_angle = angle_between_the_legs(image)
    annotated_image_tilt, tilt_angle = tilt_of_body(image)

    # Show the annotated images; fall back to a warning if no pose was detected
    if total_angle is not None:
        st.image(annotated_image_legs,
                 caption=f"Leg Angles (Total: {total_angle:.1f}°)",
                 use_column_width=True)
    else:
        st.warning("No pose landmarks detected for the leg-angle analysis.")

    if tilt_angle is not None:
        st.image(annotated_image_tilt,
                 caption=f"Body Tilt Angle: {tilt_angle:.1f}°",
                 use_column_width=True)
    else:
        st.warning("No pose landmarks detected for the body-tilt analysis.")
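
# Usage sketch (the filename "pose_analysis.py" is an assumption; the required
# packages are streamlit, opencv-python, mediapipe, and numpy):
#
#     pip install streamlit opencv-python mediapipe numpy
#     streamlit run pose_analysis.py
#
# Streamlit prints a local URL; open it in a browser and upload a full-body photo.
# Both functions run MediaPipe Pose in static_image_mode, so each uploaded image is
# processed independently rather than as frames of a video stream.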