import streamlit as st
import cv2
import mediapipe as mp
import numpy as np
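
# Dependencies (assumed PyPI package names): streamlit, opencv-python, mediapipe, numpy.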


def angle_between_the_legs(image):
    """
    Calculate the angle between the legs using MediaPipe pose estimation.

    Args:
        image: Input image in BGR format.

    Returns:
        A tuple containing:
        - The annotated image with visualization
        - Left leg angle (degrees)
        - Right leg angle (degrees)
        - Angle between legs (degrees)
    """
    mp_pose = mp.solutions.pose
    mp_drawing = mp.solutions.drawing_utils

    # MediaPipe expects RGB input; OpenCV decodes images as BGR.
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    with mp_pose.Pose(static_image_mode=True, model_complexity=2, enable_segmentation=False) as pose:
        results = pose.process(image_rgb)

    if not results.pose_landmarks:
        print("No pose landmarks detected.")
        return image, None, None, None

    annotated_image = image.copy()
    landmarks = results.pose_landmarks.landmark

    # Midpoint of the hips in normalized [0, 1] coordinates.
    mid_hip_x = (landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].x +
                 landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value].x) / 2
    mid_hip_y = (landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].y +
                 landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value].y) / 2

    # Convert normalized coordinates to pixel coordinates.
    h, w, _ = annotated_image.shape
    mid_hip = (int(mid_hip_x * w), int(mid_hip_y * h))

    left_ankle = (int(landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].x * w),
                  int(landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].y * h))
    right_ankle = (int(landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].x * w),
                   int(landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].y * h))

    # Vertical reference from the mid-hip straight down, plus one line per leg.
    center_bottom = (mid_hip[0], h)
    cv2.line(annotated_image, mid_hip, center_bottom, (0, 255, 255), 2)
    cv2.line(annotated_image, mid_hip, left_ankle, (255, 0, 0), 2)
    cv2.line(annotated_image, mid_hip, right_ankle, (0, 0, 255), 2)

    # Unit vector pointing straight down in image coordinates (y grows downward).
    center_vector = np.array([0, 1])

    left_leg_vector = np.array([left_ankle[0] - mid_hip[0], left_ankle[1] - mid_hip[1]])
    if np.linalg.norm(left_leg_vector) > 0:
        left_leg_vector = left_leg_vector / np.linalg.norm(left_leg_vector)

    right_leg_vector = np.array([right_ankle[0] - mid_hip[0], right_ankle[1] - mid_hip[1]])
    if np.linalg.norm(right_leg_vector) > 0:
        right_leg_vector = right_leg_vector / np.linalg.norm(right_leg_vector)

    # Angle of each leg relative to the vertical reference, via the dot product.
    left_angle_rad = np.arccos(np.clip(np.dot(center_vector, left_leg_vector), -1.0, 1.0))
    right_angle_rad = np.arccos(np.clip(np.dot(center_vector, right_leg_vector), -1.0, 1.0))

    left_angle_deg = np.degrees(left_angle_rad)
    right_angle_deg = np.degrees(right_angle_rad)

    # Sign convention: an angle is negative when the ankle lies to the left of the mid-hip.
    if left_ankle[0] < mid_hip[0]:
        left_angle_deg = -left_angle_deg
    if right_ankle[0] < mid_hip[0]:
        right_angle_deg = -right_angle_deg

    # The angle between the legs is the sum of each leg's absolute deviation from vertical.
    angle_between_legs = abs(left_angle_deg) + abs(right_angle_deg)

    # OpenCV's Hershey fonts cannot render the degree symbol, so label the values with "deg".
    cv2.putText(annotated_image, f"Left angle: {left_angle_deg:.1f} deg",
                (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
    cv2.putText(annotated_image, f"Right angle: {right_angle_deg:.1f} deg",
                (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    cv2.putText(annotated_image, f"Total angle: {angle_between_legs:.1f} deg",
                (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

    # Mark the mid-hip reference point.
    cv2.circle(annotated_image, mid_hip, 5, (255, 255, 0), -1)

    mp_drawing.draw_landmarks(
        annotated_image,
        results.pose_landmarks,
        mp_pose.POSE_CONNECTIONS,
        landmark_drawing_spec=mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2),
        connection_drawing_spec=mp_drawing.DrawingSpec(color=(255, 0, 0), thickness=2))

    # Convert back to RGB so the annotated image displays correctly in Streamlit.
    annotated_image_rgb = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)

    return annotated_image_rgb, left_angle_deg, right_angle_deg, angle_between_legs
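

# A minimal sketch (not used above) that isolates the angle-from-vertical math used in
# angle_between_the_legs; the helper name is hypothetical.
def _angle_from_vertical_deg(origin_xy, point_xy):
    """Return the unsigned angle, in degrees, between the downward vertical and origin->point."""
    v = np.array([point_xy[0] - origin_xy[0], point_xy[1] - origin_xy[1]], dtype=float)
    norm = np.linalg.norm(v)
    if norm == 0:
        return 0.0
    v = v / norm
    return float(np.degrees(np.arccos(np.clip(np.dot(np.array([0.0, 1.0]), v), -1.0, 1.0))))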


def tilt_of_body(image):
    """
    Calculate the tilt angle of the body by measuring the angle between a vertical
    line (y-axis) and a line connecting the shoulders.

    Args:
        image: Input image in BGR format.

    Returns:
        A tuple containing:
        - The annotated image with visualization
        - Tilt angle in degrees
    """
    mp_pose = mp.solutions.pose
    mp_drawing = mp.solutions.drawing_utils

    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    with mp_pose.Pose(static_image_mode=True, model_complexity=2, enable_segmentation=False) as pose:
        results = pose.process(image_rgb)

    if not results.pose_landmarks:
        print("No pose landmarks detected.")
        return image, None

    annotated_image = image.copy()
    landmarks = results.pose_landmarks.landmark
    h, w, _ = annotated_image.shape

    # Shoulder landmarks in pixel coordinates.
    left_shoulder = (int(landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].x * w),
                     int(landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y * h))
    right_shoulder = (int(landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value].x * w),
                      int(landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value].y * h))

    # Vertical reference line through the middle of the image, plus the shoulder line.
    vertical_top = (w // 2, 0)
    vertical_bottom = (w // 2, h)
    cv2.line(annotated_image, vertical_top, vertical_bottom, (0, 255, 255), 2)
    cv2.line(annotated_image, left_shoulder, right_shoulder, (255, 0, 0), 2)

    vertical_vector = np.array([0, 1])
    shoulder_vector = np.array([right_shoulder[0] - left_shoulder[0],
                                right_shoulder[1] - left_shoulder[1]])

    if np.linalg.norm(shoulder_vector) > 0:
        shoulder_vector = shoulder_vector / np.linalg.norm(shoulder_vector)

    # The dot product gives the angle's magnitude; the z-component of the cross
    # product gives its direction (which side of vertical the shoulder line leans).
    dot_product = np.dot(vertical_vector, shoulder_vector)
    cross_product = np.cross(np.array([vertical_vector[0], vertical_vector[1], 0]),
                             np.array([shoulder_vector[0], shoulder_vector[1], 0]))[2]

    angle_rad = np.arccos(np.clip(dot_product, -1.0, 1.0))
    angle_deg = np.degrees(angle_rad)

    if cross_product < 0:
        angle_deg = -angle_deg

    # OpenCV's Hershey fonts cannot render the degree symbol, so label the value with "deg".
    cv2.putText(annotated_image, f"Tilt angle: {angle_deg:.1f} deg",
                (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

    cv2.circle(annotated_image, left_shoulder, 5, (0, 255, 0), -1)
    cv2.circle(annotated_image, right_shoulder, 5, (0, 255, 0), -1)

    mp_drawing.draw_landmarks(
        annotated_image,
        results.pose_landmarks,
        mp_pose.POSE_CONNECTIONS,
        landmark_drawing_spec=mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2),
        connection_drawing_spec=mp_drawing.DrawingSpec(color=(255, 0, 0), thickness=2))

    # Convert back to RGB so the annotated image displays correctly in Streamlit.
    annotated_image_rgb = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)

    return annotated_image_rgb, angle_deg
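

# A minimal sketch (not used above) of the signed-angle idea from tilt_of_body: the
# z-component of the 2D cross product decides which side of vertical the vector leans.
# The helper name is hypothetical.
def _signed_angle_from_vertical_deg(vector_xy):
    """Return the signed angle, in degrees, between the downward vertical [0, 1] and vector_xy."""
    v = np.asarray(vector_xy, dtype=float)
    norm = np.linalg.norm(v)
    if norm == 0:
        return 0.0
    v = v / norm
    angle = np.degrees(np.arccos(np.clip(np.dot(np.array([0.0, 1.0]), v), -1.0, 1.0)))
    # cross([0, 1, 0], [vx, vy, 0]) has z-component -vx, so a positive vx flips the sign.
    return float(-angle if v[0] > 0 else angle)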


# Streamlit front end: upload an image and run both analyses on it.
st.title("Body and Leg Pose Analysis")

uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    # Decode the uploaded bytes into a BGR image for OpenCV.
    image = np.array(bytearray(uploaded_file.read()), dtype=np.uint8)
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)

    annotated_image_legs, left_angle, right_angle, total_angle = angle_between_the_legs(image)
    annotated_image_tilt, tilt_angle = tilt_of_body(image)

    # The analysis functions return None for the angles when no pose is detected.
    if total_angle is None or tilt_angle is None:
        st.warning("No pose landmarks detected in the uploaded image.")
    else:
        st.image(annotated_image_legs, caption=f"Leg Angles (Total: {total_angle:.1f}°)", use_column_width=True)
        st.image(annotated_image_tilt, caption=f"Body Tilt Angle: {tilt_angle:.1f}°", use_column_width=True)