# ergonomics/app.py
import cv2
import math
import base64
import numpy as np
import mediapipe as mp
from io import BytesIO
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import Response
from fastapi.middleware.cors import CORSMiddleware # Add CORS support
from PIL import Image
# Initialize FastAPI app
app = FastAPI()

# Add CORS middleware so browser clients on other origins can call /upload.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# wide open; acceptable for a demo, but restrict origins before production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Initialize Mediapipe Pose model (module-level singleton shared by handlers).
# NOTE(review): static_image_mode=False enables cross-frame tracking, but
# /upload processes independent images — static_image_mode=True may be more
# appropriate here; confirm intended usage.
mp_pose = mp.solutions.pose
pose = mp_pose.Pose(
    static_image_mode=False,
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5
)
# Function to calculate angles between points
# Function to calculate angles between points
def calculate_angle(a, b, c):
    """Return the angle in degrees between vectors a->b and b->c.

    Args:
        a, b, c: (x, y) points, e.g. pixel coordinates.

    Returns:
        Angle in degrees in [0.0, 180.0]. Returns 0.0 when either vector is
        degenerate (coincident points) instead of raising ZeroDivisionError.
    """
    ab = (b[0] - a[0], b[1] - a[1])
    bc = (c[0] - b[0], c[1] - b[1])
    magnitude_ab = math.hypot(ab[0], ab[1])
    magnitude_bc = math.hypot(bc[0], bc[1])
    # Coincident points leave the angle undefined; report 0.0 rather than
    # dividing by zero.
    if magnitude_ab == 0 or magnitude_bc == 0:
        return 0.0
    dot_product = ab[0] * bc[0] + ab[1] * bc[1]
    # Clamp to acos's domain: floating-point rounding can push the cosine
    # fractionally outside [-1, 1] for (anti)parallel vectors.
    cosine = max(-1.0, min(1.0, dot_product / (magnitude_ab * magnitude_bc)))
    return math.degrees(math.acos(cosine))
# Process image with Mediapipe Pose Estimation
def process_frame(image):
    """Annotate a BGR frame with right-side posture landmarks and angles.

    Runs the module-level Mediapipe ``pose`` estimator on the frame, draws
    hip/shoulder/ear guide lines, computes hip and neck angles against
    vertical reference points, and overlays a Good/Poor posture verdict.

    Args:
        image: HxWx3 BGR frame (numpy array) as produced by OpenCV.

    Returns:
        The BGR frame, annotated when a pose was detected; otherwise returned
        unmodified (apart from the RGB round-trip).
    """
    h, w, _ = image.shape
    # Mediapipe expects RGB; marking the buffer read-only lets it avoid a copy.
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_rgb.flags.writeable = False
    results = pose.process(image_rgb)
    image_rgb.flags.writeable = True
    # Convert back to BGR for OpenCV drawing.
    image = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)

    if not results.pose_landmarks:
        return image

    landmarks = results.pose_landmarks.landmark
    right_shoulder = landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER]
    right_hip = landmarks[mp_pose.PoseLandmark.RIGHT_HIP]
    right_ear = landmarks[mp_pose.PoseLandmark.RIGHT_EAR]

    # Normalized landmark coordinates -> pixel coordinates.
    shoulder_px = (int(right_shoulder.x * w), int(right_shoulder.y * h))
    hip_px = (int(right_hip.x * w), int(right_hip.y * h))
    ear_px = (int(right_ear.x * w), int(right_ear.y * h))

    # Vertical reference points above the shoulder and hip, clamped to frame.
    offset = 60
    upper_shoulder = (shoulder_px[0], max(0, shoulder_px[1] - offset))
    upper_hip = (hip_px[0], max(0, hip_px[1] - offset))

    # Draw reference landmarks and guide lines.
    cv2.circle(image, upper_shoulder, 5, (0, 255, 0), -1)
    cv2.circle(image, upper_hip, 5, (0, 255, 0), -1)
    cv2.line(image, hip_px, shoulder_px, (255, 0, 255), 2)          # Hip to shoulder
    cv2.line(image, shoulder_px, ear_px, (255, 255, 0), 2)          # Shoulder to ear
    cv2.line(image, hip_px, upper_hip, (0, 165, 255), 2)            # Hip to upper hip
    cv2.line(image, shoulder_px, upper_shoulder, (0, 255, 255), 2)  # Shoulder to upper shoulder

    # Degenerate landmark geometry (coincident points) leaves the angle
    # undefined; skip the overlay instead of letting the request 500.
    try:
        angle_hip = calculate_angle(upper_hip, hip_px, shoulder_px)
        angle_neck = calculate_angle(shoulder_px, ear_px, upper_shoulder)
    except (ValueError, ZeroDivisionError):
        return image

    # Classify posture by angle thresholds and pick overlay colors.
    hip_posture = "Good" if 160 <= angle_hip <= 180 else "Poor"
    neck_posture = "Good" if 150 <= angle_neck <= 180 else "Poor"
    hip_color = (0, 255, 0) if hip_posture == "Good" else (0, 0, 255)
    neck_color = (0, 255, 0) if neck_posture == "Good" else (0, 0, 255)

    cv2.putText(image, f"Hip Angle: {angle_hip:.1f} ({hip_posture})", (10, 60),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, hip_color, 2)
    cv2.putText(image, f"Neck Angle: {angle_neck:.1f} ({neck_posture})", (10, 90),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, neck_color, 2)
    return image
# API Route to receive an image and return processed image
@app.post("/upload")
async def upload_image(file: UploadFile = File(...)):
    """Receive an uploaded image, annotate posture, and return it as JPEG.

    Args:
        file: Multipart image upload in any format Pillow can decode.

    Returns:
        JPEG bytes of the annotated frame, or an empty 500 response when
        JPEG encoding fails.
    """
    contents = await file.read()
    # Force 3-channel RGB so RGBA/grayscale/palette uploads don't break the
    # RGB->BGR conversion below (cv2.cvtColor expects exactly 3 channels).
    pil_image = Image.open(BytesIO(contents)).convert("RGB")
    frame = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
    processed_image = process_frame(frame)
    # Encode processed image to return; cv2.imencode reports failure via its
    # first return value rather than raising.
    ok, buffer = cv2.imencode(".jpg", processed_image)
    if not ok:
        return Response(content=b"", status_code=500)
    return Response(content=buffer.tobytes(), media_type="image/jpeg")