# DRS_V1 / app.py
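"""Gradio app for a simple DRS-style LBW review: track the ball in an uploaded
bowling video, fit a quadratic to its observed path, project that path toward a
stump zone, and overlay an OUT / NOT OUT decision on the output video."""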
import cv2
import gradio as gr
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
import tempfile
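
# Runtime dependencies: gradio, opencv-python, numpy, scikit-learn
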
def process_video(video_path):
    cap = cv2.VideoCapture(video_path)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS) or 20.0  # fall back to 20 fps if metadata is missing

    out_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    out = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))

    # HSV color range for the ball (adjust if needed)
    ball_color_lower = np.array([5, 100, 100])
    ball_color_upper = np.array([20, 255, 255])

    trajectory = []
    predicted_points = []
    decision = "NOT OUT"
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Isolate the ball by color in HSV space and clean up the mask
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, ball_color_lower, ball_color_upper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)

        contours, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if contours:
            # Track the largest detected blob as the ball
            c = max(contours, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            if radius > 3:
                center = (int(x), int(y))
                trajectory.append(center)
                cv2.circle(frame, center, int(radius), (0, 0, 255), 2)

        # Draw trajectory line
        for i in range(1, len(trajectory)):
            cv2.line(frame, trajectory[i - 1], trajectory[i], (255, 0, 0), 2)
        # Draw stumps box, automatically scaled to the video width/height
        box_width = int(width * 0.12)      # 12% of width for stumps
        box_height = int(height * 0.1)     # 10% of height
        stump_center_x = int(width * 0.5)  # middle of the frame
        stump_top_y = int(height * 0.82)   # bottom ~20% of frame (near the pitch)
        stump_box = (
            stump_center_x - box_width // 2,
            stump_top_y,
            stump_center_x + box_width // 2,
            stump_top_y + box_height
        )
        cv2.rectangle(frame, stump_box[:2], stump_box[2:], (0, 255, 255), 2)
        # Fit a quadratic to the observed path once enough points are collected,
        # then extrapolate 150 px beyond the last observed x to project the path
        if len(trajectory) >= 5 and not predicted_points:
            X = np.array([px for px, _ in trajectory]).reshape(-1, 1)
            Y = np.array([py for _, py in trajectory])
            poly = PolynomialFeatures(degree=2)
            X_poly = poly.fit_transform(X)
            model = LinearRegression()
            model.fit(X_poly, Y)
            x_future = np.linspace(X.min(), X.max() + 150, num=20).reshape(-1, 1)
            y_future = model.predict(poly.transform(x_future))
            predicted_points = list(zip(x_future.flatten().astype(int), y_future.astype(int)))

            # Determine OUT/NOT OUT: any projected point inside the stump box means OUT
            for px, py in predicted_points:
                if stump_box[0] <= px <= stump_box[2] and stump_box[1] <= py <= stump_box[3]:
                    decision = "OUT"
                    break

        # Draw projected path (dotted yellow)
        for pt in predicted_points:
            if 0 <= pt[0] < width and 0 <= pt[1] < height:
                cv2.circle(frame, (int(pt[0]), int(pt[1])), 3, (0, 255, 255), -1)
        # Overlay the decision once the projection is available
        if predicted_points:
            cv2.putText(
                frame,
                f"DECISION: {decision}",
                (50, 100),
                cv2.FONT_HERSHEY_SIMPLEX,
                2,
                (0, 0, 255) if decision == "OUT" else (0, 255, 0),
                4,
            )

        out.write(frame)

    cap.release()
    out.release()
    return out_path

iface = gr.Interface(
    fn=process_video,
    inputs=gr.Video(label="Upload Bowling Video"),
    outputs=gr.Video(label="LBW Tracker Output"),
    title="DRS LBW Review System",
    description="Detect the ball trajectory, project its path with polynomial regression, and decide OUT/NOT OUT"
)
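# Launch the Gradio UI (served at http://localhost:7860 by default).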
iface.launch()