File size: 4,140 Bytes
016be20
ab2a34e
016be20
c4fc8b6
ab2a34e
016be20
 
ab2a34e
 
c4fc8b6
 
016be20
ab2a34e
c4fc8b6
6f6b1aa
c4fc8b6
 
ab2a34e
c4fc8b6
ab2a34e
c4fc8b6
016be20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ab2a34e
 
 
016be20
c4fc8b6
ab2a34e
 
016be20
c4fc8b6
e096673
 
 
 
 
 
 
 
 
 
 
 
 
c4fc8b6
016be20
c4fc8b6
 
 
 
016be20
c4fc8b6
 
ab2a34e
c4fc8b6
 
ab2a34e
c4fc8b6
 
ab2a34e
c4fc8b6
ab2a34e
c4fc8b6
 
 
 
 
 
 
 
 
 
 
 
 
 
ab2a34e
c4fc8b6
ab2a34e
c4fc8b6
 
 
 
 
 
 
 
ab2a34e
016be20
 
ab2a34e
 
c4fc8b6
 
 
 
ab2a34e
016be20
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
import cv2
import gradio as gr
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
import tempfile

def _stump_box(width, height):
    """Return the stump target box as (x1, y1, x2, y2), scaled to frame size.

    The box is 12% of frame width, 10% of frame height, horizontally centred,
    with its top at 82% of frame height (near the pitch).
    """
    box_width = int(width * 0.12)
    box_height = int(height * 0.1)
    center_x = int(width * 0.5)
    top_y = int(height * 0.82)
    return (
        center_x - box_width // 2,
        top_y,
        center_x + box_width // 2,
        top_y + box_height,
    )


def _detect_ball(frame, color_lower, color_upper):
    """Locate the ball in a BGR frame by HSV color masking.

    Returns ((x, y), radius) for the largest matching contour with
    radius > 3 px, or None when no plausible ball is found.
    """
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, color_lower, color_upper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return None

    c = max(contours, key=cv2.contourArea)
    ((x, y), radius) = cv2.minEnclosingCircle(c)
    if radius <= 3:  # too small to be the ball; likely noise
        return None
    return (int(x), int(y)), radius


def _predict_path(trajectory):
    """Project the ball path forward with a degree-2 polynomial fit y = f(x).

    np.polyfit is the least-squares fit previously done via sklearn's
    PolynomialFeatures + LinearRegression — same polynomial, no sklearn needed.
    Returns 20 (x, y) int points from min observed x to max observed x + 150 px.
    """
    xs = np.array([p[0] for p in trajectory], dtype=float)
    ys = np.array([p[1] for p in trajectory], dtype=float)
    coeffs = np.polyfit(xs, ys, 2)
    x_future = np.linspace(xs.min(), xs.max() + 150, num=20)
    y_future = np.polyval(coeffs, x_future)
    return [(int(x), int(y)) for x, y in zip(x_future, y_future)]


def _decide(predicted_points, stump_box):
    """Return "OUT" if any projected point falls inside the stump box, else "NOT OUT"."""
    x1, y1, x2, y2 = stump_box
    for x, y in predicted_points:
        if x1 <= x <= x2 and y1 <= y <= y2:
            return "OUT"
    return "NOT OUT"


def process_video(video_path):
    """Track the ball in a bowling clip, draw its path, project it to the
    stumps, and overlay an OUT / NOT OUT decision.

    Parameters
    ----------
    video_path : str
        Path to the input video.

    Returns
    -------
    str
        Path to the annotated .mp4 written to a temp file.
    """
    cap = cv2.VideoCapture(video_path)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # BUG FIX: use the source frame rate instead of a hard-coded 20.0,
    # which made output playback speed wrong for most clips. Fall back to
    # 20.0 when the container doesn't report FPS.
    fps = cap.get(cv2.CAP_PROP_FPS)
    if not fps or fps <= 0:
        fps = 20.0

    out_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    out = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))

    # HSV color range for the ball (adjust if needed).
    ball_color_lower = np.array([5, 100, 100])
    ball_color_upper = np.array([20, 255, 255])

    # Loop-invariant: hoisted out of the frame loop. Also fixes a NameError
    # in the old code, which referenced stump_box after the loop even when
    # the video yielded zero frames.
    stump_box = _stump_box(width, height)

    trajectory = []
    predicted_points = []
    decision = "NOT OUT"

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        detection = _detect_ball(frame, ball_color_lower, ball_color_upper)
        if detection is not None:
            center, radius = detection
            trajectory.append(center)
            cv2.circle(frame, center, int(radius), (0, 0, 255), 2)

        # Observed trajectory (blue polyline).
        for i in range(1, len(trajectory)):
            cv2.line(frame, trajectory[i - 1], trajectory[i], (255, 0, 0), 2)

        # Stump target area (yellow rectangle).
        cv2.rectangle(frame, stump_box[:2], stump_box[2:], (0, 255, 255), 2)

        # Fit the projection once, as soon as enough points are collected,
        # and decide OUT / NOT OUT at the same moment.
        if len(trajectory) >= 5 and not predicted_points:
            predicted_points = _predict_path(trajectory)
            decision = _decide(predicted_points, stump_box)

        # Projected path (dotted yellow), clipped to the frame.
        for pt in predicted_points:
            if 0 <= pt[0] < width and 0 <= pt[1] < height:
                cv2.circle(frame, pt, 3, (0, 255, 255), -1)

        # BUG FIX: the old code tried to stamp the decision afterwards by
        # re-opening out_path with a new VideoWriter and writing a single
        # frame — which overwrote (destroyed) the whole processed video,
        # and the frame it stamped was the FIRST frame, not the last.
        # Instead, overlay the decision on every frame once it is known.
        if predicted_points:
            cv2.putText(
                frame,
                f"DECISION: {decision}",
                (50, 100),
                cv2.FONT_HERSHEY_SIMPLEX,
                2,
                (0, 0, 255) if decision == "OUT" else (0, 255, 0),
                4,
            )

        out.write(frame)

    cap.release()
    out.release()

    return out_path

# Gradio UI: upload a bowling clip, receive the annotated review video back.
iface = gr.Interface(
    fn=process_video,
    inputs=gr.Video(label="Upload Bowling Video"),
    outputs=gr.Video(label="LBW Tracker Output"),
    title="DRS LBW Review System",
    description="Detect ball trajectory, project path, and decide OUT/NOT OUT using AI",
)

if __name__ == "__main__":
    # Guard the server launch so importing this module (e.g. to reuse
    # process_video in tests) does not start the web app as a side effect.
    iface.launch()