Update app.py
app.py
CHANGED
@@ -22,6 +22,9 @@ PITCH_WIDTH = 3.05  # Width of pitch
 STUMP_HEIGHT = 0.71  # Stump height
 STUMP_WIDTH = 0.2286  # Stump width (including bails)
 
+# Model input size (adjust if best.pt was trained with a different size)
+MODEL_INPUT_SIZE = (640, 640)  # (height, width)
+
 # Load model
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model = attempt_load("best.pt")  # Load without map_location
@@ -44,6 +47,9 @@ def process_video(video_path):
         if not ret:
             break
 
+        # Resize frame to model input size
+        frame = cv2.resize(frame, MODEL_INPUT_SIZE, interpolation=cv2.INTER_AREA)
+
         # Preprocess frame for YOLOv5
         img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
         img = torch.from_numpy(img).to(device).float() / 255.0
@@ -61,6 +67,9 @@ def process_video(video_path):
         for *xyxy, conf, cls in det:
             x_center = (xyxy[0] + xyxy[2]) / 2
             y_center = (xyxy[1] + xyxy[3]) / 2
+            # Scale coordinates back to original frame size
+            x_center = x_center * frame_width / MODEL_INPUT_SIZE[1]
+            y_center = y_center * frame_height / MODEL_INPUT_SIZE[0]
             positions.append((x_center.item(), y_center.item()))
             frame_numbers.append(frame_num)
 
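For reference, a minimal sketch of how the resize-and-rescale flow added in this commit could fit together. The capture setup, the frame_width/frame_height properties, the "delivery.mp4" file name, and the example detection centre are assumptions for illustration and are not part of the diff.

# Minimal sketch of the resize-and-rescale flow; the capture setup and the
# example detection centre are assumptions, not taken from the diff above.
import cv2
import torch

MODEL_INPUT_SIZE = (640, 640)  # (height, width), matching the constant added above

cap = cv2.VideoCapture("delivery.mp4")                   # hypothetical input video
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))     # original frame size,
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))   # used for rescaling below

ret, frame = cap.read()
if ret:
    # cv2.resize expects dsize as (width, height); with a square 640x640 target
    # the ordering is harmless, but it matters for non-square input sizes
    resized = cv2.resize(frame, (MODEL_INPUT_SIZE[1], MODEL_INPUT_SIZE[0]),
                         interpolation=cv2.INTER_AREA)
    img = torch.from_numpy(cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)).float() / 255.0

    # A detection centre in model-input coordinates (example value) mapped back
    # to the original frame, mirroring the scaling lines added in the diff
    x_center, y_center = 320.0, 320.0
    x_orig = x_center * frame_width / MODEL_INPUT_SIZE[1]
    y_orig = y_center * frame_height / MODEL_INPUT_SIZE[0]

cap.release()

Because the frame is resized with a plain cv2.resize rather than a letterboxed resize, the horizontal and vertical scale factors can differ, which is why the diff rescales x and y separately when mapping detections back to the original frame.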