import gradio as gr
import os
import numpy as np
from utils import read_video, save_video
from trackers import Tracker
from team_assigner import TeamAssigner
from player_ball_assigner import PlayerBallAssigner
from camera_movement_estimator import CameraMovementEstimator
from view_transformer import ViewTransformer
from speed_and_distance_estimator import SpeedAndDistance_Estimator
def process_video(input_video, player_stats=True, ball_stats=True):
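    """Run the full analytics pipeline on an uploaded clip.

    Args:
        input_video: path of the uploaded video file (provided by gr.Video).
        player_stats: include per-player speed/distance overlays when True.
        ball_stats: include ball speed/distance overlays when True.

    Returns:
        Path of the annotated output video.
    """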
    print("input: " + input_video)

    # Read Video
    video_frames = read_video(input_video)  # use the uploaded file

    # Initialize Tracker
    tracker = Tracker('models/best.pt')
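
    # The bundled example clip ships with precomputed tracking results ("stubs"),
    # so the demo can skip the expensive YOLO pass when running on CPU.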
    if input_video.endswith("121364_0_small.mp4"):
        print("loading cached tracks")
        tracks = tracker.get_object_tracks(
            video_frames,
            read_from_stub=True,
            stub_path='stubs/track_stub_121364_0_small.pkl')
    else:
        tracks = tracker.get_object_tracks(video_frames)

    # Interpolate Ball Positions
    tracks["ball"] = tracker.interpolate_ball_positions(tracks["ball"])

    # Get object positions
    tracker.add_position_to_tracks(tracks)

    # Camera movement estimator
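    # Per-frame camera motion (optical flow, per the app description) is estimated
    # and subtracted from the tracked positions, so movement is measured relative
    # to the pitch rather than to the panning camera.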
    camera_movement_estimator = CameraMovementEstimator(video_frames[0])
    if input_video.endswith("121364_0_small.mp4"):
        print("loading cached camera movements")
        camera_movement_per_frame = camera_movement_estimator.get_camera_movement(
            video_frames,
            read_from_stub=True,
            stub_path='stubs/camera_movement_stub_121364_0_small.pkl')
    else:
        camera_movement_per_frame = camera_movement_estimator.get_camera_movement(video_frames)
    camera_movement_estimator.add_adjust_positions_to_tracks(tracks, camera_movement_per_frame)

    # View Transformer
    view_transformer = ViewTransformer()
    view_transformer.add_transformed_position_to_tracks(tracks)
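    # The transformed positions are in real pitch coordinates (a perspective
    # transform, per the app description), which is what makes the speed and
    # distance figures computed below physically meaningful.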

    # Speed and distance estimator
    speed_and_distance_estimator = SpeedAndDistance_Estimator()
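    # Build the exclusion list from the UI checkboxes: referees never get speed/
    # distance overlays, and players/ball are included only when requested.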
    exclude_objects = ['referees', 'players', 'ball']
    if player_stats:
        exclude_objects.remove('players')
    if ball_stats:
        exclude_objects.remove('ball')
    speed_and_distance_estimator.add_speed_and_distance_to_tracks(tracks, exclude_objects)

    # Assign Player Teams
    team_assigner = TeamAssigner()
    team_assigner.assign_team_color(video_frames[0], tracks['players'][0])
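    # Team colors are learned from the players visible in the first frame (color
    # pixel clustering, per the app description); every player detection in every
    # frame is then matched to the closest team color.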
    for frame_num, player_track in enumerate(tracks['players']):
        for player_id, track in player_track.items():
            team = team_assigner.get_player_team(video_frames[frame_num], track['bbox'], player_id)
            tracks['players'][frame_num][player_id]['team'] = team
            tracks['players'][frame_num][player_id]['team_color'] = team_assigner.team_colors[team]

    # Assign Ball Acquisition
    player_assigner = PlayerBallAssigner()
    team_ball_control = []
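    # Per frame: give possession to the player nearest the (interpolated) ball if
    # one is close enough; otherwise carry the previous team's possession forward.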
    for frame_num, player_track in enumerate(tracks['players']):
        ball_bbox = tracks['ball'][frame_num][1]['bbox']
        assigned_player = player_assigner.assign_ball_to_player(player_track, ball_bbox)
        if assigned_player != -1:
            tracks['players'][frame_num][assigned_player]['has_ball'] = True
            team_ball_control.append(tracks['players'][frame_num][assigned_player]['team'])
        else:
            if team_ball_control:  # in case first few frames assigned_player == -1
                team_ball_control.append(team_ball_control[-1])
    team_ball_control = np.array(team_ball_control)

    # Draw output
    output_video_frames = tracker.draw_annotations(video_frames, tracks, team_ball_control)
    output_video_frames = camera_movement_estimator.draw_camera_movement(output_video_frames, camera_movement_per_frame)
    speed_and_distance_estimator.draw_speed_and_distance(output_video_frames, tracks)

    # Save output video
    output_path = 'output_videos/output_video.avi'
    save_video(output_video_frames, output_path)

    return output_path
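
# Illustrative sketch (not called by the pipeline): teams are separated by "color
# pixel clustering" per the description. A common approach, and presumably what
# TeamAssigner does internally, is KMeans over jersey pixels; the helper below is
# an assumption for illustration only, and the real implementation lives in
# team_assigner/.
def _example_jersey_color(frame, bbox):
    """Return the dominant jersey color inside a player's bounding box."""
    from sklearn.cluster import KMeans  # scikit-learn assumed available for this sketch

    x1, y1, x2, y2 = map(int, bbox)
    crop = frame[y1:y2, x1:x2]
    top_half = crop[: crop.shape[0] // 2]            # the jersey sits in the upper half
    pixels = top_half.reshape(-1, 3).astype(float)   # (n_pixels, 3) color rows
    kmeans = KMeans(n_clusters=2, n_init=10).fit(pixels)
    labels = kmeans.labels_.reshape(top_half.shape[0], top_half.shape[1])
    # The corner pixels mostly show grass, so the cluster they fall into is background.
    corners = [labels[0, 0], labels[0, -1], labels[-1, 0], labels[-1, -1]]
    background = max(set(corners), key=corners.count)
    return kmeans.cluster_centers_[1 - background]
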
# Gradio Interface
title = "Football Match Analytics with YOLO and OpenCV"
description = """
This demo processes football match videos to detect players and referees, track the ball, assign players to teams using color pixel clustering, and compute ball possession per team.
It also estimates camera movement with Lucas-Kanade optical flow and applies a perspective transformation to calculate the real-time speed and total distance traveled by each player and the ball.
The YOLO detection model was fine-tuned on this dataset: https://universe.roboflow.com/roboflow-jvuqo/football-players-detection-3zvbc/dataset
Original tutorial reference: https://www.youtube.com/watch?v=neBZ6huolkg
**Note**: this Space runs on CPU, so running inference on a new video can take a while (average during testing: roughly 1 minute of processing per 5 seconds of video)."""
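
# Illustrative sketch (not called by the app): the description mentions Lucas-Kanade
# optical flow for camera-movement estimation. The real logic lives in
# camera_movement_estimator/; the helper below only shows the typical OpenCV calls
# such an estimator is presumably built on, and its names/parameters are assumptions.
def _example_camera_shift(prev_frame, curr_frame):
    """Estimate the dominant (x, y) shift between two consecutive frames."""
    import cv2  # OpenCV is already a dependency of the tracking pipeline

    prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
    curr_gray = cv2.cvtColor(curr_frame, cv2.COLOR_BGR2GRAY)
    # Pick good corners to track in the previous frame ...
    features = cv2.goodFeaturesToTrack(prev_gray, maxCorners=100, qualityLevel=0.3, minDistance=7)
    if features is None:
        return 0.0, 0.0
    # ... and follow them into the current frame with pyramidal Lucas-Kanade.
    new_features, status, _ = cv2.calcOpticalFlowPyrLK(prev_gray, curr_gray, features, None)
    good_old = features[status.flatten() == 1].reshape(-1, 2)
    good_new = new_features[status.flatten() == 1].reshape(-1, 2)
    if len(good_old) == 0:
        return 0.0, 0.0
    # The median displacement of the tracked corners approximates the camera shift.
    dx, dy = np.median(good_new - good_old, axis=0)
    return float(dx), float(dy)
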
examples = [["input_videos/121364_0_small.mp4", True, True]]
interface = gr.Interface(
    fn=process_video,
    inputs=[
        gr.Video(label="Upload Video (mp4, avi, mov) Max: 30sec"),
        gr.Checkbox(label="Include Player Stats", value=True),
        gr.Checkbox(label="Include Ball Stats", value=True),
    ],
    outputs=gr.Video(label="Processed Video"),  # users can download the processed video from this component
    examples=examples,
    live=False,  # no live updates, to avoid re-running the heavy pipeline on every input change
    title=title,
    description=description,
)

interface.launch(debug=True, show_error=True)