Dan Biagini committed on
Commit 52d88a8 · 1 Parent(s): faeb28b

add v3 pressure meter feature coverage

README.md CHANGED
@@ -11,10 +11,37 @@ pinned: false
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
13
 
 
14
 
15
- ## Dev install virtual environment -- python 3.11.5
16
- ```python -m venv .venv```
17
- ```source .venv/bin/activate```
18
 
19
  ## Update requirements.txt
20
  There are two requirements.txt files, the requirements-cpu.txt can be used for smaller installations but will only use CPU based
 
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
13
 
14
+ ## Development Environment Setup
15
 
16
+ ### Option 1: Conda Environment (Recommended)
17
+ ```bash
18
+ # Create new conda environment with Python 3.11
19
+ conda create -n topshelf python=3.11
20
+
21
+ # Activate the environment
22
+ conda activate topshelf
23
+
24
+ # Install PyTorch first (CPU-only version)
25
+ conda install pytorch cpuonly -c pytorch
26
+
27
+ # Then install other requirements
28
+ pip install -r requirements-cpu.txt # For CPU-only installation
29
+ # OR
30
+ pip install -r requirements.txt # For full installation with GPU support
31
+ ```
32
+
33
+ ### Option 2: Python Virtual Environment
34
+ ```bash
35
+ # Create virtual environment
36
+ python -m venv .venv
37
+ source .venv/bin/activate
38
+
39
+ # Install PyTorch first (CPU-only version)
40
+ pip install torch --index-url https://download.pytorch.org/whl/cpu
41
+
42
+ # Then install other requirements
43
+ pip install -r requirements-cpu.txt
44
+ ```
45
 
46
  ## Update requirements.txt
47
  There are two requirements.txt files, the requirements-cpu.txt can be used for smaller installations but will only use CPU based
requirements-cpu.txt CHANGED
@@ -80,9 +80,6 @@ tenacity==8.5.0
80
  thinc==8.2.5
81
  threadpoolctl==3.5.0
82
  toml==0.10.2
83
- torch==2.4.1+cpu
84
- torchaudio==2.4.1+cpu
85
- torchvision==0.19.1+cpu
86
  tornado==6.4.1
87
  tqdm==4.66.5
88
  typer==0.12.5
 
80
  thinc==8.2.5
81
  threadpoolctl==3.5.0
82
  toml==0.10.2
83
  tornado==6.4.1
84
  tqdm==4.66.5
85
  typer==0.12.5
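
Note: the pinned torch, torchaudio, and torchvision CPU wheels are dropped from requirements-cpu.txt because the updated README now installs PyTorch separately (conda `cpuonly` or the CPU wheel index). As an optional sanity check after either setup path, a sketch like the one below can confirm a CPU-only build is in place; the printed version string is illustrative, not a pinned requirement.

```python
# Optional sanity check after the conda or venv setup: confirm a CPU-only PyTorch build is installed.
import torch

print(torch.__version__)          # e.g. "2.4.1+cpu" for a CPU wheel / conda cpuonly build
print(torch.cuda.is_available())  # expected to be False on a CPU-only installation
```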
src/About.py DELETED
@@ -1,20 +0,0 @@
1
- import streamlit as st
2
-
3
- import logging
4
-
5
- st.set_page_config(page_title='About TopShelf', layout="wide",
6
- page_icon="🥅")
7
-
8
- st.title('Welcome To Top Shelf :goal_net:',
9
- help=':video_camera: + :ice_hockey_stick_and_puck: = :clipboard:')
10
- st.subheader('Artificial Intelligence for Hockey Coaches and Players',
11
- help='Proof of concept application')
12
-
13
- overview = '''**Top Shelf** helps coaches and players analyze their gameplay, providing helpful suggestions on areas for improvement.
14
-
15
- The secret behind **Top Shelf** is *Computer Vision* AI technology that recognizes various hockey related objects in videos.
16
- This model can recognize players, nets, referees, rink markings and more.
17
-
18
- **Top Shelf** uses this technology to analyze game play and provide insightful suggestions on areas for improvement.
19
- '''
20
- st.markdown(overview)
src/Home.py CHANGED
@@ -1,71 +1,4 @@
1
  import streamlit as st
2
- from ultralytics import YOLO
3
- from huggingface_hub import hf_hub_download
4
- import cv2
5
- import numpy as np
6
-
7
- import logging
8
-
9
- @st.cache_resource
10
- def get_model():
11
- repo_id = "danbiagini/hockey_breeds_v2"
12
- return hf_hub_download(repo_id=repo_id, filename="hockey_breeds-v2-101623.pt")
13
-
14
-
15
- def run_inference(img, model, thresh=0.5):
16
- model = YOLO(model_f)
17
- st.session_state.results = model(img)
18
- return draw_hockey_boxes(img, st.session_state.results, thresh)
19
-
20
-
21
- def draw_hockey_boxes(frame, results, thresh=0.5):
22
- colors = {0: (0, 255, 0), 1: (255, 0, 0), 2: (0, 0, 255), 3: (128, 0, 0), 4: (
23
- 0, 128, 0), 5: (0, 0, 128), 6: (0, 64, 0), 7: (64, 0, 0), 8: (0, 0, 64)}
24
- font_scale = frame.shape[0] / 500
25
- objects = []
26
-
27
- for name in results:
28
- for box in name.boxes.data.tolist():
29
- x1, y1, x2, y2, score, class_id = box
30
- objects.append((name.names[int(class_id)], score))
31
-
32
- if score > thresh:
33
- cv2.rectangle(frame, (int(x1), int(y1)),
34
- (int(x2), int(y2)), colors[(class_id % 9)], 3)
35
- cv2.putText(frame, f'{name.names[int(class_id)].upper()}: {score:.2f}', (int(x1), int(y1 - 10)),
36
- cv2.FONT_HERSHEY_SIMPLEX, font_scale, colors[(class_id % 9)], 3, cv2.LINE_AA)
37
- else:
38
- print(
39
- f'Found an object under confidence threshold {thresh} type: {name.names[class_id]}, score:{score}, x1, y2:{x1}, {y2}')
40
- return objects
41
-
42
- def reset_image():
43
- st.session_state.img = None
44
-
45
- def upload_img():
46
- if st.session_state.upload_img is not None:
47
- st.session_state.img = st.session_state.upload_img
48
-
49
- def get_naked_image():
50
- if st.session_state.img is not None:
51
- img = st.session_state.img
52
- img.seek(0)
53
- return(cv2.imdecode(np.frombuffer(img.read(), np.uint8), 1))
54
- return None
55
-
56
- def use_sample_image():
57
- st.session_state.img = open('src/images/samples/v2/net-chaos.jpg', 'rb')
58
-
59
- # Init state
60
- if 'results' not in st.session_state:
61
- st.session_state.results = []
62
-
63
- if 'thresh' not in st.session_state:
64
- st.session_state.thresh = 0.5
65
-
66
- if 'img' not in st.session_state:
67
- st.session_state.img = None
68
-
69
 
70
  st.set_page_config(page_title='TopShelf POC', layout="wide",
71
  page_icon="🥅")
@@ -75,56 +8,21 @@ st.title('Welcome To Top Shelf :goal_net:',
75
  st.subheader('Artificial Intelligence for Hockey Coaches and Players',
76
  help='Proof of concept application')
77
 
78
- overview = '''**Top Shelf** helps coaches and players analyze their gameplay, providing helpful suggestions & recommendations on areas for improvement.
79
 
80
  We're starting with a focus on ice hockey, however this same technology could apply to other "invasion" games and sports, for example lacrosse, basketball, soccer, etc.
81
 
82
- The special sauce behind **Top Shelf** is AI *Computer Vision* technology that recognizes various hockey related objects in videos.
83
- The foundation of the technology is an AI model that can recognize players, nets, referees, pucks, and rink areas.
84
 
85
  **Top Shelf** uses this technology to analyze game play and provide insightful suggestions on areas for improvement.
86
  '''
87
  st.markdown(overview)
88
 
89
- st.subheader('Getting Started')
90
- st.markdown('''We're currently in the training and testing phase of **Top Shelf** development. This is a proof of concept application that friends of **Top Shelf** can use to help in development.
91
- To help us understand how our *Computer Vision* model is working you can upload hockey pictures and then the app will display what hockey objects were found. ''')
92
-
93
- st.write("Upload an image file to try detecting hockey objects in your own hockey image, or use a sample image below.")
94
-
95
-
96
- if st.session_state.img is None:
97
- st.file_uploader("Upload an image and Hockey Breeds v2 will find the hockey objects in the image",
98
- type=["jpg", "jpeg", "png"], key='upload_img', on_change=upload_img)
99
-
100
- with st.expander("Sample Images"):
101
- st.image('src/images/samples/v2/net-chaos.jpg')
102
- st.button("Use Sample", on_click=use_sample_image)
103
-
104
- img = get_naked_image()
105
- if img is not None:
106
-
107
- thresh = st.slider('Set the object confidence threshold', key='thresh',
108
- min_value=0.0, max_value=1.0, value=0.5, step=0.05)
109
-
110
- with st.status("Detecting hockey objects..."):
111
- st.write("Loading model...")
112
- model_f = get_model()
113
 
114
- st.write("Running inference on image...")
115
- objects = run_inference(img, model_f, thresh)
116
-
117
- st.dataframe(objects, column_config={
118
- "0": "Object",
119
- "1": "Confidence"
120
- })
121
 
122
- # check if the results list is empty
123
- if len(st.session_state.results) == 0:
124
- st.write('**No hockey objects found in image!**')
125
- st.image(img, caption='Uploaded Image had no hockey objects')
126
- else:
127
- st.image(img, caption='Image with hockey object bounding boxes')
128
 
129
- st.button("Reset Image", on_click=reset_image)
130
 
 
1
  import streamlit as st
2
 
3
  st.set_page_config(page_title='TopShelf POC', layout="wide",
4
  page_icon="🥅")
 
8
  st.subheader('Artificial Intelligence for Hockey Coaches and Players',
9
  help='Proof of concept application')
10
 
11
+ overview = '''**Top Shelf** helps coaches and players analyze their gameplay using Machine Learning, providing helpful suggestions & recommendations on areas for improvement.
12
 
13
  We're starting with a focus on ice hockey, however this same technology could apply to other "invasion" games and sports, for example lacrosse, basketball, soccer, etc.
14
 
15
+ The special sauce behind **Top Shelf** AI is *Computer Vision* and *Machine Learning* technology that tracks various hockey game objects in videos, determines player team memberships and measures game dynamics.
 
16
 
17
  **Top Shelf** uses this technology to analyze game play and provide insightful suggestions on areas for improvement.
18
  '''
19
  st.markdown(overview)
20
 
21
+ st.subheader('Hockey Pressure Meter')
22
 
23
+ st.markdown('''The Hockey Pressure Meter is a visualization of the Top Shelf ML pipeline, which uses the location and team membership of each player on the ice to calculate the pressure applied by each team during the course of the game.
24
 
25
+ ''')
26
 
27
+ st.image('src/images/samples/v3/pressure_meter.png')
28
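
As a rough illustration of the pressure idea described in the Home.py overview above (not the actual Top Shelf pipeline), a per-frame pressure balance could be derived from how many skaters of each team are deep in their offensive zone; the `Player` type, coordinate convention, and zone threshold below are assumptions made purely for this sketch.

```python
# Hypothetical sketch: a per-frame "pressure balance" from player positions and team membership.
# Coordinates, the zone threshold, and the data layout are illustrative assumptions, not the real pipeline.
from dataclasses import dataclass

@dataclass
class Player:
    team: int   # 1 or 2
    x: float    # position along the rink length: 0.0 = team 1's net, 1.0 = team 2's net

def pressure_balance(players: list[Player], zone: float = 0.25) -> int:
    """Positive when team 1 has more skaters deep in the attacking zone, negative when team 2 does."""
    team1_deep = sum(1 for p in players if p.team == 1 and p.x > 1.0 - zone)  # pressing team 2's net
    team2_deep = sum(1 for p in players if p.team == 2 and p.x < zone)        # pressing team 1's net
    return team1_deep - team2_deep

# Example frame: three team-1 skaters deep in team 2's end, one team-2 skater deep in team 1's end.
frame = [Player(1, 0.90), Player(1, 0.85), Player(1, 0.80), Player(2, 0.10), Player(2, 0.60)]
print(pressure_balance(frame))  # 3 - 1 = 2 -> team 1 applying pressure
```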
 
src/app.py CHANGED
@@ -3,9 +3,9 @@ import streamlit as st
3
  import logging
4
 
5
  app = st.navigation(
6
- {"App": [st.Page("Home.py", title="Home", icon=":material/home:"),
7
- st.Page("About.py", icon="🥅")],
8
  "Models": [
 
9
  st.Page("hockey_object_detection.py", title="v2 - Hockey Object Detection", icon=":material/filter_b_and_w:"),
10
  st.Page("Hockey_Breeds.py", title="v1 - Hockey Breeds", icon=":material/gradient:")
11
  ]
 
3
  import logging
4
 
5
  app = st.navigation(
6
+ {"App": [st.Page("Home.py", title="Home", icon=":material/home:")],
 
7
  "Models": [
8
+ st.Page("pressure_meter.py", title="v3 - Pressure Meter", icon=":material/filter_b_and_w:"),
9
  st.Page("hockey_object_detection.py", title="v2 - Hockey Object Detection", icon=":material/filter_b_and_w:"),
10
  st.Page("Hockey_Breeds.py", title="v1 - Hockey Breeds", icon=":material/gradient:")
11
  ]
src/hockey_object_detection.py CHANGED
@@ -1,4 +1,71 @@
1
  import streamlit as st
2
 
3
  # Top down page rendering
4
  st.set_page_config(page_title='Hockey Breeds v2 - Objects', layout="wide",
@@ -10,11 +77,54 @@ with the ability to recognize individual "objects" within an image, which paves
10
 
11
  st.markdown(intro)
12

13
  st.subheader('Object Detection Technical Details')
14
 
15
  desc = '''Hockey Breed detector v2 uses a state of the art (circa 2023) computer vision approach.
16
 
17
- I used the same training images as the first version of the Hockey Breeds model, but change the ML algorithm to use YOLO object detection (YOLO v8).
18
  The output will be a set of hockey objects (defined by "bounding boxes") with labels for any hockey image uploaded.
19
 
20
  **Object List**:
@@ -29,8 +139,12 @@ The output will be a set of hockey objects (defined by "bounding boxes") with la
29
  st.markdown(desc)
30
 
31
  st.subheader("Sample")
32
- st.image('src/images/samples/v2/v2-sample1-090124.png',
 
 
33
  caption='Sample image with hockey objects detected')
 
 
34
 
35
  st.subheader("Validation Results")
36
 
 
1
  import streamlit as st
2
+ from ultralytics import YOLO
3
+ from huggingface_hub import hf_hub_download
4
+ import cv2
5
+ import numpy as np
6
+
7
+ import logging
8
+
9
+ @st.cache_resource
10
+ def get_model():
11
+ repo_id = "danbiagini/hockey_breeds_v2"
12
+ return hf_hub_download(repo_id=repo_id, filename="hockey_breeds-v2-101623.pt")
13
+
14
+
15
+ def run_inference(img, model, thresh=0.5):
16
+ model = YOLO(model)
17
+ st.session_state.results = model(img)
18
+ return draw_hockey_boxes(img, st.session_state.results, thresh)
19
+
20
+
21
+ def draw_hockey_boxes(frame, results, thresh=0.5):
22
+ colors = {0: (0, 255, 0), 1: (255, 0, 0), 2: (0, 0, 255), 3: (128, 0, 0), 4: (
23
+ 0, 128, 0), 5: (0, 0, 128), 6: (0, 64, 0), 7: (64, 0, 0), 8: (0, 0, 64)}
24
+ font_scale = frame.shape[0] / 500
25
+ objects = []
26
+
27
+ for name in results:
28
+ for box in name.boxes.data.tolist():
29
+ x1, y1, x2, y2, score, class_id = box
30
+ objects.append((name.names[int(class_id)], score))
31
+
32
+ if score > thresh:
33
+ cv2.rectangle(frame, (int(x1), int(y1)),
34
+ (int(x2), int(y2)), colors[(class_id % 9)], 3)
35
+ cv2.putText(frame, f'{name.names[int(class_id)].upper()}: {score:.2f}', (int(x1), int(y1 - 10)),
36
+ cv2.FONT_HERSHEY_SIMPLEX, font_scale, colors[(class_id % 9)], 3, cv2.LINE_AA)
37
+ else:
38
+ print(
39
+ f'Found an object under confidence threshold {thresh} type: {name.names[class_id]}, score:{score}, x1, y2:{x1}, {y2}')
40
+ return objects
41
+
42
+ def reset_image():
43
+ st.session_state.img = None
44
+
45
+ def upload_img():
46
+ if st.session_state.upload_img is not None:
47
+ st.session_state.img = st.session_state.upload_img
48
+
49
+ def get_naked_image():
50
+ if st.session_state.img is not None:
51
+ img = st.session_state.img
52
+ img.seek(0)
53
+ return(cv2.imdecode(np.frombuffer(img.read(), np.uint8), 1))
54
+ return None
55
+
56
+ def use_sample_image():
57
+ st.session_state.img = open('src/images/samples/v2/net-chaos.jpg', 'rb')
58
+
59
+ # Init state
60
+ if 'results' not in st.session_state:
61
+ st.session_state.results = []
62
+
63
+ if 'thresh' not in st.session_state:
64
+ st.session_state.thresh = 0.5
65
+
66
+ if 'img' not in st.session_state:
67
+ st.session_state.img = None
68
+
69
 
70
  # Top down page rendering
71
  st.set_page_config(page_title='Hockey Breeds v2 - Objects', layout="wide",
 
77
 
78
  st.markdown(intro)
79
 
80
+ st.subheader('Try Hockey Breeds v2')
81
+ st.markdown('''To help us understand how our *Computer Vision* model is working, you can upload hockey pictures and the app will display which hockey objects were found. ''')
82
+
83
+ st.write("Upload an image file to try detecting hockey objects in your own hockey image, or use a sample image below.")
84
+
85
+
86
+ if st.session_state.img is None:
87
+ st.file_uploader("Upload an image and Hockey Breeds v2 will find the hockey objects in the image",
88
+ type=["jpg", "jpeg", "png"], key='upload_img', on_change=upload_img)
89
+
90
+ with st.expander("Sample Images"):
91
+ st.image('src/images/samples/v2/net-chaos.jpg')
92
+ st.button("Use Sample", on_click=use_sample_image)
93
+
94
+ img = get_naked_image()
95
+ if img is not None:
96
+
97
+ thresh = st.slider('Set the object confidence threshold', key='thresh',
98
+ min_value=0.0, max_value=1.0, value=0.5, step=0.05)
99
+
100
+ with st.status("Detecting hockey objects..."):
101
+ st.write("Loading model...")
102
+ model_f = get_model()
103
+
104
+ st.write("Running inference on image...")
105
+ objects = run_inference(img, model_f, thresh)
106
+
107
+ st.dataframe(objects, column_config={
108
+ "0": "Object",
109
+ "1": "Confidence"
110
+ })
111
+
112
+ # check if the results list is empty
113
+ if len(st.session_state.results) == 0:
114
+ st.write('**No hockey objects found in image!**')
115
+ st.image(img, caption='Uploaded Image had no hockey objects')
116
+ else:
117
+ st.image(img, caption='Image with hockey object bounding boxes')
118
+
119
+ st.button("Reset Image", on_click=reset_image)
120
+
121
+
122
+
123
  st.subheader('Object Detection Technical Details')
124
 
125
  desc = '''Hockey Breed detector v2 uses a state of the art (circa 2023) computer vision approach.
126
 
127
+ I used the same training images as the first version of the Hockey Breeds model, but changed the neural network to use YOLO object detection (YOLO v8).
128
  The output will be a set of hockey objects (defined by "bounding boxes") with labels for any hockey image uploaded.
129
 
130
  **Object List**:
 
139
  st.markdown(desc)
140
 
141
  st.subheader("Sample")
142
+
143
+ try:
144
+ st.image('src/images/samples/v2/v2-sample1-090124.png',
145
  caption='Sample image with hockey objects detected')
146
+ except Exception as e:
147
+ st.error(f"Error loading image: {e}")
148
 
149
  st.subheader("Validation Results")
150
 
src/images/samples/v3/pressure_meter.png ADDED
src/pressure_meter.py ADDED
@@ -0,0 +1,269 @@
1
+ from datetime import datetime
2
+ import json
3
+ import os
4
+ import streamlit as st
5
+ import requests
6
+ import pandas as pd
7
+ from io import StringIO
8
+ import plotly.graph_objects as go
9
+
10
+ # Top down page rendering
11
+ st.set_page_config(page_title='Hockey Breeds v3 - Pressure Meter', layout="wide",
12
+ page_icon=":frame_with_picture:")
13
+
14
+ st.title('Hockey Breeds v3 - Pressure Meter')
15
+ intro = '''Version 3 of Hockey Breeds introduces a new feature: the **Pressure Meter**. Pressure is a term used in hockey to describe the buildup of offensive momentum, which often leads to goals.
16
+
17
+ The **Pressure Meter** builds on a number of major enhancements to the Top-Shelf AI platform:
18
+ 1. Expanded data set and improved model
19
+ 1. Parallelized pipeline for processing input video and generating output metrics in *real time*
20
+ 1. Analysis and metrics include:
21
+ * Team jersey color determination
22
+ * Player team assignments
23
+ * Skater speeds and accelerations
24
+ * Player positions relative to nearest goalie & net
25
+ * Improved puck tracking and interpolation
26
+ * Game play state analysis (stoppage vs live play)
27
+ '''
28
+ st.markdown(intro)
29
+
30
+ st.subheader('Pressure Meter Visualization')
31
+
32
+ # get the data file location
33
+ data_location = st.text_input('Enter the location of the stream analytics metadata file',
34
+ value='https://storage.googleapis.com/topshelf-clients/pressure-meter/2025-02-09/22809/stream_metadata.json')
35
+ metadata = None
36
+ stream_base_url = None
37
+ if data_location:
38
+ # should be an http link
39
+ if not data_location.startswith('http'):
40
+ st.error('Data location must be an http link')
41
+ else:
42
+ # download the data from the link
43
+ if data_location.endswith('/'):
44
+ data_location = data_location + 'stream_metadata.json'
45
+ data = requests.get(data_location)
46
+ # load the data from the json file
47
+ metadata = json.loads(data.text)
48
+
49
+ # determine the base url for the stream
50
+ stream_base_url = data_location.split('stream_metadata.json')[0]
51
+
52
+
53
+ # load the data from the csv files
54
+ if metadata:
55
+ # get the data from the csv files
56
+ files = metadata['output_files']
57
+
58
+ # get the base timestamp for the stream
59
+ base_timestamp = datetime.fromisoformat(metadata['video_start_time'])
60
+
61
+ # Create an empty list to store individual dataframes
62
+ dfs = []
63
+
64
+ for ts, file in files.items():
65
+ try:
66
+ response = requests.get(stream_base_url + file)
67
+ response.raise_for_status()
68
+
69
+ data_string = StringIO(response.text)
70
+ df = pd.read_csv(data_string)
71
+
72
+ ts_delta = datetime.fromtimestamp(int(ts)).astimezone(base_timestamp.tzinfo) - base_timestamp
73
+ df['second_offset'] = df['second_offset'] + ts_delta.total_seconds()
74
+
75
+ dfs.append(df)
76
+
77
+ except Exception as e:
78
+ st.error(f"Failed to load data for timestamp {ts}, file: {file}")
79
+ st.error(f"Error: {str(e)}")
80
+ continue
81
+
82
+ # Log the number of files processed
83
+ st.info(f"Successfully loaded {len(dfs)} out of {len(files)} files")
84
+
85
+ # Concatenate all dataframes and sort by the second_offset
86
+ combined_df = pd.concat(dfs, ignore_index=True)
87
+ combined_df = combined_df.sort_values('second_offset')
88
+
89
+ # Check for gaps in the sequence
90
+ expected_range = set(range(int(combined_df['second_offset'].min()),
91
+ int(combined_df['second_offset'].max()) + 1))
92
+ actual_range = set(combined_df['second_offset'].astype(int))
93
+ missing_seconds = sorted(expected_range - actual_range)
94
+
95
+ if missing_seconds:
96
+ st.warning("Found gaps in the data sequence:")
97
+ # Group consecutive missing seconds into ranges for cleaner output
98
+ gaps = []
99
+ start = missing_seconds[0]
100
+ prev = start
101
+ for curr in missing_seconds[1:] + [None]:
102
+ if curr != prev + 1:
103
+ if start == prev:
104
+ gaps.append(f"{start}")
105
+ else:
106
+ gaps.append(f"{start}-{prev}")
107
+ start = curr
108
+ prev = curr
109
+
110
+ st.warning(f"Missing seconds: {', '.join(gaps)}")
111
+
112
+ # Calculate cumulative counts and ratios - only count actual pressure values
113
+ combined_df['team1_cumulative'] = (combined_df['pressure_balance'] > 0).astype(int).cumsum()
114
+ combined_df['team2_cumulative'] = (combined_df['pressure_balance'] < 0).astype(int).cumsum()
115
+ combined_df['total_cumulative'] = combined_df['team1_cumulative'] + combined_df['team2_cumulative']
116
+
117
+ # Avoid division by zero by using where
118
+ combined_df['team1_pressure_ratio'] = (combined_df['team1_cumulative'] /
119
+ combined_df['total_cumulative'].where(combined_df['total_cumulative'] > 0, 1))
120
+ combined_df['team2_pressure_ratio'] = (combined_df['team2_cumulative'] /
121
+ combined_df['total_cumulative'].where(combined_df['total_cumulative'] > 0, 1))
122
+
123
+ # Calculate the ratio difference for the balance visualization
124
+ combined_df['pressure_ratio_diff'] = combined_df['team1_pressure_ratio'] - combined_df['team2_pressure_ratio']
125
+
126
+ # Add pressure balance visualization using the ratio difference
127
+ st.subheader("Pressure Waves")
128
+ balance_df = pd.DataFrame({
129
+ 'second_offset': combined_df['second_offset'],
130
+ 'pressure_ratio_diff': combined_df['pressure_ratio_diff']
131
+ })
132
+
133
+ # Get team colors from metadata and parse them
134
+ def parse_rgb(color_str):
135
+ # Extract numbers from format 'rgb(r,g,b)'
136
+ r, g, b = map(int, color_str.strip('rgb()').split(','))
137
+ return r, g, b
138
+
139
+ team1_color = metadata.get('team1_color', 'rgb(54, 162, 235)') # default blue if not found
140
+ team2_color = metadata.get('team2_color', 'rgb(255, 99, 132)') # default red if not found
141
+
142
+ # Parse RGB values
143
+ team1_rgb = parse_rgb(team1_color)
144
+ team2_rgb = parse_rgb(team2_color)
145
+
146
+ fig = go.Figure()
147
+
148
+ # Add positive values with team1 color
149
+ fig.add_trace(
150
+ go.Scatter(
151
+ x=combined_df['second_offset'],
152
+ y=combined_df['pressure_ratio_diff'].clip(lower=0),
153
+ fill='tozeroy',
154
+ fillcolor=f'rgba{(*team1_rgb, 0.2)}',
155
+ line=dict(
156
+ color=team1_color,
157
+ shape='hv'
158
+ ),
159
+ name='Team 1 Dominant',
160
+ hovertemplate='Time: %{x:.1f}s<br>Dominance: %{y:.2f}<br>Team 1<extra></extra>',
161
+ hoveron='points+fills'
162
+ )
163
+ )
164
+
165
+ # Add negative values with team2 color
166
+ fig.add_trace(
167
+ go.Scatter(
168
+ x=combined_df['second_offset'],
169
+ y=combined_df['pressure_ratio_diff'].clip(upper=0),
170
+ fill='tozeroy',
171
+ fillcolor=f'rgba{(*team2_rgb, 0.2)}',
172
+ line=dict(
173
+ color=team2_color,
174
+ shape='hv'
175
+ ),
176
+ name='Team 2 Dominant',
177
+ hovertemplate='Time: %{x:.1f}s<br>Dominance: %{y:.2f}<br>Team 2<extra></extra>',
178
+ hoveron='points+fills'
179
+ )
180
+ )
181
+
182
+ fig.update_layout(
183
+ yaxis=dict(
184
+ range=[-1, 1],
185
+ zeroline=True,
186
+ zerolinewidth=2,
187
+ zerolinecolor='rgba(0,0,0,0.2)',
188
+ gridcolor='rgba(0,0,0,0.1)',
189
+ title='Team Dominance'
190
+ ),
191
+ xaxis=dict(
192
+ title='Time (seconds)',
193
+ gridcolor='rgba(0,0,0,0.1)'
194
+ ),
195
+ plot_bgcolor='white',
196
+ height=400,
197
+ margin=dict(l=0, r=0, t=20, b=0),
198
+ showlegend=True,
199
+ legend=dict(
200
+ yanchor="top",
201
+ y=0.99,
202
+ xanchor="left",
203
+ x=0.01
204
+ )
205
+ )
206
+
207
+ st.plotly_chart(fig, use_container_width=True)
208
+
209
+ with st.expander("Pressure Data"):
210
+ st.write(combined_df)
211
+
212
+ # add details in a sub section with expander
213
+ with st.expander("Pressure Meter Details"):
214
+ st.write("""
215
+ The Pressure Meter is a visualization of the pressure waves in the game. It is a line chart of the cumulative pressure counts for each team over time.
216
+ """)
217
+
218
+ # Create two columns for charts
219
+ col1, col2 = st.columns(2)
220
+
221
+ with col1:
222
+ st.subheader("Cumulative Pressure Counts")
223
+ st.line_chart(combined_df, x='second_offset', y=['team1_cumulative', 'team2_cumulative'])
224
+
225
+ with col2:
226
+ st.subheader("Pressure Ratio Over Time")
227
+ st.area_chart(combined_df,
228
+ x='second_offset',
229
+ y=['team1_pressure_ratio', 'team2_pressure_ratio'])
230
+
231
+
232
+ # Show current dominance percentage
233
+ current_ratio = combined_df.iloc[-1]['pressure_balance']
234
+ if current_ratio > 0:
235
+ dominant_team = 'Team 1'
236
+ pressure_value = current_ratio
237
+ elif current_ratio < 0:
238
+ dominant_team = 'Team 2'
239
+ pressure_value = abs(current_ratio)
240
+ else:
241
+ dominant_team = 'Neutral'
242
+ pressure_value = 0
243
+
244
+ st.metric(
245
+ label="Dominant Team Pressure",
246
+ value=f"{dominant_team}",
247
+ delta=f"{pressure_value*100:.1f}%"
248
+ )
249
+
250
+ # After loading metadata
251
+ st.subheader("Data Files Analysis")
252
+
253
+ # Analyze the timestamps in the metadata
254
+ timestamps = sorted([int(ts) for ts in files.keys()])
255
+ time_diffs = [timestamps[i+1] - timestamps[i] for i in range(len(timestamps)-1)]
256
+
257
+ st.info(f"Number of data files: {len(files)}")
258
+ st.info(f"Time range: {datetime.fromtimestamp(timestamps[0])} to {datetime.fromtimestamp(timestamps[-1])}")
259
+ st.info(f"Time differences between files: {set(time_diffs)} seconds")
260
+
261
+ # Show the actual files and timestamps
262
+ with st.expander("Stream Metadata Details"):
263
+ st.write(metadata)
264
+ # Log the data range
265
+ st.write(f"Data range: {combined_df['second_offset'].min():.1f}s to {combined_df['second_offset'].max():.1f}s")
266
+ st.write(f"Total rows: {len(combined_df)}")
267
+
268
+ for ts in sorted(files.keys()):
269
+ st.text(f"Timestamp: {datetime.fromtimestamp(int(ts))} - File: {files[ts]}")
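
pressure_meter.py assumes a particular shape for the stream analytics metadata and per-chunk CSVs: `video_start_time` as an ISO timestamp, `output_files` mapping epoch-second keys to CSV paths relative to the metadata URL, optional `team1_color`/`team2_color` in 'rgb(r,g,b)' form, and CSV columns including `second_offset` and `pressure_balance`. The sketch below spells out that assumed shape with made-up values; file names and timestamps are illustrative only.

```python
# Illustrative (made-up) example of the metadata and CSV shape that pressure_meter.py reads.
example_metadata = {
    "video_start_time": "2025-02-09T18:00:00+00:00",   # ISO timestamp used as the base offset
    "team1_color": "rgb(54, 162, 235)",                 # optional; the page falls back to defaults
    "team2_color": "rgb(255, 99, 132)",
    "output_files": {
        "1739124000": "chunk_0001.csv",                 # epoch-second key -> CSV relative to the metadata URL
        "1739124060": "chunk_0002.csv",
    },
}

# Each referenced CSV is expected to contain at least these columns:
#
#   second_offset,pressure_balance
#   0,1
#   1,-1
```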