steffenc committed
Commit d561ca5 (unverified)
Parents: a9ac6b7 864cff1

Merge pull request #23 from macrocosm-os/dashboard

Files changed (4)
  1. api.py +147 -0
  2. app.py +145 -0
  3. requirements.txt +2 -1
  4. utils.py +411 -0
api.py ADDED
@@ -0,0 +1,147 @@
+
+import atexit
+import datetime
+
+from flask import Flask, request, jsonify
+from apscheduler.schedulers.background import BackgroundScheduler
+
+import utils
+
+app = Flask(__name__)
+
+
+# Global variables (saves time on loading data)
+state_vars = None
+reload_timestamp = datetime.datetime.now().strftime('%D %T')
+
+
+def load_data():
+    """
+    Reload the state variables
+    """
+    global state_vars, reload_timestamp
+    state_vars = utils.load_state_vars()
+
+    reload_timestamp = datetime.datetime.now().strftime('%D %T')
+
+    print(f'Reloaded data at {reload_timestamp}')
+
+
+def start_scheduler():
+    scheduler = BackgroundScheduler()
+    scheduler.add_job(func=load_data, trigger="interval", seconds=60*30)
+    scheduler.start()
+
+    # Shut down the scheduler when exiting the app
+    atexit.register(lambda: scheduler.shutdown())
+
+
+@app.route('/', methods=['GET'])
+def home():
+    return "Welcome to the Bittensor Prompting Leaderboard API!"
+
+
+@app.route('/updated', methods=['GET'])
+def updated():
+    return reload_timestamp
+
+
+@app.route('/data', methods=['GET'])
+@app.route('/data/<period>', methods=['GET'])
+def data(period=None):
+    """
+    Get the raw run data
+    """
+    assert period in ('24h', None), f"Invalid period: {period}. Must be '24h' or None."
+    df = state_vars["df_runs_24h"] if period == '24h' else state_vars["df_runs"]  # keys as returned by utils.load_state_vars()
+    return jsonify(
+        df.astype(str).to_dict(orient='records')
+    )
+
+
+@app.route('/productivity', methods=['GET'])
+@app.route('/productivity/<period>', methods=['GET'])
+def productivity_metrics(period=None):
+    """
+    Get the productivity metrics
+    """
+    assert period in ('24h', None), f"Invalid period: {period}. Must be '24h' or None."
+    df = state_vars["df_runs_24h"] if period == '24h' else state_vars["df_runs"]
+    return jsonify(
+        utils.get_productivity(df)
+    )
+
+
+@app.route('/throughput', methods=['GET'])
+@app.route('/throughput/<period>', methods=['GET'])
+def throughput_metrics(period=None):
+    """
+    Get the throughput metrics
+    """
+    assert period in ('24h', None), f"Invalid period: {period}. Must be '24h' or None."
+    df = state_vars["df_runs_24h"] if period == '24h' else state_vars["df_runs"]
+    return jsonify(utils.get_data_transferred(df))  # NOTE: get_data_transferred is not defined in utils.py in this diff
+
+
+@app.route('/metagraph', methods=['GET'])
+def metagraph():
+    """
+    Get the metagraph data
+    Returns:
+    - metagraph_data: List of dicts (from pandas DataFrame)
+    """
+    df_m = state_vars["metagraph"]
+
+    return jsonify(
+        df_m.to_dict(orient='records')
+    )
+
+
+@app.route('/leaderboard', methods=['GET'])
+@app.route('/leaderboard/<entity>', methods=['GET'])
+@app.route('/leaderboard/<entity>/<ntop>', methods=['GET'])
+def leaderboard(entity='identity', ntop=10):
+    """
+    Get the leaderboard data
+    Returns:
+    - leaderboard_data: List of dicts (from pandas DataFrame)
+    """
+    assert entity in utils.ENTITY_CHOICES, f"Invalid entity choice: {entity}"
+
+    df_miners = utils.get_leaderboard(
+        state_vars["metagraph"],
+        ntop=int(ntop),
+        entity_choice=entity
+    )
+
+    return jsonify(
+        df_miners.to_dict(orient='records')
+    )
+
+
+@app.route('/validator', methods=['GET'])
+def validator():
+    """
+    Get the validator data
+    Returns:
+    - validator_data: List of dicts (from pandas DataFrame)
+    """
+    df_m = state_vars["metagraph"]
+    df_validators = df_m.loc[df_m.validator_trust > 0]
+
+    return jsonify(
+        df_validators.to_dict(orient='records')
+    )
+
+
+if __name__ == '__main__':
+
+    load_data()
+    start_scheduler()
+
+    app.run(host='0.0.0.0', port=5001, debug=True)
+
+
+# to test locally
+# curl -X GET http://0.0.0.0:5001/data
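
With the server running locally (python api.py), the remaining routes can be exercised the same way. A quick sketch, using the host and port configured in __main__ above and the period/entity values accepted by the route definitions:

curl -X GET http://0.0.0.0:5001/updated
curl -X GET http://0.0.0.0:5001/productivity/24h
curl -X GET http://0.0.0.0:5001/leaderboard/hotkey/25
curl -X GET http://0.0.0.0:5001/validator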
app.py ADDED
@@ -0,0 +1,145 @@
+import time
+import pandas as pd
+import streamlit as st
+import plotly.express as px
+
+import utils
+
+_ = """
+[x] Define KPIs: Number of steps, number of completions and total generated tokens
+[x] Data pipeline I: pull run summary data from wandb
+[x] Data pipeline II: pull run event data from wandb (max 500 steps per run)
+[x] Task trends: Number of tasks over time
+[x] Reward trends I: average reward over time, by task
+[x] Reward trends II: average nonzero reward over time, by task
+[x] Reward trends III: average nonzero normalized reward over time, by task
+[x] Explain trends: show release dates to indicate sudden changes
+[ ] Miner trends: associate uids with miner rankings and plot top miner rewards vs network avg
+[ ] Baseline rewards I: compare the network trends with baseline model gpt-3.5-turbo
+[ ] Baseline rewards II: compare the network trends with baseline model gpt-4o
+[ ] Baseline rewards III: compare the network trends with baseline model zephyr
+[ ] Baseline rewards IV: compare the network trends with baseline model solar
+[ ] Baseline rewards V: compare the network trends with baseline model llama3 8B
+[ ] Baseline rewards VI: compare the network trends with baseline model llama3 70B
+
+---------
+"""
+
+st.title('Prompting Subnet Dashboard')
+st.markdown('<br>', unsafe_allow_html=True)
+
+# reload data periodically
+state_vars = utils.load_state_vars()
+
+df_runs = state_vars['df_runs']
+df_runs_24h = state_vars['df_runs_24h']
+df_vali = state_vars['df_vali']
+df_events = state_vars['df_events']
+df_task_counts = state_vars['df_task_counts']
+df_m = state_vars['metagraph']
+st.toast(f'Loaded {len(df_runs)} runs')
+
+#### ------ PRODUCTIVITY ------
+
+# Overview of productivity
+st.subheader('Productivity overview')
+st.info('Productivity metrics show how much data has been created by subnet 1')
+
+productivity = utils.get_productivity(df_runs)
+productivity_24h = utils.get_productivity(df_runs_24h)
+
+
+m1, m2, m3, m4 = st.columns(4)
+m1.metric('Competition duration', f'{productivity.get("duration").days} days')
+m2.metric('Total events', f'{productivity.get("total_events")/1e6:,.2f}M', delta=f'{productivity_24h.get("total_events")/1e6:,.2f}M (24h)')
+m3.metric('Total completions', f'{productivity.get("total_completions")/1e9:,.2f}B', delta=f'{productivity_24h.get("total_completions")/1e9:,.2f}B (24h)')
+m4.metric('Total dataset tokens', f'{productivity.get("total_tokens")/1e9:,.2f}B', delta=f'{productivity_24h.get("total_tokens")/1e9:,.2f}B (24h)')
+
+st.markdown('<br>', unsafe_allow_html=True)
+
+st.plotly_chart(
+    px.area(df_task_counts, y=df_task_counts.columns, title='Data Created by Task',
+            labels={'created_at': '', 'value': 'Total data created'},
+            ),
+    use_container_width=True,
+)
+
+st.markdown('<br>', unsafe_allow_html=True)
+
+# Overview of improvement
+st.subheader('Improvement overview')
+st.info('Subnet 1 is an endlessly improving system, where miners compete to produce high quality responses to a range of challenging tasks')
+
+
+TASK_CHOICES = {
+    'Question answering': 'qa',
+    'Summarization': 'summarization',
+    'Date-based question answering': 'date_qa',
+    'Math': 'math',
+    'Generic instruction': 'generic',
+    'Sentiment analysis': 'sentiment',
+    'Translation': 'translation',
+}
+
+with st.expander('Advanced settings'):
+    c1, c2 = st.columns(2)
+    remove_zero_rewards = c1.checkbox('Exclude zero rewards', value=True, help='Remove completions which scored zero rewards (failed responses, timeouts etc.)')
+    normalize_rewards = c1.checkbox('Normalize rewards', value=True, help='Scale rewards for each task to a maximum value of 1 (approx)')
+    show_releases = c1.checkbox('Show releases', value=False, help='Add annotations which indicate when major releases may have impacted network performance')
+    moving_avg_window = c2.slider('Moving avg. window', min_value=1, max_value=30, value=14, help='Window size to smooth data and make long term trends clearer')
+
+reward_col = 'normalized_rewards' if normalize_rewards else 'rewards'
+
+df_stats = utils.get_reward_stats(df_events, exclude_multiturn=True, freq='1D', remove_zero_rewards=remove_zero_rewards)
+
+
+task_choice_label = st.radio('Select task', list(TASK_CHOICES.keys()), index=0, horizontal=True)
+task_choice = TASK_CHOICES[task_choice_label]
+
+st.plotly_chart(
+    # add fillgradient to make it easier to see the trend
+    utils.plot_reward_trends(df_stats, task=task_choice, window=moving_avg_window, col=reward_col, annotate=show_releases, task_label=task_choice_label),
+    use_container_width=True,
+)
+
+st.markdown('<br>', unsafe_allow_html=True)
+
+
+#### ------ LEADERBOARD ------
+
+st.subheader('Leaderboard')
+st.info('The leaderboard shows the top miners by incentive.')
+m1, m2 = st.columns(2)
+ntop = m1.slider('Number of top miners to display', value=10, min_value=3, max_value=50, step=1)
+entity_choice = m2.radio('Select entity', utils.ENTITY_CHOICES, index=0, horizontal=True)
+
+df_miners = utils.get_leaderboard(df_m, ntop=ntop, entity_choice=entity_choice)
+
+# hide colorbar and don't show y axis
+st.plotly_chart(
+    px.bar(df_miners, x='I', color='I', hover_name=entity_choice, text=entity_choice if ntop < 20 else None,
+           labels={'I': 'Incentive', 'trust': 'Trust', 'stake': 'Stake', '_index': 'Rank'},
+           ).update_layout(coloraxis_showscale=False, yaxis_visible=False),
+    use_container_width=True,
+)
+
+
+with st.expander('Show raw metagraph data'):
+    st.dataframe(df_m)
+
+st.markdown('<br>', unsafe_allow_html=True)
+
+
+#### ------ LOGGED RUNS ------
+
+st.subheader('Logged runs')
+# st.info('The timeline shows the creation and last event time of each run.')
+# st.plotly_chart(
+#     px.timeline(df_runs, x_start='created_at', x_end='last_event_at', y='user', color='state',
+#                 labels={'created_at': 'Created at', 'last_event_at': 'Last event at', 'username': ''},
+#                 ),
+#     use_container_width=True
+# )
+
+with st.expander('Show raw run data'):
+    st.dataframe(df_runs)
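
Previewing the dashboard locally uses the standard Streamlit entry point (assuming the dependencies from requirements.txt below are installed):

streamlit run app.py

Note that the page is driven entirely by utils.load_state_vars(), so the first render triggers the full wandb download pipeline defined in utils.py; renders within the same cache window are then served from st.cache_data.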
requirements.txt CHANGED
@@ -2,4 +2,5 @@ git+https://github.com/macrocosm-os/prompting.git
 aiohttp
 deprecated
 aiohttp_apispec>=2.2.3
-aiofiles
+aiofiles
+streamlit
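
A local install is a sketch like the following; note that api.py additionally imports flask and apscheduler, which are not pinned here and would need to be installed separately:

pip install -r requirements.txt
pip install flask apscheduler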
utils.py ADDED
@@ -0,0 +1,411 @@
+import os
+import tqdm
+import time
+import glob
+import wandb
+from traceback import print_exc
+import streamlit as st
+import pandas as pd
+import bittensor as bt
+import plotly.express as px
+
+
+# TODO: Store the runs dataframe (as in sn1 dashboard) and top up with the ones created since the last snapshot
+# TODO: Store relevant wandb data in a database for faster access
+
+
+MIN_STEPS = 10  # minimum number of steps in a wandb run for it to be worth analyzing
+NETUID = 1
+BASE_PATH = 'macrocosmos/prompting-validators'
+NETWORK = 'finney'
+KEYS = ['_step', '_timestamp', 'task', 'query', 'reference', 'challenge', 'topic', 'subtopic']
+ABBREV_CHARS = 8
+ENTITY_CHOICES = ('identity', 'hotkey', 'coldkey')
+LOCAL_WANDB_PATH = './data/wandb'
+USERNAME = 'opentensor'
+
+api = wandb.Api(timeout=600)
+
+IDENTITIES = {
+    '5F4tQyWrhfGVcNhoqeiNsR6KjD4wMZ2kfhLj4oHYuyHbZAc3': 'opentensor',
+    '5Hddm3iBFD2GLT5ik7LZnT3XJUnRnN8PoeCFgGQgawUVKNm8': 'taostats',
+    '5HEo565WAy4Dbq3Sv271SAi7syBSofyfhhwRNjFNSM2gP9M2': 'foundry',
+    '5HK5tp6t2S59DywmHRWPBVJeJ86T61KjurYqeooqj8sREpeN': 'bittensor-guru',
+    '5FFApaS75bv5pJHfAp2FVLBj9ZaXuFDjEypsaBNc1wCfe52v': 'roundtable-21',
+    '5EhvL1FVkQPpMjZX4MAADcW42i3xPSF1KiCpuaxTYVr28sux': 'tao-validator',
+    '5FKstHjZkh4v3qAMSBa1oJcHCLjxYZ8SNTSz1opTv4hR7gVB': 'datura',
+    '5DvTpiniW9s3APmHRYn8FroUWyfnLtrsid5Mtn5EwMXHN2ed': 'first-tensor',
+    '5HbLYXUBy1snPR8nfioQ7GoA9x76EELzEq9j7F32vWUQHm1x': 'tensorplex',
+    '5CsvRJXuR955WojnGMdok1hbhffZyB4N5ocrv82f3p5A2zVp': 'owl-ventures',
+    '5CXRfP2ekFhe62r7q3vppRajJmGhTi7vwvb2yr79jveZ282w': 'rizzo',
+    '5HNQURvmjjYhTSksi8Wfsw676b4owGwfLR2BFAQzG7H3HhYf': 'neural-internet'
+}
+
+EXTRACTORS = {
+    'state': lambda x: x.state,
+    'run_id': lambda x: x.id,
+    'run_path': lambda x: os.path.join(BASE_PATH, x.id),
+    'user': lambda x: x.user.name[:16],
+    'username': lambda x: x.user.username[:16],
+    'created_at': lambda x: pd.Timestamp(x.created_at),
+    'last_event_at': lambda x: pd.Timestamp(x.summary.get('_timestamp'), unit='s'),
+
+    'netuid': lambda x: x.config.get('netuid'),
+    'mock': lambda x: x.config.get('neuron').get('mock'),
+    'sample_size': lambda x: x.config.get('neuron').get('sample_size'),
+    'timeout': lambda x: x.config.get('neuron').get('timeout'),
+    'epoch_length': lambda x: x.config.get('neuron').get('epoch_length'),
+    'disable_set_weights': lambda x: x.config.get('neuron').get('disable_set_weights'),
+
+    # This stuff is from the last logged event
+    'num_steps': lambda x: x.summary.get('_step'),
+    'runtime': lambda x: x.summary.get('_runtime'),
+    'query': lambda x: x.summary.get('query'),
+    'challenge': lambda x: x.summary.get('challenge'),
+    'reference': lambda x: x.summary.get('reference'),
+    'completions': lambda x: x.summary.get('completions'),
+
+    'version': lambda x: x.tags[0],
+    'spec_version': lambda x: x.tags[1],
+    'vali_hotkey': lambda x: x.tags[2],
+    # 'tasks_selected': lambda x: x.tags[3:],
+
+    # System metrics
+    'disk_read': lambda x: x.system_metrics.get('system.disk.in'),
+    'disk_write': lambda x: x.system_metrics.get('system.disk.out'),
+    # Really slow stuff below
+    # 'started_at': lambda x: x.metadata.get('startedAt'),
+    # 'disk_used': lambda x: x.metadata.get('disk').get('/').get('used'),
+    # 'commit': lambda x: x.metadata.get('git').get('commit')
+}
+
+
+def get_leaderboard(df, ntop=10, entity_choice='identity'):
+
+    df = df.loc[df.validator_permit == False]
+    df.index = range(df.shape[0])
+    return df.groupby(entity_choice).I.sum().sort_values().reset_index().tail(ntop)
+
+
+@st.cache_data()
+def get_metagraph(time):
+    print(f'Loading metagraph with time {time}')
+    subtensor = bt.subtensor(network=NETWORK)
+    m = subtensor.metagraph(netuid=NETUID)
+    meta_cols = ['I', 'stake', 'trust', 'validator_trust', 'validator_permit', 'C', 'R', 'E', 'dividends', 'last_update']
+
+    df_m = pd.DataFrame({k: getattr(m, k) for k in meta_cols})
+    df_m['uid'] = range(m.n.item())
+    df_m['hotkey'] = list(map(lambda a: a.hotkey, m.axons))
+    df_m['coldkey'] = list(map(lambda a: a.coldkey, m.axons))
+    df_m['ip'] = list(map(lambda a: a.ip, m.axons))
+    df_m['port'] = list(map(lambda a: a.port, m.axons))
+    df_m['coldkey'] = df_m.coldkey.str[:ABBREV_CHARS]
+    df_m['hotkey'] = df_m.hotkey.str[:ABBREV_CHARS]
+    df_m['identity'] = df_m.apply(lambda x: f'{x.hotkey} @ uid {x.uid}', axis=1)
+    return df_m
+
+
+@st.cache_data(show_spinner=False)
+def load_downloaded_runs(time, cols=KEYS):
+
+    list_cols = ['rewards', 'uids']
+    extra_cols = ['turn']
+    df_all = pd.DataFrame()
+
+    progress = st.progress(0, text='Loading downloaded data')
+    paths = glob.glob(os.path.join(LOCAL_WANDB_PATH, '*.parquet'))
+    for i, path in enumerate(paths):
+        run_id = path.split('/')[-1].split('.')[0]
+        frame = pd.read_parquet(path).dropna(subset=cols)
+        frame._timestamp = frame._timestamp.apply(pd.to_datetime, unit='s')
+        # handle missing extra cols such as turn which depend on the version of the codebase
+        found_extra_cols = [c for c in frame.columns if c in extra_cols]
+        df_long = frame[cols + list_cols + found_extra_cols].explode(list_cols)
+
+        prog_msg = f'Loading data {i/len(paths)*100:.0f}%'
+        progress.progress(i/len(paths), text=f'{prog_msg}... **loading** `{run_id}`')
+
+        df_all = pd.concat([df_all, df_long.assign(run_id=run_id)], ignore_index=True)
+
+    progress.empty()
+
+    # Ensure we have a consistent naming scheme for tasks
+    task_mapping = {
+        'date-based question answering': 'date_qa',
+        'question-answering': 'qa',
+    }
+    df_all.task = df_all.task.apply(lambda x: task_mapping.get(x, x))
+
+    # Runs which do not have a turn field are imputed to be turn zero (single turn)
+    df_all.turn.fillna(0, inplace=True)
+
+    df_all.sort_values(by=['_timestamp'], inplace=True)
+
+    return df_all
+
+
+@st.cache_data(show_spinner=False)
+def build_data(timestamp=None, path=BASE_PATH, min_steps=MIN_STEPS, use_cache=True):
+
+    save_path = '_saved_runs.csv'
+    filters = {}
+    df = pd.DataFrame()
+    # Load the last saved runs so that we only need to update the new ones
+    if use_cache and os.path.exists(save_path):
+        df = pd.read_csv(save_path)
+        df['created_at'] = pd.to_datetime(df['created_at'])
+        df['last_event_at'] = pd.to_datetime(df['last_event_at'])
+
+        timestamp_str = df['last_event_at'].max().isoformat()
+        filters.update({'updated_at': {'$gte': timestamp_str}})
+
+    progress = st.progress(0, text='Loading data')
+
+    runs = api.runs(path, filters=filters)
+
+    run_data = []
+    n_events = 0
+    for i, run in enumerate(tqdm.tqdm(runs, total=len(runs))):
+        num_steps = run.summary.get('_step', 0)
+        if num_steps < min_steps:
+            continue
+        n_events += num_steps
+        prog_msg = f'Loading data {i/len(runs)*100:.0f}% (total {n_events:,.0f} events)'
+        progress.progress(i/len(runs), text=f'{prog_msg}... **downloading** `{os.path.join(*run.path)}`')
+
+        run_data.append(run)
+
+    progress.empty()
+
+    df_new = pd.DataFrame([{k: func(run) for k, func in EXTRACTORS.items()} for run in tqdm.tqdm(run_data, total=len(run_data))])
+    df = pd.concat([df, df_new], ignore_index=True)
+    df['duration'] = (df.last_event_at - df.created_at).round('s')
+    df['identity'] = df['vali_hotkey'].map(IDENTITIES).fillna('unknown')
+    df['vali_hotkey'] = df['vali_hotkey'].str[:ABBREV_CHARS]
+
+    # Drop events that are not related to validator queries
+    df.dropna(subset='query', inplace=True)
+
+    print(df.completions.apply(type).value_counts())
+    # Assumes completions is in the frame; rows loaded from the CSV cache come back as strings
+    df['completions'] = df['completions'].apply(lambda x: x if isinstance(x, list) else eval(x))
+
+    df['completion_words'] = df.completions.apply(lambda x: sum([len(xx.split()) for xx in x]) if isinstance(x, list) else 0)
+    df['validator_words'] = df.apply(lambda x: len(str(x.query).split()) + len(str(x.challenge).split()) + len(str(x.reference).split()), axis=1)
+
+    df.to_csv(save_path, index=False)
+
+    return df
+
+
+@st.cache_data()
+def normalize_rewards(df, turn=0, percentile=0.98):
+    top_reward_stats = df.loc[df.turn == turn].astype({'rewards': float}).groupby('task').rewards.quantile(percentile)
+
+    df['best_reward'] = df.task.map(top_reward_stats)
+    df['normalized_rewards'] = df['rewards'].astype(float) / df['best_reward']
+    return df
+
+
+@st.cache_data(show_spinner=False)
+def download_runs(time, df_vali):
+
+    pbar = tqdm.tqdm(df_vali.index, total=len(df_vali))
+
+    progress = st.progress(0, text='Loading data')
+
+    for i, idx in enumerate(pbar):
+        row = df_vali.loc[idx]
+
+        prog_msg = f'Downloading data {i/len(df_vali)*100:.0f}%'
+        progress.progress(i/len(df_vali), text=f'{prog_msg}... **downloading** `{row.run_id}`')
+
+        save_path = f'data/wandb/{row.run_id}.parquet'
+        if os.path.exists(save_path):
+            pbar.set_description(f'>> Skipping {row.run_id!r} because file {save_path!r} already exists')
+            continue
+
+        try:
+            pbar.set_description(f'* Downloading run {row.run_id!r}')  # tqdm.set_description takes no flush argument
+            run = api.run(row.run_path)
+
+            # By default we just download a subset of events (500 most recent)
+            df = run.history()
+            df.to_parquet(save_path)
+        except KeyboardInterrupt:
+            break
+        except Exception:
+            pbar.set_description(f'- Something went wrong with {row.run_id!r}')
+            print_exc()
+
+    progress.empty()
+
+
+def get_productivity(df_runs):
+
+    total_duration = df_runs.last_event_at.max() - df_runs.created_at.min()
+    total_steps = df_runs.num_steps.sum()
+    total_completions = (df_runs.num_steps * df_runs.sample_size).sum()
+    total_completion_words = (df_runs.num_steps * df_runs.completion_words).sum()
+    total_completion_tokens = round(total_completion_words / 0.75)
+    total_validator_words = (df_runs.num_steps * df_runs.apply(lambda x: len(str(x.query).split()) + len(str(x.challenge).split()) + len(str(x.reference).split()), axis=1)).sum()
+    total_validator_tokens = round(total_validator_words / 0.75)
+    total_dataset_tokens = total_completion_tokens + total_validator_tokens
+
+    return {
+        'duration': total_duration,
+        'total_events': total_steps,
+        'total_completions': total_completions,
+        'total_completion_tokens': total_completion_tokens,
+        'total_validator_tokens': total_validator_tokens,
+        'total_tokens': total_dataset_tokens,
+    }
+
+
+@st.cache_data(show_spinner=False)
+def get_reward_stats(df, exclude_multiturn=True, freq='1D', remove_zero_rewards=True, agg='mean', date_min='2024-01-22', date_max='2024-06-25'):
+
+    df = df.loc[df._timestamp.between(pd.Timestamp(date_min), pd.Timestamp(date_max))]
+    if exclude_multiturn:
+        df = df.loc[df.turn == 0]
+    if remove_zero_rewards:
+        df = df.loc[df.rewards > 0]
+
+    groups = ['run_id', pd.Grouper(key='_timestamp', freq=freq), 'task']
+    return df.groupby(groups).agg({'rewards': agg, 'normalized_rewards': agg})
+
+
+def get_release_dates():
+    release_dates = pd.DataFrame([
+        {'version': '1.0.0', 'release_date': pd.Timestamp(month=1, day=22, year=2024), 'note': '', 'model': 'zephyr', 'tasks_affected': ['qa', 'summarization']},
+        {'version': '1.0.1', 'release_date': pd.Timestamp(month=1, day=22, year=2024), 'note': '', 'model': 'zephyr', 'tasks_affected': []},
+        {'version': '1.0.2', 'release_date': pd.Timestamp(month=1, day=24, year=2024), 'note': '', 'model': 'zephyr', 'tasks_affected': ['qa', 'summarization']},
+        {'version': '1.0.3', 'release_date': pd.Timestamp(month=2, day=14, year=2024), 'note': '', 'model': 'zephyr', 'tasks_affected': []},
+        {'version': '1.0.4', 'release_date': pd.Timestamp(month=2, day=15, year=2024), 'note': '', 'model': 'zephyr', 'tasks_affected': []},
+        {'version': '1.1.0', 'release_date': pd.Timestamp(month=2, day=21, year=2024), 'note': 'decay scores', 'model': 'zephyr', 'tasks_affected': ['date_qa', 'math']},
+        {'version': '1.1.1', 'release_date': pd.Timestamp(month=2, day=28, year=2024), 'note': 'reduce penalty weight', 'model': 'zephyr', 'tasks_affected': ['date_qa', 'qa', 'summarization']},
+        {'version': '1.1.2', 'release_date': pd.Timestamp(month=2, day=29, year=2024), 'note': '', 'model': 'zephyr', 'tasks_affected': []},
+        {'version': '1.1.3', 'release_date': pd.Timestamp(month=3, day=11, year=2024), 'note': '', 'model': 'zephyr', 'tasks_affected': []},
+        {'version': '1.2.0', 'release_date': pd.Timestamp(month=3, day=19, year=2024), 'note': 'vllm', 'model': 'zephyr', 'tasks_affected': []},
+        {'version': '1.3.0', 'release_date': pd.Timestamp(month=3, day=27, year=2024), 'note': '', 'model': 'solar', 'tasks_affected': ['all', 'math']},
+        {'version': '2.0.0', 'release_date': pd.Timestamp(month=4, day=4, year=2024), 'note': 'streaming', 'model': 'solar', 'tasks_affected': ['math', 'qa', 'summarization']},
+        {'version': '2.1.0', 'release_date': pd.Timestamp(month=4, day=18, year=2024), 'note': 'chattensor prompt', 'model': 'solar', 'tasks_affected': ['generic']},
+        {'version': '2.2.0', 'release_date': pd.Timestamp(month=5, day=1, year=2024), 'note': 'multiturn + paraphrase', 'model': 'solar', 'tasks_affected': ['sentiment', 'translation', 'math']},
+        {'version': '2.3.0', 'release_date': pd.Timestamp(month=5, day=20, year=2024), 'note': 'llama + freeform date', 'model': 'llama', 'tasks_affected': ['all', 'date_qa']},
+        {'version': '2.3.1', 'release_date': pd.Timestamp(month=5, day=21, year=2024), 'note': '', 'model': 'llama', 'tasks_affected': ['date_qa']},
+        {'version': '2.4.0', 'release_date': pd.Timestamp(month=6, day=5, year=2024), 'note': 'streaming penalty', 'model': 'llama', 'tasks_affected': []},
+        {'version': '2.4.1', 'release_date': pd.Timestamp(month=6, day=6, year=2024), 'note': '', 'model': 'llama', 'tasks_affected': []},
+        {'version': '2.4.2', 'release_date': pd.Timestamp(month=6, day=7, year=2024), 'note': '', 'model': 'llama', 'tasks_affected': []},  # duplicate entry removed
+        {'version': '2.5.0', 'release_date': pd.Timestamp(month=6, day=18, year=2024), 'note': 'reduce multiturn', 'model': 'llama', 'tasks_affected': ['translation', 'sentiment']},
+        {'version': '2.5.1', 'release_date': pd.Timestamp(month=6, day=25, year=2024), 'note': 'reduce timeout', 'model': 'llama', 'tasks_affected': []},
+    ])
+    return release_dates
+
+
+def plot_reward_trends(df_stats, task='qa', window=14, col='normalized_rewards', annotate=False, task_label='Question answering'):
+
+    stats = df_stats.reset_index()
+    release_dates = get_release_dates()
+    stats_task = stats.loc[(stats.task == task)].sort_values(by='_timestamp')
+    stats_task['rewards_ma'] = stats_task[col].rolling(window, min_periods=0).mean()
+    fig = px.area(stats_task,
+                  x='_timestamp', y='rewards_ma',
+                  title=f'Reward Trend for {task_label} Task',
+                  labels={'rewards_ma': f'Rewards [{window} day avg.]', '_timestamp': ''},
+                  width=800, height=600,
+                  )
+
+    if not annotate:
+        return fig
+
+    # Add annotations based on relevant releases
+    for idx, row in release_dates.iterrows():
+        if all(key not in row['tasks_affected'] for key in ('all', task)):
+            continue
+        # TODO add annotation or something
+        fig.add_vline(row['release_date'], line_color='red', opacity=0.6, line_dash='dot', line_width=1)
+
+    return fig
+
+
+@st.cache_data()
+def get_task_counts(df_runs, df_events):
+    # Get mapping from run id to prompting repo version
+    run_to_version = df_runs.set_index('run_id').version.to_dict()
+
+    df_events['version'] = df_events.run_id.map(run_to_version)
+
+    def version_to_spec(version):
+        # cast to int so the arithmetic is numeric (split() yields strings)
+        major, minor, patch = map(int, version.split('.'))
+        return 10_000 * major + 100 * minor + patch
+
+    def get_closest_prev_version(version, my_versions):
+
+        ref_spec = version_to_spec(version)
+        my_specs = list(map(version_to_spec, my_versions))
+
+        match = my_specs[0]
+        for spec in my_specs[1:]:
+            if spec > ref_spec:
+                break
+            match = spec
+
+        return my_versions[my_specs.index(match)]
+
+    # Now estimate the distribution of tasks for each version using the event data
+    task_rate = df_events.groupby('version').task.value_counts(normalize=True).unstack().fillna(0)
+    # Impute missing versions
+    for v in sorted(df_runs.version.unique()):
+        if v not in task_rate.index:
+            prev_version = get_closest_prev_version(v, list(task_rate.index))
+            print(f'Imputing version {v} with task rate from closest previous version {prev_version!r}')
+            task_rate.loc[v] = task_rate.loc[prev_version]
+
+    # get estimated number of each task generated in every run using summary dataframe
+    task_counts = df_runs.set_index('created_at').sort_index().apply(lambda x: round(task_rate.loc[x.version] * x.num_steps), axis=1).cumsum()
+    return task_counts
+
+
+def load_state_vars(username=USERNAME, percentile=0.95):
+
+    UPDATE_INTERVAL = 600
+
+    df_runs = build_data(time.time()//UPDATE_INTERVAL, use_cache=True)
+
+    df_runs = df_runs.loc[df_runs.netuid.isin([1, 61, 102])]
+    st.toast(f'Loaded {len(df_runs)} runs')
+
+    df_vali = df_runs.loc[df_runs.username == username]
+
+    download_runs(time.time()//UPDATE_INTERVAL, df_vali)
+
+    df_events = load_downloaded_runs(time.time()//UPDATE_INTERVAL)
+    df_events = normalize_rewards(df_events, percentile=percentile)
+
+    yesterday = pd.Timestamp.now() - pd.Timedelta('1d')
+    runs_alive_24h_ago = (df_runs.last_event_at > yesterday)
+
+    df_runs_24h = df_runs.loc[runs_alive_24h_ago]
+
+    # weight factor indicates the fraction of events that happened within the last 24 hours
+    fraction = 1 - (yesterday - df_runs_24h.created_at) / (pd.Timestamp.now() - df_runs_24h.created_at)
+    df_runs_24h['fraction'] = fraction.clip(0, 1)
+    df_runs_24h['num_steps'] *= fraction.clip(0, 1)
+
+    df_task_counts = get_task_counts(df_runs, df_events)
+
+    df_m = get_metagraph(time.time()//UPDATE_INTERVAL)
+
+    return {
+        'df_runs': df_runs,
+        'df_runs_24h': df_runs_24h,
+        'df_vali': df_vali,
+        'df_events': df_events,
+        'metagraph': df_m,
+        'df_task_counts': df_task_counts
+    }
+
+
+if __name__ == '__main__':
+
+    print('Loading runs')
+    df = build_data()  # was load_runs(), which is not defined in this module
+
+    df.to_csv('test_wandb_data.csv', index=False)
+    print(df)
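
One idiom in utils.py worth calling out: load_state_vars passes time.time()//UPDATE_INTERVAL as the first argument to every st.cache_data-decorated function. Because st.cache_data memoizes on argument values, the floored timestamp acts as a cache-busting key: calls within the same 600-second window return the cached result, and the first call in a new window recomputes. A minimal sketch of the pattern (expensive_load and its body are hypothetical stand-ins):

import time
import streamlit as st

UPDATE_INTERVAL = 600  # seconds

@st.cache_data()
def expensive_load(cache_key):
    # cache_key is never used in the body; it exists only to control memoization
    return {'loaded_at': time.time()}  # placeholder for slow I/O

# Same key for 600 s -> cached result; new window -> fresh call
data = expensive_load(time.time() // UPDATE_INTERVAL)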