steffenc committed
Commit b45152a · Parent: 101093d

Major changes for efficiency, detail and presentation

Files changed (4):
  1. meta_plotting.py +8 -8
  2. meta_utils.py +20 -9
  3. metagraph.py +0 -169
  4. multistats.py +162 -62
meta_plotting.py CHANGED
@@ -1,7 +1,7 @@
 import numpy as np
 import plotly.express as px
 
-def plot_trace(df, col='emission', agg='mean', ntop=10, hotkeys=None, hotkey_regex=None, abbrev=8, type='Miners'):
+def plot_trace(df, col='emission', agg='mean', time_col='timestamp', ntop=10, hotkeys=None, hotkey_regex=None, abbrev=8, type='Miners'):
 
     if hotkeys is not None:
         df = df.loc[df.hotkey.isin(hotkeys)]
@@ -10,14 +10,14 @@ def plot_trace(df, col='emission', agg='mean', ntop=10, hotkeys=None, hotkey_reg
 
     top_miners = df.groupby('hotkey')[col].agg(agg).sort_values(ascending=False)
 
-    stats = df.loc[df.hotkey.isin(top_miners.index[:ntop])].sort_values(by=['timestamp'])
+    stats = df.loc[df.hotkey.isin(top_miners.index[:ntop])].sort_values(by=time_col)
 
     stats['hotkey_abbrev'] = stats.hotkey.str[:abbrev]
     stats['coldkey_abbrev'] = stats.coldkey.str[:abbrev]
     stats['rank'] = stats.hotkey.map({k:i for i,k in enumerate(top_miners.index, start=1)})
 
-    return px.line(stats.sort_values(by=['timestamp','rank']),
-                   x='timestamp', y=col, color='coldkey_abbrev', line_group='hotkey_abbrev',
+    return px.line(stats.sort_values(by=[time_col,'rank']),
+                   x=time_col, y=col, color='coldkey_abbrev', line_group='hotkey_abbrev',
                    hover_data=['hotkey','rank'],
                    labels={col:col.title(),'timestamp':'','coldkey_abbrev':f'Coldkey (first {abbrev} chars)','hotkey_abbrev':f'Hotkey (first {abbrev} chars)'},
                    title=f'Top {ntop} {type}, by {col.title()}',
@@ -25,18 +25,18 @@ def plot_trace(df, col='emission', agg='mean', ntop=10, hotkeys=None, hotkey_reg
     ).update_traces(opacity=0.7)
 
 
-def plot_cabals(df, sel_col='coldkey', count_col='hotkey', values=None, ntop=10, abbr=8):
+def plot_cabals(df, sel_col='coldkey', count_col='hotkey', time_col='timestamp', values=None, ntop=10, abbr=8):
 
     if values is None:
         values = df[sel_col].value_counts().sort_values(ascending=False).index[:ntop].tolist()
         print(f'Automatically selected {sel_col!r} = {values!r}')
 
     df = df.loc[df[sel_col].isin(values)]
-    rates = df.groupby(['timestamp',sel_col])[count_col].nunique().reset_index()
+    rates = df.groupby([time_col,sel_col])[count_col].nunique().reset_index()
     abbr_col = f'{sel_col} (first {abbr} chars)'
    rates[abbr_col] = rates[sel_col].str[:abbr]
-    return px.line(rates.melt(id_vars=['timestamp',sel_col,abbr_col]),
-                   x='timestamp', y='value', color=abbr_col,
+    return px.line(rates.melt(id_vars=[time_col,sel_col,abbr_col]),
+                   x=time_col, y='value', color=abbr_col,
                    #facet_col='variable', facet_col_wrap=1,
                    labels={'value':f'Number of Unique {count_col.title()}s per {sel_col.title()}','timestamp':''},
                    category_orders={abbr_col:[ v[:abbr] for v in values]},
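Both plotting helpers now take the time column as an argument instead of hard-coding 'timestamp'. A minimal usage sketch, assuming `df` is a metagraph snapshot dataframe with timestamp, hotkey, coldkey and emission columns (for example the output of load_metagraphs in meta_utils.py); the calls below are illustrative and are not part of the commit:

from meta_plotting import plot_trace, plot_cabals

# Top-10 miners by mean emission, one line per hotkey, coloured by coldkey.
fig = plot_trace(df, col='emission', agg='mean', time_col='timestamp', ntop=10)
fig.show()

# Number of unique hotkeys per coldkey over time, for the 10 largest coldkeys.
plot_cabals(df, sel_col='coldkey', count_col='hotkey', time_col='timestamp', ntop=10).show()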
meta_utils.py CHANGED
@@ -1,10 +1,15 @@
 import os
 import glob
 import tqdm
-import pickle
+import dill as pickle
 import subprocess
 import pandas as pd
+import datetime
+from functools import lru_cache
 
+block_time_500k = datetime.datetime(2023, 5, 29, 5, 29, 0)
+block_time_800k = datetime.datetime(2023, 7, 9, 21, 32, 48)
+dt = (pd.Timestamp(block_time_800k)-pd.Timestamp(block_time_500k))/(800_000-500_000)
 
 def run_subprocess(*args):
     # Trigger the multigraph.py script to run and save metagraph snapshots
@@ -18,31 +23,37 @@ def load_metagraph(path, extra_cols=None, rm_cols=None):
 
     df = pd.DataFrame(metagraph.axons)
     df['block'] = metagraph.block.item()
+    df['timestamp'] = block_time_500k + dt*(df['block']-500_000)
     df['difficulty'] = metagraph.difficulty
     for c in extra_cols:
         vals = getattr(metagraph,c)
         df[c] = vals
 
     return df.drop(columns=rm_cols)
 
+@lru_cache(maxsize=16)
 def load_metagraphs(block_start, block_end, block_step=1000, datadir='data/metagraph/1/', extra_cols=None):
 
     if extra_cols is None:
         extra_cols = ['total_stake','ranks','incentive','emission','consensus','trust','validator_trust','dividends']
 
     blocks = range(block_start, block_end, block_step)
-    filenames = sorted(path for path in os.listdir(datadir) if int(path.split('.')[0]) in blocks)
-
+    print(f'Loading blocks {blocks[0]}-{blocks[-1]} from {datadir}')
+    filenames = sorted(filename for filename in os.listdir(datadir) if int(filename.split('.')[0]) in blocks)
+    print(f'Found {len(filenames)} files in {datadir}')
+
     metagraphs = []
 
     pbar = tqdm.tqdm(filenames)
     for filename in pbar:
         pbar.set_description(f'Processing {filename}')
 
-        metagraph = load_metagraph(os.path.join(datadir, filename), extra_cols=extra_cols, rm_cols=['protocol','placeholder1','placeholder2'])
-
-        metagraphs.append(metagraph)
-
+        try:
+            metagraph = load_metagraph(os.path.join(datadir, filename), extra_cols=extra_cols, rm_cols=['protocol','placeholder1','placeholder2'])
+
+            metagraphs.append(metagraph)
+        except Exception as e:
+            print(f'filename {filename!r} generated an exception: { e }')
+
     return pd.concat(metagraphs)
 
-load_metagraphs(block_start=700_000, block_end=800_000, block_step=1000)
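The new timestamp column is an estimate rather than a recorded value: the module pins the wall-clock time of blocks 500,000 and 800,000 and linearly interpolates between them, which assumes a roughly constant block interval (about 12 seconds). A small self-contained sketch of that calculation; the estimate_timestamp helper is illustrative and not part of the module:

import datetime
import pandas as pd

# Reference wall-clock times for two known blocks (values as in meta_utils.py).
block_time_500k = datetime.datetime(2023, 5, 29, 5, 29, 0)
block_time_800k = datetime.datetime(2023, 7, 9, 21, 32, 48)

# Average time per block between the two reference points (roughly 12 seconds).
dt = (pd.Timestamp(block_time_800k) - pd.Timestamp(block_time_500k)) / (800_000 - 500_000)

def estimate_timestamp(block: int) -> pd.Timestamp:
    # Linear interpolation (or extrapolation) from the block-500k anchor.
    return pd.Timestamp(block_time_500k) + dt * (block - 500_000)

print(estimate_timestamp(650_000))  # roughly halfway between the two reference times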
 
 
metagraph.py DELETED
@@ -1,169 +0,0 @@
-import streamlit as st
-from meta_utils import run_subprocess, load_metagraphs
-# from opendashboards.assets import io, inspect, metric, plot
-from meta_plotting import plot_trace, plot_cabals
-
-DEFAULT_SRC = 'miner'
-DEFAULT_NTOP = 10
-DEFAULT_UID_NTOP = 10
-
-# Set app config
-st.set_page_config(
-    page_title='Validator Dashboard',
-    menu_items={
-        'Report a bug': "https://github.com/opentensor/dashboards/issues",
-        'About': """
-        This dashboard is part of the OpenTensor project. \n
-        """
-    },
-    layout = "centered"
-)
-
-st.title('Metagraph :red[Analysis] Dashboard :eyes:')
-# add vertical space
-st.markdown('#')
-st.markdown('#')
-
-
-with st.spinner(text=f'Loading data...'):
-    df = load_metagraphs()
-
-blocks = df.block.unique()
-
-# metric.wandb(df_runs)
-
-# add vertical space
-st.markdown('#')
-st.markdown('#')
-
-tab1, tab2, tab3, tab4 = st.tabs(["Health", "Miners", "Validators", "Block"])
-
-
-### Wandb Runs ###
-with tab1:
-
-    st.markdown('#')
-    st.header(":violet[Wandb] Runs")
-
-    run_msg = st.info("Select a single run or compare multiple runs")
-    selected_runs = st.multiselect(f'Runs ({len(df_runs)})', df_runs.id, default=DEFAULT_SELECTED_RUNS, key='runs')
-
-    # Load data if new runs selected
-    if not selected_runs:
-        # open a dialog to select runs
-        run_msg.error("Please select at least one run")
-        st.snow()
-        st.stop()
-
-    df = io.load_data(df_runs.loc[df_runs.id.isin(selected_runs)], load=True, save=True)
-    df_long = inspect.explode_data(df)
-    df_weights = inspect.weights(df)
-
-    metric.runs(df, df_long, selected_runs)
-
-    with st.expander(f'Show :violet[raw] data for {len(selected_runs)} selected runs'):
-        inspect.run_event_data(df_runs,df, selected_runs)
-
-
-### UID Health ###
-with tab2:
-
-    st.markdown('#')
-    st.header("UID :violet[Health]")
-    st.info(f"Showing UID health metrics for **{len(selected_runs)} selected runs**")
-
-    uid_src = st.radio('Select one:', ['followup', 'answer'], horizontal=True, key='uid_src')
-
-    metric.uids(df_long, uid_src)
-
-    with st.expander(f'Show UID **{uid_src}** weights data for **{len(selected_runs)} selected runs**'):
-
-        uids = st.multiselect('UID:', sorted(df_long[f'{uid_src}_uids'].unique()), key='uid')
-        st.markdown('#')
-        st.subheader(f"UID {uid_src.title()} :violet[Weights]")
-
-        plot.weights(
-            df_weights,
-            uids=uids,
-        )
-
-    with st.expander(f'Show UID **{uid_src}** leaderboard data for **{len(selected_runs)} selected runs**'):
-
-        st.markdown('#')
-        st.subheader(f"UID {uid_src.title()} :violet[Leaderboard]")
-        uid_col1, uid_col2 = st.columns(2)
-        uid_ntop = uid_col1.slider('Number of UIDs:', min_value=1, max_value=50, value=DEFAULT_UID_NTOP, key='uid_ntop')
-        uid_agg = uid_col2.selectbox('Aggregation:', ('mean','min','max','size','nunique'), key='uid_agg')
-
-        plot.leaderboard(
-            df,
-            ntop=uid_ntop,
-            group_on=f'{uid_src}_uids',
-            agg_col=f'{uid_src}_rewards',
-            agg=uid_agg
-        )
-
-
-    with st.expander(f'Show UID **{uid_src}** diversity data for **{len(selected_runs)} selected runs**'):
-
-        st.markdown('#')
-        st.subheader(f"UID {uid_src.title()} :violet[Diversity]")
-        rm_failed = st.checkbox(f'Remove failed **{uid_src}** completions', value=True)
-        plot.uid_diversty(df, rm_failed)
-
-
-### Completions ###
-with tab3:
-
-    st.markdown('#')
-    st.subheader('Completion :violet[Leaderboard]')
-    completion_info = st.empty()
-
-    msg_col1, msg_col2 = st.columns(2)
-    completion_src = msg_col1.radio('Select one:', ['followup', 'answer'], horizontal=True, key='completion_src')
-    completion_info.info(f"Showing **{completion_src}** completions for **{len(selected_runs)} selected runs**")
-
-    completion_ntop = msg_col2.slider('Top k:', min_value=1, max_value=50, value=DEFAULT_COMPLETION_NTOP, key='completion_ntop')
-
-    completion_col = f'{completion_src}_completions'
-    reward_col = f'{completion_src}_rewards'
-    uid_col = f'{completion_src}_uids'
-
-    completions = inspect.completions(df_long, completion_col)
-
-    # Get completions with highest average rewards
-    plot.leaderboard(
-        df,
-        ntop=completion_ntop,
-        group_on=completion_col,
-        agg_col=reward_col,
-        agg='mean',
-        alias=True
-    )
-
-    with st.expander(f'Show **{completion_src}** completion rewards data for **{len(selected_runs)} selected runs**'):
-
-        st.markdown('#')
-        st.subheader('Completion :violet[Rewards]')
-
-        completion_select = st.multiselect('Completions:', completions.index, default=completions.index[:3].tolist())
-        # completion_regex = st.text_input('Completion regex:', value='', key='completion_regex')
-
-        plot.completion_rewards(
-            df,
-            completion_col=completion_col,
-            reward_col=reward_col,
-            uid_col=uid_col,
-            ntop=completion_ntop,
-            completions=completion_select,
-        )
-
-
-### Prompt-based scoring ###
-with tab4:
-    # coming soon
-    st.info('Prompt-based scoring coming soon')
-
-    # st.dataframe(df_long_long.filter(regex=prompt_src).head())
-
multistats.py CHANGED
@@ -1,23 +1,24 @@
 import os
-import warnings
 import re
+import argparse
 import tqdm
 import wandb
-from traceback import print_exc
+from traceback import format_exc
 import plotly.express as px
 import pandas as pd
 from concurrent.futures import ProcessPoolExecutor
 
 import opendashboards.utils.utils as utils
+import opendashboards.utils.aggregate as aggregate
 
 from IPython.display import display
 
 api= wandb.Api(timeout=60)
 wandb.login(anonymous="allow")
 
-def pull_wandb_runs(project='openvalidators', filters=None, min_steps=50, max_steps=100_000, ntop=10, summary_filters=None ):
+def pull_wandb_runs(project='openvalidators', filters=None, min_steps=50, max_steps=100_000, ntop=10, netuid=None, summary_filters=None ):
     # TODO: speed this up by storing older runs
 
     all_runs = api.runs(project, filters=filters)
     print(f'Using {ntop}/{len(all_runs)} runs with more than {min_steps} events')
     pbar = tqdm.tqdm(all_runs)
@@ -29,6 +30,8 @@ def pull_wandb_runs(project='openvalidators', filters=None, min_steps=50, max_st
         summary = run.summary
         if summary_filters is not None and not summary_filters(summary):
             continue
+        if netuid is not None and summary.get('netuid') != netuid:
+            continue
         step = summary.get('_step',0)
         if step < min_steps or step > max_steps:
             # warnings.warn(f'Skipped run `{run.name}` because it contains {step} events (<{min_steps})')
@@ -60,6 +63,7 @@ def pull_wandb_runs(project='openvalidators', filters=None, min_steps=50, max_st
             'start_time': pd.to_datetime(end_time-duration, unit="s"),
             'end_time': pd.to_datetime(end_time, unit="s"),
             'duration': pd.to_timedelta(duration, unit="s").round('s'),
+            'netuid': run.config.get('netuid'),
             **tags
         })
         n_events += step
@@ -85,38 +89,60 @@ def plot_gantt(df_runs):
     fig.update_yaxes(tickfont_size=8, title='')
     fig.show()
 
 
-def load_data(run_id, run_path=None, load=True, save=False, timeout=30):
-
-    file_path = os.path.join('data/runs/',f'history-{run_id}.csv')
+def clean_data(df):
+    return df.dropna(subset=df.filter(regex='completions|rewards').columns, how='any').dropna(axis=1, how='all')
+
+def explode_data(df):
+    list_cols = utils.get_list_col_lengths(df)
+    return utils.explode_data(df, list(list_cols.keys())).apply(pd.to_numeric, errors='ignore')
+
+
+def load_data(run_id, run_path=None, load=True, save=False, explode=True):
+
+    file_path = os.path.join('data/runs/',f'history-{run_id}.parquet')
 
     if load and os.path.exists(file_path):
-        df = pd.read_csv(file_path, nrows=None)
+        df = pd.read_parquet(file_path)
         # filter out events with missing step length
         df = df.loc[df.step_length.notna()]
 
         # detect list columns which as stored as strings
         list_cols = [c for c in df.columns if df[c].dtype == "object" and df[c].str.startswith("[").all()]
         # convert string representation of list to list
-        df[list_cols] = df[list_cols].applymap(eval, na_action='ignore')
+        # df[list_cols] = df[list_cols].apply(lambda x: eval(x, {'__builtins__': None}) if pd.notna(x) else x)
+        try:
+            df[list_cols] = df[list_cols].applymap(eval, na_action='ignore')
+        except ValueError as e:
+            print(f'Error loading {file_path!r} when converting columns {list_cols} to list: {e}')
 
     else:
         # Download the history from wandb and add metadata
         run = api.run(run_path)
         df = pd.DataFrame(list(run.scan_history()))
 
+        # Remove rows with missing completions or rewards, which will be stuff related to weights
+        df.dropna(subset=df.filter(regex='completions|rewards').columns, how='any', inplace=True)
+
         print(f'Downloaded {df.shape[0]} events from {run_path!r} with id {run_id!r}')
 
+    # Clean and explode dataframe
+    # overwrite object to free memory
+    float_cols = df.filter(regex='reward').columns
+    df = explode_data(clean_data(df)).astype({c: float for c in float_cols}).fillna({c: 0 for c in float_cols})
+
     if save:
-        df.to_csv(file_path, index=False)
+        df.to_parquet(file_path, index=False)
 
     # Convert timestamp to datetime.
     df._timestamp = pd.to_datetime(df._timestamp, unit="s")
     return df.sort_values("_timestamp")
 
 
-def calculate_stats(df_long, rm_failed=True, rm_zero_reward=True, freq='H', save_path=None ):
+def calculate_stats(df_long, freq='H', save_path=None, ntop=3 ):
 
     df_long._timestamp = pd.to_datetime(df_long._timestamp)
+
     # if dataframe has columns such as followup_completions and answer_completions, convert to multiple rows
     if 'completions' not in df_long.columns:
         df_long.set_index(['_timestamp','run_id'], inplace=True)
@@ -126,79 +152,144 @@ def calculate_stats(df_long, rm_failed=True, rm_zero_reward=True, freq='H', save
         ])
         df_long = df_schema.reset_index()
 
-    if rm_failed:
-        df_long = df_long.loc[ df_long.completions.str.len()>0 ]
-
-    if rm_zero_reward:
-        df_long = df_long.loc[ df_long.rewards>0 ]
 
     print(f'Calculating stats for dataframe with shape {df_long.shape}')
 
+    # Approximate number of tokens in each completion
+    df_long['completion_num_tokens'] = (df_long['completions'].str.split().str.len() / 0.75).round()
+
+
     g = df_long.groupby([pd.Grouper(key='_timestamp', axis=0, freq=freq), 'run_id'])
 
-    stats = g.agg({'completions':['nunique','count'], 'rewards':['sum','mean','std']})
+    # TODO: use named aggregations
+    reward_aggs = ['sum','mean','std','median','max',aggregate.nonzero_rate, aggregate.nonzero_mean, aggregate.nonzero_std, aggregate.nonzero_median]
+    aggs = {
+        'completions': ['nunique','count', aggregate.diversity, aggregate.successful_diversity, aggregate.success_rate],
+        'completion_num_tokens': ['mean', 'std', 'median', 'max'],
+        **{k: reward_aggs for k in df_long.filter(regex='reward')}
+    }
+
+    # Calculate tokens per second
+    if 'completion_times' in df_long.columns:
+        df_long['tokens_per_sec'] = df_long['completion_num_tokens']/df_long['completion_times']
+        aggs.update({
+            'completion_times': ['mean','std','median','min','max'],
+            'tokens_per_sec': ['mean','std','median','max'],
+        })
 
+    stats = g.agg(aggs)
+    stats = stats.merge(g.apply(aggregate.top_stats, exclude='', ntop=ntop).reset_index(level=1,drop=True), left_index=True, right_index=True)
+    # flatten multiindex columns
     stats.columns = ['_'.join(c) for c in stats.columns]
-    stats['completions_diversity'] = stats['completions_nunique'] / stats['completions_count']
     stats = stats.reset_index()
 
     if save_path:
         stats.to_csv(save_path, index=False)
 
     return stats
 
 
-def clean_data(df):
-    return df.dropna(subset=df.filter(regex='completions|rewards').columns, how='any').dropna(axis=1, how='all')
-
-def explode_data(df):
-    list_cols = utils.get_list_col_lengths(df)
-    return utils.explode_data(df, list(list_cols.keys())).apply(pd.to_numeric, errors='ignore')
-
 
-def process(run, load=True, save=False, freq='H'):
+def process(run, load=True, save=False, load_stats=True, freq='H', ntop=3):
 
     try:
 
         stats_path = f'data/aggs/stats-{run["run_id"]}.csv'
-        if os.path.exists(stats_path):
-            print(f'Loaded stats file {stats_path}')
+        if load_stats and os.path.exists(stats_path):
+            print(f'Loaded stats file {stats_path!r}')
            return pd.read_csv(stats_path)
 
         # Load data and add extra columns from wandb run
-        df = load_data(run_id=run['run_id'],
+        df_long = load_data(run_id=run['run_id'],
                        run_path=run['run_path'],
                        load=load,
                        save=save,
-                       save = (run['state'] != 'running') & run['end_time']
+                       # save = (run['state'] != 'running') & run['end_time']
                        ).assign(**run.to_dict())
-        # Clean and explode dataframe
-        df_long = explode_data(clean_data(df))
-        # Remove original dataframe from memory
-        del df
+        assert isinstance(df_long, pd.DataFrame), f'Expected dataframe, but got {type(df_long)}'
+
        # Get and save stats
-        return calculate_stats(df_long, freq=freq, save_path=stats_path)
+        return calculate_stats(df_long, freq=freq, save_path=stats_path, ntop=ntop)
 
     except Exception as e:
-        print(f'Error processing run {run["run_id"]}: {e}')
+        print(f'Error processing run {run["run_id"]}: { format_exc(e) }')
+
+def line_chart(df, col, title=None):
+    title = title or col.replace('_',' ').title()
+    fig = px.line(df.astype({'_timestamp':str}),
+                  x='_timestamp', y=col,
+                  line_group='run_id',
+                  title=f'{title} over Time',
+                  labels={'_timestamp':'', col: title, 'uids':'UID','value':'counts', 'variable':'Completions'},
+                  width=800, height=600,
+                  template='plotly_white',
+                  ).update_traces(opacity=0.2)
+
+    fig.write_image(f'data/figures/{col}.png')
+    fig.write_html(f'data/figures/{col}.html')
+    return col
+
+
+def parse_arguments():
+    parser = argparse.ArgumentParser(description='Process wandb validator runs for a given netuid.')
+    parser.add_argument('--load_runs',action='store_true', help='Load runs from file.')
+    parser.add_argument('--repull_unfinished',action='store_true', help='Re-pull runs that were running when downloaded and saved.')
+    parser.add_argument('--netuid', type=int, default=None, help='Network UID to use.')
+    parser.add_argument('--ntop', type=int, default=1000, help='Number of runs to process.')
+    parser.add_argument('--min_steps', type=int, default=100, help='Minimum number of steps to include.')
+    parser.add_argument('--max_workers', type=int, default=32, help='Max workers to use.')
+    parser.add_argument('--no_plot',action='store_true', help='Prevent plotting.')
+    parser.add_argument('--no_save',action='store_true', help='Prevent saving data to file.')
+    parser.add_argument('--no_load',action='store_true', help='Prevent loading downloaded data from file.')
+    parser.add_argument('--no_load_stats',action='store_true', help='Prevent loading stats data from file.')
+    parser.add_argument('--freq', type=str, default='H', help='Frequency to aggregate data.')
+    parser.add_argument('--completions_ntop', type=int, default=3, help='Number of top completions to include in stats.')
+
+    return parser.parse_args()
+
 
 if __name__ == '__main__':
 
     # TODO: flag to overwrite runs that were running when downloaded and saved: check if file date is older than run end time.
 
+    args = parse_arguments()
+    print(args)
+
     filters = None# {"tags": {"$in": [f'1.1.{i}' for i in range(10)]}}
     # filters={'tags': {'$in': ['5F4tQyWrhfGVcNhoqeiNsR6KjD4wMZ2kfhLj4oHYuyHbZAc3']}} # Is foundation validator
-    df_runs = pull_wandb_runs(ntop=500, filters=filters)#summary_filters=lambda s: s.get('augment_prompt'))
+    if args.load_runs and os.path.exists('data/wandb.csv'):
+        df_runs = pd.read_csv('data/wandb.csv')
+        assert len(df_runs) >= args.ntop, f'Loaded {len(df_runs)} runs, but expected at least {args.ntop}'
+        df_runs = df_runs.iloc[:args.ntop]
+    else:
+        df_runs = pull_wandb_runs(ntop=args.ntop,
+                                  min_steps=args.min_steps,
+                                  netuid=args.netuid,
+                                  filters=filters
+                                  )#summary_filters=lambda s: s.get('augment_prompt'))
+        df_runs.to_csv('data/wandb.csv', index=False)
 
     os.makedirs('data/runs/', exist_ok=True)
     os.makedirs('data/aggs/', exist_ok=True)
-    df_runs.to_csv('data/wandb.csv', index=False)
-
-    display(df_runs)
-    plot_gantt(df_runs)
+    os.makedirs('data/figures/', exist_ok=True)
 
-    with ProcessPoolExecutor(max_workers=min(32, df_runs.shape[0])) as executor:
-        futures = [executor.submit(process, run, load=True, save=True) for _, run in df_runs.iterrows()]
+    display(df_runs)
+    if not args.no_plot:
+        plot_gantt(df_runs)
+
+    with ProcessPoolExecutor(max_workers=min(args.max_workers, df_runs.shape[0])) as executor:
+        futures = [executor.submit(
+            process,
+            run,
+            load=not args.no_load,
+            save=not args.no_save,
+            load_stats=not args.no_load_stats,
+            freq=args.freq,
+            ntop=args.completions_ntop
+            )
+            for _, run in df_runs.iterrows()
+        ]
 
     # Use tqdm to add a progress bar
     results = []
@@ -208,30 +299,39 @@ if __name__ == '__main__':
                 result = future.result()
                 results.append(result)
             except Exception as e:
-                print(f'generated an exception: {print_exc(e)}')
+                print(f'generated an exception: {format_exc(e)}')
             pbar.update(1)
 
     if not results:
         raise ValueError('No runs were successfully processed.')
 
     # Concatenate the results into a single dataframe
-    df = pd.concat(results, ignore_index=True)
+    df = pd.concat(results, ignore_index=True).sort_values(['_timestamp','run_id'], ignore_index=True)
 
     df.to_csv('data/processed.csv', index=False)
+    print(f'Saved {df.shape[0]} rows to data/processed.csv')
 
     display(df)
-
-    fig = px.line(df.astype({'_timestamp':str}),
-                  x='_timestamp',
-                  y='completions_diversity',
-                  # y=['Unique','Total'],
-                  line_group='run_id',
-                  # color='hotkey',
-                  # color_discrete_sequence=px.colors.sequential.YlGnBu,
-                  title='Completion Diversity over Time',
-                  labels={'_timestamp':'', 'completions_diversity':'Diversity', 'uids':'UID','value':'counts', 'variable':'Completions'},
-                  width=800, height=600,
-                  template='plotly_white',
-                  ).update_traces(opacity=0.3)
-    fig.show()
+    if not args.no_plot:
+
+        plots = []
+
+        cols = df.set_index(['run_id','_timestamp']).columns
+        with ProcessPoolExecutor(max_workers=min(args.max_workers, len(cols))) as executor:
+            futures = [executor.submit(line_chart, df, c) for c in cols]
+
+        # Use tqdm to add a progress bar
+        results = []
+        with tqdm.tqdm(total=len(futures)) as pbar:
+            for future in futures:
+                try:
+                    result = future.result()
+                    plots.append(result)
+                except Exception as e:
+                    print(f'generated an exception: {format_exc(e)}')
+                pbar.update(1)
+
+        print(f'Saved {len(plots)} plots to data/figures/')
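The refactored pipeline is driven from the new command-line entry point (for example `python multistats.py --netuid 1 --ntop 100 --freq H --no_plot`), but the same steps can also be called directly. A rough programmatic sketch, assuming wandb access and that the data/ directories exist; the run selection below is illustrative and not part of the commit:

from multistats import pull_wandb_runs, load_data, calculate_stats

# Pull metadata for a few qualifying validator runs on netuid 1.
df_runs = pull_wandb_runs(project='openvalidators', ntop=5, min_steps=100, netuid=1)

# Download, clean and explode the first run, then aggregate it per hour.
run = df_runs.iloc[0]
df_long = load_data(run_id=run['run_id'], run_path=run['run_path'], load=True, save=False).assign(**run.to_dict())
stats = calculate_stats(df_long, freq='H', ntop=3)
print(stats.filter(regex='completions').head())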