steffenc committed
Commit f03aa8c · 1 Parent(s): 9ac2e0d

Copy utils from folding dashboard and update for prompting

Files changed (1)
  1. utils.py +182 -0
utils.py ADDED
@@ -0,0 +1,182 @@
+ import os
+ import tqdm
+ import time
+ import wandb
+ import streamlit as st
+ import pandas as pd
+ import bittensor as bt
+
+
+ # TODO: Store the runs dataframe (as in sn1 dashboard) and top up with the ones created since the last snapshot
+ # TODO: Store relevant wandb data in a database for faster access
+
+
+ MIN_STEPS = 10  # minimum number of steps in a wandb run for it to be worth analyzing
+ MAX_RUNS = 100  # maximum number of runs to fetch (unused for now)
+ NETUID = 1
+ BASE_PATH = 'macrocosmos/prompting-validators'
+ NETWORK = 'finney'
+ KEYS = None  # None means scan_history fetches all logged keys
+ ABBREV_CHARS = 8  # number of characters to keep when abbreviating keys
+ ENTITY_CHOICES = ('identity', 'hotkey', 'coldkey')
+
+
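+ # Shared wandb API client; a long timeout helps when scanning large run histories.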
+ api = wandb.Api(timeout=600)
+
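+ # Known validator hotkeys -> human-readable names, used for the 'identity' column.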
+ IDENTITIES = {
+     '5F4tQyWrhfGVcNhoqeiNsR6KjD4wMZ2kfhLj4oHYuyHbZAc3': 'opentensor',
+     '5Hddm3iBFD2GLT5ik7LZnT3XJUnRnN8PoeCFgGQgawUVKNm8': 'taostats',
+     '5HEo565WAy4Dbq3Sv271SAi7syBSofyfhhwRNjFNSM2gP9M2': 'foundry',
+     '5HK5tp6t2S59DywmHRWPBVJeJ86T61KjurYqeooqj8sREpeN': 'bittensor-guru',
+     '5FFApaS75bv5pJHfAp2FVLBj9ZaXuFDjEypsaBNc1wCfe52v': 'roundtable-21',
+     '5EhvL1FVkQPpMjZX4MAADcW42i3xPSF1KiCpuaxTYVr28sux': 'tao-validator',
+     '5FKstHjZkh4v3qAMSBa1oJcHCLjxYZ8SNTSz1opTv4hR7gVB': 'datura',
+     '5DvTpiniW9s3APmHRYn8FroUWyfnLtrsid5Mtn5EwMXHN2ed': 'first-tensor',
+     '5HbLYXUBy1snPR8nfioQ7GoA9x76EELzEq9j7F32vWUQHm1x': 'tensorplex',
+     '5CsvRJXuR955WojnGMdok1hbhffZyB4N5ocrv82f3p5A2zVp': 'owl-ventures',
+     '5CXRfP2ekFhe62r7q3vppRajJmGhTi7vwvb2yr79jveZ282w': 'rizzo',
+     '5HNQURvmjjYhTSksi8Wfsw676b4owGwfLR2BFAQzG7H3HhYf': 'neural-internet',
+ }
+
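+ # Maps each output column to a function that extracts that field from a wandb Run object.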
+ EXTRACTORS = {
+     'state': lambda x: x.state,
+     'run_id': lambda x: x.id,
+     'run_path': lambda x: os.path.join(BASE_PATH, x.id),
+     'user': lambda x: x.user.name[:16],
+     'username': lambda x: x.user.username[:16],
+     'created_at': lambda x: pd.Timestamp(x.created_at),
+     'last_event_at': lambda x: pd.Timestamp(x.summary.get('_timestamp'), unit='s'),
+
+     'netuid': lambda x: x.config.get('netuid'),
+     'mock': lambda x: x.config.get('neuron', {}).get('mock'),
+     'sample_size': lambda x: x.config.get('neuron', {}).get('sample_size'),
+     'timeout': lambda x: x.config.get('neuron', {}).get('timeout'),
+     'epoch_length': lambda x: x.config.get('neuron', {}).get('epoch_length'),
+     'disable_set_weights': lambda x: x.config.get('neuron', {}).get('disable_set_weights'),
+
+     # These fields come from the last logged event
+     'num_steps': lambda x: x.summary.get('_step'),
+     'runtime': lambda x: x.summary.get('_runtime'),
+     'query': lambda x: x.summary.get('query'),
+     'challenge': lambda x: x.summary.get('challenge'),
+     'reference': lambda x: x.summary.get('reference'),
+     'completions': lambda x: x.summary.get('completions'),
+
+     'version': lambda x: x.tags[0],
+     'spec_version': lambda x: x.tags[1],
+     'vali_hotkey': lambda x: x.tags[2],
+     # 'tasks_selected': lambda x: x.tags[3:],
+
+     # System metrics
+     'disk_read': lambda x: x.system_metrics.get('system.disk.in'),
+     'disk_write': lambda x: x.system_metrics.get('system.disk.out'),
+     # Really slow stuff below
+     # 'started_at': lambda x: x.metadata.get('startedAt'),
+     # 'disk_used': lambda x: x.metadata.get('disk').get('/').get('used'),
+     # 'commit': lambda x: x.metadata.get('git').get('commit')
+ }
+
+
+ def get_leaderboard(df, ntop=10, entity_choice='identity'):
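+     """Return the top `ntop` entities by total incentive, excluding validators."""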
+     df = df.loc[~df.validator_permit]
+     df.index = range(df.shape[0])
+     return df.groupby(entity_choice).I.sum().sort_values().reset_index().tail(ntop)
+
+ @st.cache_data()
+ def get_metagraph(time):
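+     """Fetch the current metagraph as a DataFrame; `time` exists only to invalidate the cache."""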
+     print(f'Loading metagraph with time {time}')
+     subtensor = bt.subtensor(network=NETWORK)
+     m = subtensor.metagraph(netuid=NETUID)
+     meta_cols = ['I', 'stake', 'trust', 'validator_trust', 'validator_permit', 'C', 'R', 'E', 'dividends', 'last_update']
+
+     df_m = pd.DataFrame({k: getattr(m, k) for k in meta_cols})
+     df_m['uid'] = range(m.n.item())
+     df_m['hotkey'] = [axon.hotkey for axon in m.axons]
+     df_m['coldkey'] = [axon.coldkey for axon in m.axons]
+     df_m['ip'] = [axon.ip for axon in m.axons]
+     df_m['port'] = [axon.port for axon in m.axons]
+     df_m['coldkey'] = df_m.coldkey.str[:ABBREV_CHARS]
+     df_m['hotkey'] = df_m.hotkey.str[:ABBREV_CHARS]
+     df_m['identity'] = df_m.apply(lambda x: f'{x.hotkey} @ uid {x.uid}', axis=1)
+     return df_m
+
+
+ @st.cache_data()
+ def load_run(run_path, keys=KEYS):
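+     """Download the full logged history of a single wandb run as a DataFrame."""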
+     print('Loading run:', run_path)
+     run = api.run(run_path)
+     df = pd.DataFrame(list(run.scan_history(keys=keys)))
+     for col in ['updated_at', 'created_at']:
+         if col in df.columns:
+             df[col] = pd.to_datetime(df[col])
+     print(f'+ Loaded {len(df)} records')
+     return df
+
+ @st.cache_data(show_spinner=False)
+ def build_data(timestamp=None, path=BASE_PATH, min_steps=MIN_STEPS, use_cache=True):
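+     """Build the runs DataFrame, topping up a local CSV snapshot with runs updated since it was saved.
+
+     `timestamp` is not used directly; pass a coarse value (e.g. time.time()//600) to
+     periodically invalidate the st.cache_data cache.
+     """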
+     save_path = '_saved_runs.csv'
+     filters = {}
+     df = pd.DataFrame()
+     # Load the last saved runs so that we only need to update the new ones
+     if use_cache and os.path.exists(save_path):
+         df = pd.read_csv(save_path)
+         df['created_at'] = pd.to_datetime(df['created_at'])
+         df['last_event_at'] = pd.to_datetime(df['last_event_at'])
+
+         timestamp_str = df['last_event_at'].max().isoformat()
+         filters.update({'updated_at': {'$gte': timestamp_str}})
+
+     progress = st.progress(0, text='Loading data')
+
+     runs = api.runs(path, filters=filters)
+
+     run_data = []
+     n_events = 0
+     for i, run in enumerate(tqdm.tqdm(runs, total=len(runs))):
+         num_steps = run.summary.get('_step', 0)
+         if num_steps < min_steps:
+             continue
+         n_events += num_steps
+         prog_msg = f'Loading data {i/len(runs)*100:.0f}% ({n_events:,.0f} events)'
+         progress.progress(i/len(runs), text=f'{prog_msg}... **downloading** `{os.path.join(*run.path)}`')
+
+         run_data.append(run)
+
+     progress.empty()
+
+     df_new = pd.DataFrame([{k: func(run) for k, func in EXTRACTORS.items()} for run in tqdm.tqdm(run_data, total=len(run_data))])
+     df = pd.concat([df, df_new], ignore_index=True)
+     df['duration'] = (df.last_event_at - df.created_at).dt.round('s')
+     df['identity'] = df['vali_hotkey'].map(IDENTITIES).fillna('unknown')
+     df['vali_hotkey'] = df['vali_hotkey'].str[:ABBREV_CHARS]
+
+     df.to_csv(save_path, index=False)
+     return df
+
+
+ def load_state_vars():
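+     """Assemble the state used by the dashboard: all runs, runs active in the last 24h, and the metagraph."""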
+     UPDATE_INTERVAL = 600  # seconds between cache refreshes
+
+     df = build_data(time.time()//UPDATE_INTERVAL)
+     runs_alive_24h_ago = (df.last_event_at > pd.Timestamp.now() - pd.Timedelta('1d'))
+     df_24h = df.loc[runs_alive_24h_ago]
+
+     df_m = get_metagraph(time.time()//UPDATE_INTERVAL)
+
+     return {
+         'dataframe': df,
+         'dataframe_24h': df_24h,
+         'metagraph': df_m,
+     }
+
+
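+ # Quick smoke test: build the runs table and dump it to a CSV for inspection.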
+ if __name__ == '__main__':
+
+     print('Loading runs')
+     df = build_data()
+
+     df.to_csv('test_wandb_data.csv', index=False)
+     print(df)