Spaces:
Paused
Paused
Include weights and block range in meta2frame functions
Browse files
- meta2frame.py +11 -5
- multigraph.py +2 -2
meta2frame.py
CHANGED
@@ -36,19 +36,21 @@ def YC1(T, a=0.5, b=10):
|
|
36 |
return torch.sigmoid( b * (T - a) )
|
37 |
|
38 |
|
39 |
-
def load_metagraphs(root_dir, netuid, block_min=0, block_max=3_000_000):
    """Load every pickled metagraph snapshot saved for a subnet.

    Args:
        root_dir: Directory containing one subdirectory per netuid.
        netuid: Subnet id whose snapshot directory is scanned for ``*.pkl``.
        block_min: Lowest block number to include (inclusive). Defaults to 0
            so existing callers keep their old "load everything" behavior.
        block_max: Highest block number to include (inclusive).

    Returns:
        List of unpickled metagraph objects, sorted by block number so the
        result is deterministic (``glob`` order is filesystem-dependent).
    """
    metagraphs = []
    match_path = os.path.join(root_dir, str(netuid), '*.pkl')
    files = glob.glob(match_path)
    print(f'Found {len(files)} metagraphs in {match_path}')
    for path in tqdm.tqdm(files):
        # Snapshot files are named <block>.pkl; parse the stem in a
        # cross-platform way rather than splitting on '/'.
        block = int(os.path.splitext(os.path.basename(path))[0])
        if not block_min <= block <= block_max:
            continue
        # NOTE(review): pickle.load is unsafe on untrusted files -- this
        # assumes root_dir only ever holds snapshots we wrote ourselves.
        with open(path, 'rb') as f:
            metagraphs.append(pickle.load(f))
    return sorted(metagraphs, key=lambda m: m.block)
|
52 |
|
53 |
|
54 |
# TODO: can calculate the emission trend using each subnet or just using root subnet
|
@@ -119,11 +121,11 @@ def make_dataframe_old(metagraphs, netuid):
|
|
119 |
df.sort_values(by=['block','netuid'], inplace=True)
|
120 |
return df
|
121 |
|
122 |
-
def make_dataframe(root_dir, netuid, cols=None):
|
123 |
if cols is None:
|
124 |
cols = ['stake','emission','trust','validator_trust','dividends','incentive','R', 'consensus','validator_permit']
|
125 |
frames = []
|
126 |
-
metagraphs = load_metagraphs(root_dir, netuid)
|
127 |
print(f'Loaded {len(metagraphs)} metagraphs for netuid {netuid}')
|
128 |
for m in metagraphs:
|
129 |
frame = pd.DataFrame({k: getattr(m, k) for k in cols})
|
@@ -133,5 +135,9 @@ def make_dataframe(root_dir, netuid, cols=None):
|
|
133 |
frame['uid'] = range(len(frame))
|
134 |
frame['hotkey'] = [axon.hotkey for axon in m.axons]
|
135 |
frame['coldkey'] = [axon.coldkey for axon in m.axons]
|
|
|
|
|
|
|
|
|
136 |
frames.append(frame)
|
137 |
return pd.concat(frames).sort_values(by=['timestamp','block','uid'])
|
|
|
36 |
return torch.sigmoid( b * (T - a) )
|
37 |
|
38 |
|
39 |
+
def load_metagraphs(root_dir, netuid, block_min=0, block_max=3_000_000):
    """Load pickled metagraph snapshots for a subnet within a block range.

    Args:
        root_dir: Directory containing one subdirectory per netuid.
        netuid: Subnet id whose snapshot directory is scanned for ``*.pkl``.
        block_min: Lowest block number to include (inclusive).
        block_max: Highest block number to include (inclusive).

    Returns:
        List of unpickled metagraph objects, sorted by their ``block``
        attribute so the result is deterministic regardless of glob order.
    """
    metagraphs = []
    match_path = os.path.join(root_dir, str(netuid), '*.pkl')
    files = glob.glob(match_path)
    print(f'Found {len(files)} metagraphs in {match_path}')
    for path in tqdm.tqdm(files):
        # Snapshot files are named <block>.pkl. The original split on '/'
        # which breaks on Windows separators; use os.path helpers instead.
        block = int(os.path.splitext(os.path.basename(path))[0])
        if not block_min <= block <= block_max:
            continue
        # NOTE(review): pickle.load is unsafe on untrusted files -- this
        # assumes root_dir only ever holds snapshots we wrote ourselves.
        with open(path, 'rb') as f:
            metagraph = pickle.load(f)
        metagraphs.append(metagraph)

    return sorted(metagraphs, key=lambda x: x.block)
|
54 |
|
55 |
|
56 |
# TODO: can calculate the emission trend using each subnet or just using root subnet
|
|
|
121 |
df.sort_values(by=['block','netuid'], inplace=True)
|
122 |
return df
|
123 |
|
124 |
+
def make_dataframe(root_dir, netuid, cols=None, block_min=0, block_max=3_000_000, weights=False):
|
125 |
if cols is None:
|
126 |
cols = ['stake','emission','trust','validator_trust','dividends','incentive','R', 'consensus','validator_permit']
|
127 |
frames = []
|
128 |
+
metagraphs = load_metagraphs(root_dir, netuid, block_min, block_max)
|
129 |
print(f'Loaded {len(metagraphs)} metagraphs for netuid {netuid}')
|
130 |
for m in metagraphs:
|
131 |
frame = pd.DataFrame({k: getattr(m, k) for k in cols})
|
|
|
135 |
frame['uid'] = range(len(frame))
|
136 |
frame['hotkey'] = [axon.hotkey for axon in m.axons]
|
137 |
frame['coldkey'] = [axon.coldkey for axon in m.axons]
|
138 |
+
if weights and m.W is not None:
|
139 |
+
# convert NxN tensor to a list of lists so it fits into the dataframe
|
140 |
+
frame['weights'] = [w.tolist() for w in m.W]
|
141 |
+
|
142 |
frames.append(frame)
|
143 |
return pd.concat(frames).sort_values(by=['timestamp','block','uid'])
|
multigraph.py
CHANGED
@@ -46,7 +46,7 @@ def parse_arguments():
|
|
46 |
parser.add_argument('--return_graph', action='store_true', help='Return metagraph instead of True.')
|
47 |
parser.add_argument('--no_dataframe', action='store_true', help='Do not create dataframe.')
|
48 |
parser.add_argument('--max_workers', type=int, default=32, help='Max workers to use.')
|
49 |
-
parser.add_argument('--start_block', type=int, default=
|
50 |
parser.add_argument('--num_blocks', type=int, default=0, help='Number of blocks.')
|
51 |
parser.add_argument('--end_block', type=int, default=600_000, help='End block.')
|
52 |
parser.add_argument('--step_size', type=int, default=100, help='Step size.')
|
@@ -67,7 +67,7 @@ if __name__ == '__main__':
|
|
67 |
return_graph=args.return_graph
|
68 |
|
69 |
step_size = args.step_size
|
70 |
-
start_block = args.start_block
|
71 |
start_block = (min(subtensor.block, start_block)//step_size)*step_size # round to nearest step_size
|
72 |
if args.num_blocks:
|
73 |
end_block = start_block - int(args.num_blocks*step_size)
|
|
|
46 |
parser.add_argument('--return_graph', action='store_true', help='Return metagraph instead of True.')
|
47 |
parser.add_argument('--no_dataframe', action='store_true', help='Do not create dataframe.')
|
48 |
parser.add_argument('--max_workers', type=int, default=32, help='Max workers to use.')
|
49 |
+
parser.add_argument('--start_block', type=int, default=None, help='Start block.')
|
50 |
parser.add_argument('--num_blocks', type=int, default=0, help='Number of blocks.')
|
51 |
parser.add_argument('--end_block', type=int, default=600_000, help='End block.')
|
52 |
parser.add_argument('--step_size', type=int, default=100, help='Step size.')
|
|
|
67 |
return_graph=args.return_graph
|
68 |
|
69 |
step_size = args.step_size
|
70 |
+
start_block = args.start_block or subtensor.get_current_block()
|
71 |
start_block = (min(subtensor.block, start_block)//step_size)*step_size # round to nearest step_size
|
72 |
if args.num_blocks:
|
73 |
end_block = start_block - int(args.num_blocks*step_size)
|