Update metagraph dashboard
- meta_utils.py +1 -1
- metadash.py +33 -7
- multigraph.py +40 -33
meta_utils.py
CHANGED
```diff
@@ -39,7 +39,7 @@ def load_metagraphs(block_start, block_end, block_step=1000, datadir='data/metag
 
     blocks = range(block_start, block_end, block_step)
     print(f'Loading blocks {blocks[0]}-{blocks[-1]} from {datadir}')
-    filenames = sorted(filename for filename in os.listdir(datadir) if int(filename.split('.')[0]) in blocks)
+    filenames = sorted(filename for filename in os.listdir(datadir) if filename.split('.')[0].isdigit() and int(filename.split('.')[0]) in blocks)
     print(f'Found {len(filenames)} files in {datadir}')
 
     metagraphs = []
```
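The `.isdigit()` guard is what keeps `load_metagraphs` from crashing on non-numeric filenames; notably, `multigraph.py` now writes `df.pkl` into the same directory, and `int('df')` raises `ValueError`. A minimal, self-contained sketch of the failure mode, with a stand-in listing instead of a real `os.listdir` call:

```python
# Stand-in directory listing: numbered snapshots plus the dataframe that
# multigraph.py writes into the same folder.
listing = ['600000.pkl', '601000.pkl', 'df.pkl']
blocks = range(600_000, 602_000, 1000)

# Old filter: int('df') raises ValueError as soon as df.pkl appears.
# New filter: the isdigit() guard simply skips 'df.pkl'.
filenames = sorted(
    f for f in listing
    if f.split('.')[0].isdigit() and int(f.split('.')[0]) in blocks
)
print(filenames)  # ['600000.pkl', '601000.pkl']
```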
metadash.py
CHANGED
```diff
@@ -5,6 +5,7 @@ from meta_utils import run_subprocess, load_metagraphs
 # from opendashboards.assets import io, inspect, metric, plot
 from meta_plotting import plot_trace, plot_cabals
 import asyncio
+from functools import lru_cache
 
 ## TODO: Read blocks from a big parquet file instead of loading all the pickles -- this is slow
 
@@ -47,24 +48,49 @@ st.title('Metagraph :red[Analysis] Dashboard :eyes:')
 st.markdown('#')
 st.markdown('#')
 
+netuid = 1
 subtensor = bittensor.subtensor(network='finney')
 current_block = subtensor.get_current_block()
-current_difficulty = subtensor.difficulty(1, block=current_block)
 
-
-
-st.metric('Current **block**', current_block, delta='+7200 [24hr]')
-# st.metric('Current **difficulty**', f'{current_difficulty/10e12:.0}T', delta='?')
+@lru_cache(maxsize=1)
+def _metagraph(block):
 
+    print('rerunning cache')
+    return (
+        subtensor.metagraph(netuid, block=block),
+        subtensor.metagraph(netuid, block=block - 7200),
+        subtensor.burn(netuid=netuid, block=block),
+        subtensor.burn(netuid=netuid, block=block - 7200),
+    )
 
+current_metagraph, yesterday_metagraph, current_burn, yesterday_burn = _metagraph(10*current_block//10)
+
+current_vcount = current_metagraph.validator_permit.sum().item()
+current_mcount = (~current_metagraph.validator_permit).sum().item()
+yesterday_vcount = yesterday_metagraph.validator_permit.sum().item()
+yesterday_mcount = (~yesterday_metagraph.validator_permit).sum().item()
+
+mcol1, mcol2, mcol3, mcol4 = st.columns(4)
+mcol1.metric('Block', current_block, delta='+7200 [24hr]')
+mcol2.metric('Register Cost', f'{current_burn.unit}{current_burn.tao:.3f}', delta=f'{current_burn.tao-yesterday_burn.tao:.3f}')
+mcol3.metric('Validators', current_vcount, delta=current_vcount-yesterday_vcount)
+mcol4.metric('Miners', current_mcount, delta=current_mcount-yesterday_mcount)
+
+
+st.markdown('#')
+st.markdown('#')
+
-block_start, block_end = st.select_slider(
+bcol1, bcol2, bcol3 = st.columns([0.6, 0.1, 0.2])
+block_start, block_end = bcol1.select_slider(
     'Select a **block range**',
     options=blockfiles,
     value=(DEFAULT_BLOCK_START, DEFAULT_BLOCK_END),
     format_func=lambda x: f'{x:,}'
 )
 
-bcol3
+with bcol3:
+    st.markdown('#')
+    st.button('Refresh', on_click=run_subprocess)
 
 
 with st.spinner(text=f'Loading data...'):
```
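One caveat with the new caching: for integers, `10*current_block//10` equals `current_block`, so the `lru_cache(maxsize=1)` key changes on every block and the snapshot is recomputed each time the chain advances. If the intent was to recompute only once per ten blocks, the rounding has to truncate first. A hedged sketch of that bucketing pattern (`snapshot` is a stand-in, not the dashboard's `_metagraph`):

```python
from functools import lru_cache

@lru_cache(maxsize=1)
def snapshot(block_bucket):
    print(f'recomputing for bucket {block_bucket}')
    return block_bucket

for current_block in range(1000, 1025):
    # 10 * current_block // 10 == current_block: a new cache key every block.
    # current_block // 10 * 10 rounds down, so the key is stable for 10 blocks.
    snapshot(current_block // 10 * 10)
# prints 'recomputing' only three times: buckets 1000, 1010, 1020
```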
multigraph.py
CHANGED
```diff
@@ -9,7 +9,7 @@ from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
 
 import torch
 import bittensor
-
+from meta_utils import load_metagraphs
 #TODO: make line charts and other cool stuff for each metagraph snapshot
 
 def process(block, netuid=1, lite=True, difficulty=False, prune_weights=False, return_graph=False, half=True, subtensor=None):
@@ -43,8 +43,9 @@ def parse_arguments():
     parser.add_argument('--difficulty', action='store_true', help='Include difficulty in metagraph.')
     parser.add_argument('--prune_weights', action='store_true', help='Prune weights in metagraph.')
     parser.add_argument('--return_graph', action='store_true', help='Return metagraph instead of True.')
+    parser.add_argument('--no_dataframe', action='store_true', help='Do not create dataframe.')
     parser.add_argument('--max_workers', type=int, default=32, help='Max workers to use.')
-    parser.add_argument('--start_block', type=int, default=
+    parser.add_argument('--start_block', type=int, default=1_500_000, help='Start block.')
     parser.add_argument('--end_block', type=int, default=600_000, help='End block.')
     parser.add_argument('--step_size', type=int, default=100, help='Step size.')
     return parser.parse_args()
```
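Note the new defaults: `--start_block` (1_500_000) sits above `--end_block` (600_000), so an ascending `range` over them is empty; the script presumably walks blocks from newest to oldest. A quick check, with the descending variant shown as an assumption rather than the repo's actual loop:

```python
start, end, step = 1_500_000, 600_000, 100

print(len(range(start, end, step)))    # 0: ascending range is empty
print(len(range(start, end, -step)))   # 9000: descending traversal works
```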
```diff
@@ -72,41 +73,47 @@ if __name__ == '__main__':
 
     max_workers = min(args.max_workers, len(blocks))
 
-
+    datadir = f'data/metagraph/{netuid}'
+    os.makedirs(datadir, exist_ok=True)
     if not overwrite:
         blocks = [block for block in blocks if not os.path.exists(f'data/metagraph/{netuid}/{block}.pkl')]
 
     metagraphs = []
 
-    if len(blocks) == 0:
-        print(f'No blocks to process. Current block: {subtensor.block}')
-        quit()
+    if len(blocks)>0:
 
+        print(f'Processing {len(blocks)} blocks from {blocks[0]}-{blocks[-1]} using {max_workers} workers.')
+
+        with ProcessPoolExecutor(max_workers=max_workers) as executor:
+            futures = [
+                executor.submit(process, block, lite=lite(block), netuid=netuid, difficulty=difficulty)
+                for block in blocks
+            ]
+
+            success = 0
+            with tqdm.tqdm(total=len(futures)) as pbar:
+                for block, future in zip(blocks,futures):
+                    try:
+                        metagraphs.append(future.result())
+                        success += 1
+                    except Exception as e:
+                        print(f'generated an exception: {print_exc(e)}')
+                    pbar.update(1)
+                    pbar.set_description(f'Processed {success} blocks. Current block: {block}')
+
+        if not success:
+            raise ValueError('No blocks were successfully processed.')
+
+        print(f'Processed {success} blocks.')
+        if return_graph:
+            for metagraph in metagraphs:
+                print(f'{metagraph.block}: {metagraph.n.item()} nodes, difficulty={getattr(metagraph, "difficulty", None)}, weights={metagraph.weights.shape if hasattr(metagraph, "weights") else None}')
+
+        print(metagraphs[-1])
+    else:
+        print(f'No blocks to process. Current block: {subtensor.block}')
+
 
+    if not args.no_dataframe:
+        df = load_metagraphs(min(blocks), max(blocks), block_step=step_size, datadir=datadir)
+        df.to_pickle(f'data/metagraph/{netuid}/df.pkl')
```