Spaces:
Paused
Paused
Add aggregations and rename metagraph dashboard file
Browse files- metadash.py +114 -0
- opendashboards/utils/aggregate.py +53 -0
metadash.py
ADDED
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import pandas as pd
|
3 |
+
import streamlit as st
|
4 |
+
from meta_utils import run_subprocess, load_metagraphs
|
5 |
+
# from opendashboards.assets import io, inspect, metric, plot
|
6 |
+
from meta_plotting import plot_trace, plot_cabals
|
7 |
+
import asyncio
|
8 |
+
|
9 |
+
## TODO: Read blocks from a big parquet file instead of loading all the pickles -- this is slow
|
10 |
+
|
11 |
+
def get_or_create_eventloop():
    """Return this thread's asyncio event loop, creating one if absent.

    Worker threads (e.g. Streamlit's script-runner thread) have no event
    loop installed by default, so ``asyncio.get_event_loop()`` raises
    RuntimeError there; in that case a fresh loop is created and installed.

    Returns:
        asyncio.AbstractEventLoop: the thread's current event loop.

    Raises:
        RuntimeError: re-raised when the failure is anything other than the
            "no current event loop" case. (Bug fix: the original fell
            through and implicitly returned None, hiding the error.)
    """
    try:
        return asyncio.get_event_loop()
    except RuntimeError as ex:
        if "There is no current event loop in thread" in str(ex):
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            # Return the loop we just installed instead of looking it up again.
            return loop
        raise
|
19 |
+
|
20 |
+
# --- Event-loop bootstrap ----------------------------------------------------
# Install a fresh event loop before importing bittensor: the import expects a
# current loop in this thread (Streamlit scripts run outside the main thread).
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
import bittensor

# --- Data discovery and dashboard defaults -----------------------------------
# Directory of per-block metagraph snapshots for netuid 1; each filename stem
# is the block number (e.g. '600100.pkl'), so sorting the parsed stems yields
# the available block range.
datadir='data/metagraph/1/'
blockfiles = sorted(int(filename.split('.')[0]) for filename in os.listdir(datadir))
DEFAULT_SRC = 'miner'
DEFAULT_BLOCK_START = blockfiles[0]   # earliest snapshot on disk
DEFAULT_BLOCK_END = blockfiles[-1]    # latest snapshot on disk
DEFAULT_BLOCK_STEP = 1000
DEFAULT_NTOP = 10
DEFAULT_UID_NTOP = 10

# Set app config
st.set_page_config(
    page_title='Validator Dashboard',
    menu_items={
        'Report a bug': "https://github.com/opentensor/dashboards/issues",
        'About': """
        This dashboard is part of the OpenTensor project. \n
        """
    },
    layout = "centered"
)

st.title('Metagraph :red[Analysis] Dashboard :eyes:')
# add vertical space
st.markdown('#')
st.markdown('#')

# Live chain state ('finney' is the Bittensor mainnet endpoint).
subtensor = bittensor.subtensor(network='finney')
current_block = subtensor.get_current_block()
current_difficulty = subtensor.difficulty(1, block=current_block)

bcol1, bcol2, bcol3 = st.columns([0.2, 0.6, 0.2])
with bcol1:
    # NOTE(review): the hard-coded '+7200 [24hr]' delta assumes ~12s block
    # time over 24h — confirm against actual chain cadence.
    st.metric('Current **block**', current_block, delta='+7200 [24hr]')
    # st.metric('Current **difficulty**', f'{current_difficulty/10e12:.0}T', delta='?')


# Slider options are the block numbers that actually exist on disk.
block_start, block_end = bcol2.select_slider(
    'Select a **block range**',
    options=blockfiles,
    value=(DEFAULT_BLOCK_START, DEFAULT_BLOCK_END),
    format_func=lambda x: f'{x:,}'
)

# Re-runs the snapshot scraper in a subprocess (see meta_utils.run_subprocess).
bcol3.button('Refresh', on_click=run_subprocess)


with st.spinner(text=f'Loading data...'):
    # df = load_metagraphs(block_start=block_start, block_end=block_end, block_step=DEFAULT_BLOCK_STEP)
    # Pre-aggregated parquet; name suggests blocks 600100-807300 at step 100.
    df = pd.read_parquet('blocks_600100_807300_100')

blocks = df.block.unique()

# Restrict to the user-selected (inclusive) block window.
df_sel = df.loc[df.block.between(block_start, block_end)]


# add vertical space
st.markdown('#')
st.markdown('#')

tab1, tab2, tab3, tab4 = st.tabs(["Overview", "Miners", "Validators", "Block"])

# Metric columns a user can plot per-miner, and identity columns for
# grouping ("cabal") analysis.
miner_choices = ['total_stake','ranks','incentive','emission','consensus','trust','validator_trust','dividends']
cabal_choices = ['hotkey','ip','coldkey']

### Overview ###
with tab1:

    x_col = st.radio('X-axis', ['block','timestamp'], index=0, horizontal=True)

    acol1, acol2 = st.columns([0.3, 0.7])
    sel_ntop = acol1.slider('Number:', min_value=1, max_value=50, value=10, key='sel_ntop')
    #horizontal list
    miner_choice = acol2.radio('Select:', miner_choices, horizontal=True, index=0)
    # Time series of the chosen metric for the top-N entities.
    st.plotly_chart(
        plot_trace(df_sel, time_col=x_col,col=miner_choice, ntop=sel_ntop),
        use_container_width=True
    )

    col1, col2 = st.columns(2)
    count_col = col1.radio('Count', cabal_choices, index=0, horizontal=True)
    y_col = col2.radio('Agg on', cabal_choices, index=2, horizontal=True)

    # Counts of `count_col` values grouped by `y_col` over time.
    st.plotly_chart(
        plot_cabals(df_sel, time_col=x_col, count_col=count_col, sel_col=y_col, ntop=sel_ntop),
        use_container_width=True
    )

with tab2:

    # plot of miner weights versus time/block
    pass
|
opendashboards/utils/aggregate.py
ADDED
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
|
3 |
+
def diversity(x):
    """Fraction of distinct values in *x* (1.0 when all entries differ)."""
    distinct = x.nunique()
    total = len(x)
    return distinct / total
|
5 |
+
|
6 |
+
def _nonempty(x):
|
7 |
+
return x[x.str.len()>0]
|
8 |
+
|
9 |
+
def successful_diversity(x):
    """Distinct-value fraction computed over the non-empty entries of *x*."""
    # Inlined filter: keep only entries with at least one character.
    successful = x[x.str.len() > 0]
    return successful.nunique() / len(successful)
|
11 |
+
|
12 |
+
def success_rate(x):
    """Fraction of entries in *x* that are non-empty strings."""
    # Inlined filter: entries with at least one character count as successes.
    successful = x[x.str.len() > 0]
    return len(successful) / len(x)
|
14 |
+
|
15 |
+
def threshold_rate(x, threshold):
    """Fraction of entries in *x* strictly greater than *threshold*."""
    above = x > threshold
    return above.sum() / len(x)
|
17 |
+
|
18 |
+
def successful_nonzero_diversity(x):
    """Distinct-value fraction of non-empty completions on rewarded rows.

    Intended for groupby.apply; returns a one-element Series keyed
    'completions_successful_nonzero_diversity'.
    """
    # Rows that earned a positive reward, then drop empty completions.
    rewarded = x.loc[x['rewards'] > 0, 'completions']
    nonempty = rewarded[rewarded.str.len() > 0]
    ratio = nonempty.nunique() / len(nonempty)
    return pd.Series({'completions_successful_nonzero_diversity': ratio})
|
21 |
+
|
22 |
+
def completion_top_stats(x, exclude=None, ntop=1):
    """Frequency and reward stats for the *ntop* most common completions.

    Intended for groupby.apply on a frame with 'completions' and 'rewards'
    columns.

    Args:
        x: DataFrame with at least 'completions' and 'rewards' columns.
        exclude: completion value(s) to drop from the ranking, or None.
        ntop: maximum number of top completions to report.

    Returns:
        DataFrame with one row per reported completion (at most *ntop*):
        rank, completion text, frequency, reward mean and std.
    """
    vc = x['completions'].value_counts()
    if exclude is not None:
        vc = vc.drop(exclude, errors='ignore')

    # Fix 1: cap at the number of distinct completions actually present,
    # otherwise range(ntop) had the wrong length and DataFrame() raised.
    top_completions = vc.index[:ntop]

    # Fix 2: groupby sorts its result alphabetically, which misaligned the
    # reward rows with the frequency-ordered vc values; reindex to restore
    # frequency order so every column corresponds row-wise.
    rewards = (
        x.loc[x['completions'].isin(top_completions)]
        .groupby('completions').rewards.agg(['mean', 'std'])
        .reindex(top_completions)
    )
    return pd.DataFrame({
        'completions_rank': range(len(top_completions)),
        'completions_top': top_completions.tolist(),
        'completions_freq': vc.values[:len(top_completions)],
        'completions_reward_mean': rewards['mean'].values,
        'completions_reward_std': rewards['std'].values
    })
|
36 |
+
|
37 |
+
def top(x, i=0, exclude=''):
    """Return the i-th most frequent non-empty value of *x*, skipping *exclude*."""
    # Inlined non-empty filter, then rank by frequency.
    counts = x[x.str.len() > 0].value_counts()
    counts = counts.drop(exclude, errors='ignore')
    return counts.index[i]
|
39 |
+
|
40 |
+
def freq(x, i=0, exclude=''):
    """Return the count of the i-th most frequent non-empty value of *x*."""
    # Inlined non-empty filter, then rank by frequency.
    counts = x[x.str.len() > 0].value_counts()
    counts = counts.drop(exclude, errors='ignore')
    return counts.values[i]
|
42 |
+
|
43 |
+
def nonzero_rate(x):
    """Fraction of entries strictly greater than zero.

    Note: despite the name, negative entries do NOT count as nonzero here.
    """
    positive = x > 0
    return positive.sum() / len(x)
|
45 |
+
|
46 |
+
def nonzero_mean(x):
    """Mean over the strictly positive entries of *x* (NaN when there are none)."""
    positive = x.loc[x > 0]
    return positive.mean()
|
48 |
+
|
49 |
+
def nonzero_std(x):
    """Sample std-dev over the strictly positive entries of *x* (NaN when <2)."""
    positive = x.loc[x > 0]
    return positive.std()
|
51 |
+
|
52 |
+
def nonzero_median(x):
    """Median over the strictly positive entries of *x* (NaN when there are none)."""
    positive = x.loc[x > 0]
    return positive.median()
|