alex n committed on
Commit e5457b7 · 1 Parent(s): c6fe3ec

first commit

Files changed (4)
  1. Makefile +4 -3
  2. app.py +120 -194
  3. axoninfo.py +41 -0
  4. requirements.txt +4 -15
Makefile CHANGED
@@ -1,13 +1,14 @@
-.PHONY: style format
-
+.PHONY: style format run
 
 style:
 	python -m black --line-length 119 .
 	python -m isort .
 	ruff check --fix .
 
-
 quality:
 	python -m black --check --line-length 119 .
 	python -m isort --check-only .
 	ruff check .
+
+run:
+	python -m src.main
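Note on this hunk: `.PHONY` now declares `style format run`, but the targets actually defined are `style`, `quality`, and `run` — `format` looks like a leftover and `quality` is the one that should be marked phony. The new `run` target also invokes `python -m src.main`, while this commit adds its entry point as `app.py` at the repository root, so `make run` presumably expects a `src/main.py` that is not part of this commit.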
app.py CHANGED
@@ -1,204 +1,130 @@
 import gradio as gr
-from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
+import bittensor as bt
+import requests
 import pandas as pd
 from apscheduler.schedulers.background import BackgroundScheduler
-from huggingface_hub import snapshot_download
-
-from src.about import (
-    CITATION_BUTTON_LABEL,
-    CITATION_BUTTON_TEXT,
-    EVALUATION_QUEUE_TEXT,
-    INTRODUCTION_TEXT,
-    LLM_BENCHMARKS_TEXT,
-    TITLE,
-)
-from src.display.css_html_js import custom_css
-from src.display.utils import (
-    BENCHMARK_COLS,
-    COLS,
-    EVAL_COLS,
-    EVAL_TYPES,
-    AutoEvalColumn,
-    ModelType,
-    fields,
-    WeightType,
-    Precision
-)
-from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
-from src.populate import get_evaluation_queue_df, get_leaderboard_df
-from src.submission.submit import add_new_eval
-
-
-def restart_space():
-    API.restart_space(repo_id=REPO_ID)
-
-### Space initialisation
-try:
-    print(EVAL_REQUESTS_PATH)
-    snapshot_download(
-        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-    )
-except Exception:
-    restart_space()
-try:
-    print(EVAL_RESULTS_PATH)
-    snapshot_download(
-        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-    )
-except Exception:
-    restart_space()
-
-
-LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
-
-(
-    finished_eval_queue_df,
-    running_eval_queue_df,
-    pending_eval_queue_df,
-) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
-
-def init_leaderboard(dataframe):
-    if dataframe is None or dataframe.empty:
-        raise ValueError("Leaderboard DataFrame is empty or None.")
-    return Leaderboard(
-        value=dataframe,
-        datatype=[c.type for c in fields(AutoEvalColumn)],
-        select_columns=SelectColumns(
-            default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
-            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
-            label="Select Columns to Display:",
-        ),
-        search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
-        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
-        filter_columns=[
-            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
-            ColumnFilter(
-                AutoEvalColumn.params.name,
-                type="slider",
-                min=0.01,
-                max=150,
-                label="Select the number of parameters (B)",
-            ),
-            ColumnFilter(
-                AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
-            ),
-        ],
-        bool_checkboxgroup_label="Hide models",
-        interactive=False,
-    )
-
-
+
+# Custom CSS for better appearance
+custom_css = """
+.gradio-container {
+    max-width: 1200px !important;
+    margin: auto;
+}
+.title {
+    text-align: center;
+    margin-bottom: 1rem;
+}
+.status-active { color: green; }
+.status-error { color: red; }
+"""
+
+# Initialize bittensor objects
+subtensor = bt.subtensor()
+metagraph = bt.metagraph(netuid=36)
+
+def get_validator_data() -> pd.DataFrame:
+    validator_ids = [i for i in range(len(metagraph.validator_permit)) if metagraph.validator_permit[i]]
+
+    results = []
+    for uid in validator_ids:
+        try:
+            ip = metagraph.axons[uid].ip_str().split('/')[-1]
+            response = requests.get(f'http://{ip}/step', timeout=5)
+            response.raise_for_status()
+            validator_info = {
+                'UID': uid,
+                'IP': ip,
+                'Bits': response.json().get('bits', 0),
+                'Status': '✅ Active'
+            }
+        except Exception as e:
+            validator_info = {
+                'UID': uid,
+                'IP': metagraph.axons[uid].ip_str().split('/')[-1],
+                'Bits': 0,
+                'Status': f'❌ Error: {str(e)[:50]}...' if len(str(e)) > 50 else str(e)
+            }
+        results.append(validator_info)
+
+    df = pd.DataFrame(results)
+    return df.sort_values('Bits', ascending=False)
+
+# Create the Gradio interface
 demo = gr.Blocks(css=custom_css)
-with demo:
-    gr.HTML(TITLE)
-    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
-
-    with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
-            leaderboard = init_leaderboard(LEADERBOARD_DF)
-
-        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
-            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
-
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
-            )
-
-    with gr.Row():
-        with gr.Accordion("📙 Citation", open=False):
-            citation_button = gr.Textbox(
-                value=CITATION_BUTTON_TEXT,
-                label=CITATION_BUTTON_LABEL,
-                lines=20,
-                elem_id="citation-button",
-                show_copy_button=True,
-            )
-
-scheduler = BackgroundScheduler()
-scheduler.add_job(restart_space, "interval", seconds=1800)
-scheduler.start()
-demo.queue(default_concurrency_limit=40).launch()
+
+with demo:
+    gr.HTML(
+        """
+        <div class="title">
+            <h1>🏆 Validator Bits Leaderboard</h1>
+            <p>Real-time tracking of validator performance and bits</p>
+        </div>
+        """
+    )
+
+    with gr.Tabs() as tabs:
+        with gr.Tab("📊 Leaderboard"):
+            leaderboard = gr.DataFrame(
+                headers=['UID', 'IP', 'Bits', 'Status'],
+                datatype=['number', 'str', 'number', 'str'],
+                interactive=False
+            )
+
+            with gr.Row():
+                refresh_button = gr.Button("🔄 Refresh Data", variant="primary")
+                auto_refresh = gr.Checkbox(
+                    label="Auto-refresh (5 min)",
+                    value=True,
+                    interactive=True
+                )
+
+            status_message = gr.Markdown("Last updated: Never")
+
+        with gr.Tab("ℹ️ About"):
+            gr.Markdown(
+                """
+                ## About this Leaderboard
+
+                This dashboard shows real-time information about validators on the network:
+
+                - **UID**: Unique identifier of the validator
+                - **IP**: Validator's IP address
+                - **Bits**: Current bits count
+                - **Status**: Active/Error status of the validator
+
+                Data is automatically refreshed every 5 minutes, or you can manually refresh using the button.
+                """
+            )
+
+    def update_leaderboard():
+        df = get_validator_data()
+        timestamp = pd.Timestamp.now().strftime("%Y-%m-%d %H:%M:%S UTC")
+        return df, f"Last updated: {timestamp}"
+
+    refresh_button.click(
+        fn=update_leaderboard,
+        outputs=[leaderboard, status_message]
+    )
+
+    # Auto-refresh logic
+    def setup_auto_refresh():
+        if demo.scheduler:
+            demo.scheduler.shutdown()
+        demo.scheduler = BackgroundScheduler()
+        demo.scheduler.add_job(
+            lambda: demo.queue(update_leaderboard),
+            'interval',
+            minutes=5
+        )
+        demo.scheduler.start()
+
+    # Initial data load
+    demo.load(
+        fn=update_leaderboard,
+        outputs=[leaderboard, status_message]
+    )
+
+    setup_auto_refresh()
+
+# Launch the interface
+demo.queue(default_concurrency_limit=5).launch()
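
A caveat on the auto-refresh block in this hunk: `gr.Blocks` has no `scheduler` attribute, so the `if demo.scheduler:` check raises `AttributeError` the first time `setup_auto_refresh()` runs, and `demo.queue(update_leaderboard)` configures the app's event queue rather than calling the function. A minimal sketch of one way to get the intended behaviour — a module-level APScheduler job that refreshes a cache which `update_leaderboard()` reads (the names `_cache` and `refresh_cache` are hypothetical, not from the commit):

    import pandas as pd
    from apscheduler.schedulers.background import BackgroundScheduler

    _cache = {"df": pd.DataFrame(), "stamp": "Never"}  # hypothetical shared cache

    def refresh_cache():
        # get_validator_data() is the function defined in app.py above
        _cache["df"] = get_validator_data()
        _cache["stamp"] = pd.Timestamp.now(tz="UTC").strftime("%Y-%m-%d %H:%M:%S UTC")

    def update_leaderboard():
        return _cache["df"], f"Last updated: {_cache['stamp']}"

    scheduler = BackgroundScheduler()
    scheduler.add_job(refresh_cache, "interval", minutes=5)
    scheduler.start()

As a side effect this fixes the timestamp as well: `pd.Timestamp.now()` without a timezone returns local time, so labelling it "UTC" in the committed code is misleading.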
 
 
axoninfo.py ADDED
@@ -0,0 +1,41 @@
+import bittensor as bt
+import requests
+import json
+
+subtensor = bt.subtensor()
+metagraph = bt.metagraph(netuid=36)
+
+#print(metagraph.axons)
+
+#print(metagraph.validator_permit[48])
+
+
+def get_validator_ids():
+    return [i for i in range(len(metagraph.validator_permit)) if metagraph.validator_permit[i]]
+
+def get_validator_axons(list):
+    return [metagraph.axons[uid].ip_str().split('/')[-1] for uid in list]
+
+
+print(get_validator_axons(get_validator_ids()))
+
+response = requests.get('http://' + metagraph.axons[178].ip_str().split('/')[-1] + '/step')
+response.raise_for_status()
+print(response.json())
+def get_validator_bits(validator_ids, timeout=5, retries=3):
+    results = []
+    for uid in validator_ids:
+        for attempt in range(retries):
+            try:
+                ip = metagraph.axons[uid].ip_str().split('/')[-1]
+                response = requests.get(f'http://{ip}/step', timeout=timeout)
+                response.raise_for_status()
+                results.append(response.json())
+                break  # Success, exit retry loop
+            except requests.exceptions.RequestException as e:
+                if attempt == retries - 1:  # Last attempt
+                    print(f"Failed to get bits from validator {uid} after {retries} attempts: {str(e)}")
+                continue
+    return results
+
+print(get_validator_bits(get_validator_ids()))
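
One fragility in this script: the one-off probe of axon 178 runs at import time with no timeout and an unguarded `raise_for_status()`, so the whole script aborts if that single host is slow or down (and `get_validator_axons(list)` shadows the built-in `list`). A hedged sketch of a safer probe, using only the calls already present in the file (`probe_axon` is a hypothetical name):

    def probe_axon(uid, timeout=5):
        # Same address extraction as the rest of the file
        ip = metagraph.axons[uid].ip_str().split('/')[-1]
        try:
            response = requests.get(f'http://{ip}/step', timeout=timeout)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            print(f"Probe of validator {uid} at {ip} failed: {e}")
            return None

    print(probe_axon(178))  # replaces the unguarded module-level request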
requirements.txt CHANGED
@@ -1,16 +1,5 @@
-APScheduler
-black
-datasets
-gradio
-gradio[oauth]
-gradio_leaderboard==0.0.9
-gradio_client
-huggingface-hub>=0.18.0
-matplotlib
-numpy
+gradio>=4.0.0
+bittensor
 pandas
-python-dateutil
-tqdm
-transformers
-tokenizers>=0.15.0
-sentencepiece
+apscheduler
+requests