AlexNijjar committed
Commit 6c858ba · 1 Parent(s): 1593d57

Initial commit
.gitignore ADDED
@@ -0,0 +1,160 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ .idea/
README.md CHANGED
@@ -1,12 +1,12 @@
  ---
  title: Edge Maxxing Dashboard
- emoji: 💻
+ emoji: 🏆
  colorFrom: purple
  colorTo: gray
  sdk: gradio
  sdk_version: 5.5.0
  app_file: app.py
- pinned: false
+ pinned: true
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
pyproject.toml ADDED
@@ -0,0 +1,13 @@
+ [tool.ruff]
+ # Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
+ select = ["E", "F"]
+ ignore = ["E501"]  # line too long (black is taking care of this)
+ line-length = 119
+ fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
+
+ [tool.isort]
+ profile = "black"
+ line_length = 119
+
+ [tool.black]
+ line-length = 119
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ gradio==5.5.0
+ bittensor==8.2.0
+ wandb==0.18.5
+ substrate-interface==1.7.10
+ plotly==5.24.1
+ pandas==2.2.3
+ packaging==24.1
src/app.py ADDED
@@ -0,0 +1,40 @@
+ import gradio as gr
+
+ from chain_data import sync_metagraph
+ from leaderboard import create_leaderboard, create_dropdown
+ from validator_states import create_validator_states
+ from validator_weights import create_weights
+ from wandb_data import sync
+
+
+ def main():
+     sync_metagraph()
+     sync()
+     with gr.Blocks(css=".typewriter {font-family: 'JMH Typewriter', sans-serif;}", fill_height=True, fill_width=True) as app:
+         with gr.Tab("Leaderboard") as leaderboard_tab:
+             dropdown = gr.Dropdown()
+             dropdown.attach_load_event(lambda: create_dropdown(), None)
+
+             leaderboard_dataframe = gr.Dataframe()
+             leaderboard_dataframe.attach_load_event(lambda uid: create_leaderboard(uid), None, inputs=[dropdown])
+             leaderboard_tab.select(lambda uid: create_leaderboard(uid), inputs=[dropdown], outputs=[leaderboard_dataframe])
+
+             dropdown.change(lambda uid: create_leaderboard(uid), inputs=[dropdown], outputs=[leaderboard_dataframe])
+
+         with gr.Tab("Validator States") as validator_states_tab:
+             validator_states_dataframe = gr.Dataframe()
+             validator_states_dataframe.attach_load_event(lambda: create_validator_states(), None)
+             validator_states_tab.select(lambda: create_validator_states(), [], [validator_states_dataframe])
+
+         with gr.Tab("Validator Weights") as validator_weights_tab:
+             validator_weights_dataframe = gr.Dataframe()
+             validator_weights_dataframe.attach_load_event(lambda: create_weights(), None)
+             validator_weights_tab.select(lambda: create_weights(), [], [validator_weights_dataframe])
+
+         with gr.Tab("Model Demo"):
+             gr.Label("Coming soon!", show_label=False)
+     app.launch()
+
+
+ if __name__ == "__main__":
+     main()
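The dashboard reads two environment variables at import time: `DEFAULT_VALIDATOR_UID` in src/leaderboard.py and `WANDB_RUN_PATH` in src/wandb_data.py. A minimal sketch of providing them before launching locally; the values are placeholders, and importing `app` also opens a subtensor connection because src/chain_data.py connects at import time:

```python
import os

# Placeholder values; both variable names are read via os.environ[...] elsewhere in this commit.
os.environ["WANDB_RUN_PATH"] = "<wandb-entity>/<wandb-project>"
os.environ["DEFAULT_VALIDATOR_UID"] = "0"

# Importing app pulls in chain_data, which opens a subtensor connection at import time.
from app import main  # assumes the working directory is src/

main()
```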
src/chain_data.py ADDED
@@ -0,0 +1,91 @@
+ from datetime import datetime, timedelta
+ from typing import TypeAlias
+
+ import bittensor as bt
+
+ from wandb_data import Uid, TIMEZONE
+
+ Weight: TypeAlias = float
+ Incentive: TypeAlias = float
+
+ NET_UID = 39
+ WEIGHTS_BY_MINER: list[list[tuple[Uid, Weight]]] = []
+ INCENTIVES: dict[Uid, Incentive] = {}
+ VALIDATOR_IDENTITIES: dict[Uid, str] = {}
+ VTRUST: dict[Uid, float] = {}
+ UPDATED: dict[Uid, int] = {}
+
+ subtensor = bt.subtensor()
+ metagraph = bt.metagraph(netuid=NET_UID)
+
+
+ def fetch_weights():
+     WEIGHTS_BY_MINER.clear()
+     weights = subtensor.weights(netuid=NET_UID)
+
+     for miner_uid in range(metagraph.n.item()):
+         WEIGHTS_BY_MINER.append([])
+         for validator_uid, validator_weights in weights:
+             if not metagraph.validator_permit[validator_uid]:
+                 continue
+
+             weight = 0.0
+             for miner_weight in validator_weights:
+                 if miner_weight[0] == miner_uid:
+                     weight = miner_weight[1] / 2 ** 16
+                     break
+             WEIGHTS_BY_MINER[miner_uid].append((validator_uid, weight))
+
+
+ def fetch_incentives():
+     INCENTIVES.clear()
+     for i in range(len(metagraph.incentive)):
+         INCENTIVES[i] = metagraph.incentive[i]
+
+ def fetch_vtrust():
+     VTRUST.clear()
+     for i in range(len(metagraph.validator_trust)):
+         VTRUST[i] = metagraph.validator_trust[i]
+
+ def fetch_updated():
+     UPDATED.clear()
+     block = subtensor.get_current_block()
+     for i in range(len(metagraph.last_update)):
+         UPDATED[i] = block - metagraph.last_update[i]
+
+
+ def fetch_identities():
+     VALIDATOR_IDENTITIES.clear()
+     for uid in range(metagraph.n.item()):
+         if not metagraph.validator_permit[uid]:
+             continue
+         identity = subtensor.substrate.query('SubtensorModule', 'Identities', [metagraph.coldkeys[uid]])
+         if identity is not None:
+             VALIDATOR_IDENTITIES[uid] = identity.value["name"]
+
+
+ last_sync: datetime = datetime.fromtimestamp(0, TIMEZONE)
+ last_identity_sync: datetime = datetime.fromtimestamp(0, TIMEZONE)
+
+
+ def sync_metagraph():
+     global last_sync
+     now = datetime.now(TIMEZONE)
+     if now - last_sync < timedelta(minutes=5):
+         return
+
+     print("Syncing metagraph...")
+     last_sync = now
+     metagraph.sync(subtensor=subtensor)
+
+     fetch_weights()
+     fetch_incentives()
+     fetch_vtrust()
+     fetch_updated()
+
+     global last_identity_sync
+     if now - last_identity_sync < timedelta(hours=12):
+         return
+
+     last_identity_sync = now
+     fetch_identities()
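For reference, a small sketch (with made-up numbers) of the normalisation `fetch_weights` applies: weights come off the chain as u16 integers, so dividing by 2 ** 16 maps them into roughly [0, 1):

```python
# Hypothetical (miner_uid, raw_weight) pairs for one validator; the values are made up.
raw_weights = [(12, 65535), (40, 32768), (77, 0)]

normalised = {uid: raw / 2 ** 16 for uid, raw in raw_weights}
print(normalised)  # {12: 0.9999847412109375, 40: 0.5, 77: 0.0}
```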
src/leaderboard.py ADDED
@@ -0,0 +1,58 @@
+ import os
+
+ import gradio as gr
+ import pandas as pd
+
+ from wandb_data import get_current_runs
+
+ DEFAULT_VALIDATOR_UID = int(os.environ["DEFAULT_VALIDATOR_UID"])
+
+ def create_dropdown() -> gr.Dropdown:
+     choices: list[tuple[str, int]] = []
+     runs = get_current_runs()
+     for run in runs:
+         pretty_name = f"{run.uid} - {run.name} ({run.status.name()})"
+         choices.append((pretty_name, run.uid))
+
+     choices = sorted(choices, key=lambda x: x[1])
+
+     default = DEFAULT_VALIDATOR_UID
+     if default not in [uid for _, uid in choices]:
+         default = choices[0][1]
+     return gr.Dropdown(
+         choices,
+         value=default,
+         interactive=True,
+         label="Source Validator"
+     )
+
+ def create_leaderboard(validator_uid) -> gr.Dataframe:
+     data: list[list] = []
+     runs = get_current_runs()
+     for run in runs:
+         if run.uid != validator_uid:
+             continue
+         for submission in run.submissions.values():
+             data.append([
+                 submission.info.uid,
+                 f"[{'/'.join(submission.info.repository.split('/')[-2:])}]({submission.info.repository})",
+                 submission.tier,
+                 round(submission.score, 3),
+                 f"{submission.metrics.generation_time:.3f}s",
+                 f"{submission.average_similarity * 100:.3f}%",
+                 f"{submission.metrics.size / 1_000_000_000:.3f}GB",
+                 f"{submission.metrics.vram_used / 1_000_000_000:.3f}GB",
+                 f"{submission.metrics.watts_used:.3f}W",
+                 f"{submission.metrics.load_time:.3f}s",
+                 f"[{submission.info.block}](https://taostats.io/block/{submission.info.block})",
+                 f"[{submission.info.revision}]({submission.info.repository}/commit/{submission.info.revision})",
+                 f"[{submission.info.hotkey[:6]}...](https://taostats.io/hotkey/{submission.info.hotkey})",
+             ])
+
+     data.sort(key=lambda x: (-x[2], int(x[10].split('[')[1].split(']')[0])))
+
+     return gr.Dataframe(
+         pd.DataFrame(data, columns=["UID", "Model", "Tier", "Score", "Gen Time", "Similarity", "Size", "VRAM Usage", "Power Usage", "Load Time", "Block", "Revision", "Hotkey"]),
+         datatype=["number", "markdown", "number", "number", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown"],
+         interactive=False,
+     )
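As a quick illustration of the markdown cells built above, the "Model" column turns a repository URL into a link labelled with its last two path segments (the URL below is a placeholder):

```python
repository = "https://huggingface.co/example-org/example-model"  # placeholder, not from this commit
cell = f"[{'/'.join(repository.split('/')[-2:])}]({repository})"
print(cell)  # [example-org/example-model](https://huggingface.co/example-org/example-model)
```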
src/validator_states.py ADDED
@@ -0,0 +1,57 @@
+ import statistics
+ from datetime import timedelta
+
+ import gradio as gr
+ import pandas as pd
+ from packaging import version
+
+ from wandb_data import get_current_runs, Run
+ from chain_data import VTRUST, UPDATED
+
+ AVERAGE_BENCHMARK_TIME_WARNING_THRESHOLD = 180  # 3 minutes
+ ETA_WARNING_THRESHOLD = 43200  # 12 hours
+ UPDATED_WARNING_THRESHOLD = 1000
+ VTRUST_WARNING_THRESHOLD = 0.75
+
+
+ def get_latest_version(runs: list[Run]) -> str:
+     latest_version = version.parse("0.0.0")
+     for run in runs:
+         current_version = version.parse(run.version)
+         if current_version > latest_version:
+             latest_version = current_version
+     return str(latest_version)
+
+
+ def colorize(val, color: str) -> str:
+     return f"<span style='color: {color}'>{val}</span>"
+
+
+ def create_validator_states() -> gr.Dataframe:
+     data: list[list] = []
+     runs = sorted(get_current_runs(), key=lambda run: run.uid)
+     winners = [run.winner_uid for run in runs if run.winner_uid]
+     winner_uid_mode = statistics.mode(winners) if winners else None
+     latest_version = get_latest_version(runs)
+     for run in runs:
+         vtrust = VTRUST.get(run.uid, 0)
+         updated = UPDATED.get(run.uid, 0)
+         data.append([
+             run.uid,
+             run.name,
+             colorize(run.version, "springgreen" if run.version == latest_version else "red"),
+             colorize(run.status.name(), run.status.color()),
+             colorize(run.winner_uid, "springgreen" if winner_uid_mode and run.winner_uid == winner_uid_mode else "orange" if run.winner_uid else "gray"),
+             f"{len(run.submissions) + len(run.invalid_submissions)}/{run.total_submissions}",
+             len(run.invalid_submissions),
+             colorize(f"{timedelta(seconds=int(run.average_benchmark_time))}", "orange" if run.average_benchmark_time > AVERAGE_BENCHMARK_TIME_WARNING_THRESHOLD else "springgreen" if run.average_benchmark_time > 0 else "gray"),
+             colorize(f"{timedelta(seconds=run.eta)}", "orange" if run.eta > ETA_WARNING_THRESHOLD else "springgreen" if run.eta > 0 else "gray"),
+             colorize(f"{vtrust:.4f}", "springgreen" if vtrust > VTRUST_WARNING_THRESHOLD else "red"),
+             colorize(updated, "springgreen" if updated < UPDATED_WARNING_THRESHOLD else "red"),
+         ])
+
+     return gr.Dataframe(
+         pd.DataFrame(data, columns=["UID", "Name", "Version", "Status", "Winner", "Tested", "Invalid", "Avg. Benchmark Time", "ETA", "VTrust", "Updated"]),
+         datatype=["number", "markdown", "markdown", "markdown", "markdown", "number", "number", "markdown", "markdown", "markdown", "markdown"],
+         interactive=False,
+     )
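`get_latest_version` compares versions with `packaging.version` rather than as plain strings; a small sketch of why, using made-up version tags:

```python
from packaging import version

tags = ["1.9.0", "1.10.2"]            # hypothetical run versions
print(max(tags))                      # 1.9.0  (lexicographic comparison picks the wrong one)
print(max(tags, key=version.parse))   # 1.10.2 (semantic comparison picks the latest)
```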
src/validator_weights.py ADDED
@@ -0,0 +1,53 @@
+ import gradio as gr
+ import pandas as pd
+
+ from chain_data import WEIGHTS_BY_MINER, INCENTIVES, sync_metagraph
+
+
+ def get_color_by_weight(weight: float) -> str:
+     if weight < 0.001:
+         return "gray"
+     elif weight < 0.3:
+         r = 255
+         g = int((weight / 0.3) * 165)
+         return f"rgb({r}, {g}, 0)"
+     elif weight < 0.8:
+         progress = (weight - 0.3) / 0.5
+         r = int(255 - (progress * 255))
+         g = int(165 + (progress * 90))
+         return f"rgb({r}, {g}, 0)"
+     else:
+         progress = (weight - 0.8) / 0.2
+         g = int(255 - ((1 - progress) * 50))
+         return f"rgb(0, {g}, 0)"
+
+ def create_weights() -> gr.Dataframe:
+     data: list[list] = []
+     sync_metagraph()
+
+     headers = ["Miner UID", "Incentive"]
+     datatype = ["number", "markdown"]
+
+     validator_uids = set()
+     for validator_weights in WEIGHTS_BY_MINER:
+         for validator_uid, _ in validator_weights:
+             validator_uids.add(validator_uid)
+
+     for validator_uid in sorted(validator_uids):
+         headers.append(str(validator_uid))
+         datatype.append("markdown")
+
+     for miner_uid, validator_weights in enumerate(WEIGHTS_BY_MINER):
+         incentive = INCENTIVES[miner_uid]
+         row = [miner_uid, f"<span style='color: {get_color_by_weight(incentive)}'>{incentive:.3f}</span>"]
+         for _, weight in validator_weights:
+             row.append(f"<span style='color: {get_color_by_weight(weight)}'>{weight:.3f}</span>")
+         data.append(row)
+
+     data.sort(key=lambda val: float(val[1].split(">")[1].split("<")[0]), reverse=True)
+
+     return gr.Dataframe(
+         pd.DataFrame(data, columns=headers),
+         datatype=datatype,
+         interactive=False,
+     )
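For orientation, the ramp above maps low weights to red/orange and high weights to green; the outputs below were worked out by hand from `get_color_by_weight` for a few arbitrary weights:

```python
# get_color_by_weight(0.0005) -> "gray"              (below the 0.001 cut-off)
# get_color_by_weight(0.15)   -> "rgb(255, 82, 0)"   (weight < 0.3: red/orange band)
# get_color_by_weight(0.5)    -> "rgb(153, 201, 0)"  (0.3 <= weight < 0.8: fading towards green)
# get_color_by_weight(0.9)    -> "rgb(0, 230, 0)"    (weight >= 0.8: green band)
```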
src/wandb_data.py ADDED
@@ -0,0 +1,288 @@
+ import os
+ from dataclasses import dataclass
+ from datetime import datetime, timedelta, timezone
+ from enum import Enum
+ from typing import TypeAlias
+ from zoneinfo import ZoneInfo
+
+ import wandb
+ import wandb.apis.public as wapi
+ from substrateinterface import Keypair
+
+ WANDB_RUN_PATH = os.environ["WANDB_RUN_PATH"]
+
+ TIMEZONE = ZoneInfo("America/Los_Angeles")
+ START_DATE = datetime(2024, 11, 7)
+
+ Uid: TypeAlias = int
+
+
+ class BenchmarkStatus(Enum):
+     NOT_STARTED = ("Not Started", "orange", False)
+     IN_PROGRESS = ("In Progress", "orange", False)
+     FINISHED = ("Finished", "springgreen", False)
+     INITIALISING = ("Initialising", "orange", False)
+     STOPPED = ("Stopped", "red", True)
+     CRASHED = ("Crashed", "red", True)
+     FAILED = ("Failed", "red", True)
+     UNKNOWN = ("Unknown", "red", True)
+
+     def name(self):
+         return self.value[0]
+
+     def color(self):
+         return self.value[1]
+
+     def failed(self):
+         return self.value[2]
+
+
+ @dataclass
+ class MetricData:
+     generation_time: float
+     vram_used: float
+     watts_used: float
+     load_time: float
+     size: int
+
+
+ @dataclass
+ class SubmissionInfo:
+     uid: int
+     hotkey: str
+     repository: str
+     revision: str
+     block: int
+
+
+ @dataclass
+ class Submission:
+     info: SubmissionInfo
+     metrics: MetricData
+     average_similarity: float
+     min_similarity: float
+     tier: int
+     score: float
+
+
+ @dataclass
+ class InvalidSubmission:
+     info: SubmissionInfo
+     reason: str
+
+
+ @dataclass
+ class Run:
+     start_date: datetime
+     version: str
+     uid: int
+     name: str
+     hotkey: str
+     status: BenchmarkStatus
+     average_benchmark_time: float
+     eta: int
+     winner_uid: int | None
+     baseline_metrics: MetricData | None
+     total_submissions: int
+     submissions: dict[Uid, Submission]
+     invalid_submissions: dict[Uid, InvalidSubmission]
+
+
+ RUNS: dict[str, list[Run]] = {}
+
+
+ def _is_valid_run(run: wapi.Run):
+     required_config_keys = ["hotkey", "uid", "contest", "signature"]
+
+     for key in required_config_keys:
+         if key not in run.config:
+             return False
+
+     validator_hotkey = run.config["hotkey"]
+     contest_name = run.config["contest"]
+
+     signing_message = f"{run.name}:{validator_hotkey}:{contest_name}"
+
+     return Keypair(validator_hotkey).verify(signing_message, run.config["signature"])
+
+
+ def _date_from_run(run: wapi.Run) -> datetime:
+     return datetime.strptime(run.created_at, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc).astimezone(TIMEZONE)
+
+
+ def _status_from_run(run: wapi.Run) -> BenchmarkStatus:
+     match run.state:
+         case "finished":
+             return BenchmarkStatus.STOPPED
+         case "crashed":
+             return BenchmarkStatus.CRASHED
+         case "failed":
+             return BenchmarkStatus.FAILED
+         case "running":
+             if "benchmarking_state" in run.summary:
+                 return BenchmarkStatus[run.summary["benchmarking_state"]]
+             else:
+                 return BenchmarkStatus.INITIALISING
+         case _:
+             return BenchmarkStatus.UNKNOWN
+
+
+ def _add_runs(wandb_runs: list[wapi.Run]):
+     for wandb_run in wandb_runs:
+         if not _is_valid_run(wandb_run):
+             continue
+
+         metrics = wandb_run.summary
+
+         submission_info: dict[Uid, SubmissionInfo] = {}
+         submissions: dict[Uid, Submission] = {}
+         invalid_submissions: dict[Uid, InvalidSubmission] = {}
+
+         baseline_metrics: MetricData | None = None
+         if "baseline" in metrics:
+             baseline = metrics["baseline"]
+             baseline_metrics = MetricData(
+                 generation_time=float(baseline["generation_time"]),
+                 vram_used=float(baseline["vram_used"]),
+                 watts_used=float(baseline["watts_used"]),
+                 load_time=float(baseline["load_time"]),
+                 size=int(baseline["size"]),
+             )
+
+         if "submissions" in metrics:
+             for uid, submission in metrics["submissions"].items():
+                 submission_info[uid] = SubmissionInfo(
+                     uid=uid,
+                     hotkey=submission["hotkey"] if "hotkey" in submission else metrics["benchmarks"][uid]["hotkey"] if uid in metrics["benchmarks"] else "unknown",
+                     # hotkey=submission["hotkey"],  # TODO use this once validators update
+                     repository=submission["repository"],
+                     revision=submission["revision"],
+                     block=submission["block"],
+                 )
+
+         if "benchmarks" in metrics:
+             for uid, benchmark in metrics["benchmarks"].items():
+                 model = benchmark["model"]
+                 submissions[uid] = Submission(
+                     info=submission_info[uid],
+                     metrics=MetricData(
+                         generation_time=float(model["generation_time"]),
+                         vram_used=float(model["vram_used"]),
+                         watts_used=float(model["watts_used"]),
+                         load_time=float(model["load_time"]),
+                         size=int(model["size"]),
+                     ),
+                     average_similarity=float(benchmark["average_similarity"]),
+                     min_similarity=float(benchmark["min_similarity"]),
+                     tier=int(benchmark["tier"]),
+                     score=float(benchmark["score"]),
+                 )
+
+         if "invalid" in metrics:
+             for uid, reason in metrics["invalid"].items():
+                 if uid not in submission_info:
+                     continue
+                 invalid_submissions[uid] = InvalidSubmission(
+                     info=submission_info[uid],
+                     reason=reason,
+                 )
+
+         status = _status_from_run(wandb_run)
+         winners = sorted(
+             submissions.values(),
+             key=lambda submission: (submission.tier, -submission.info.block),
+             reverse=True,
+         )
+         winner_uid = winners[0].info.uid if winners and status == BenchmarkStatus.FINISHED else None
+
+         from chain_data import VALIDATOR_IDENTITIES
+         uid = int(wandb_run.config["uid"])
+         hotkey = wandb_run.config["hotkey"]
+         date = _date_from_run(wandb_run)
+         run_id = wandb_run.id
+         average_benchmark_time = float(wandb_run.summary["average_benchmark_time"]) if "average_benchmark_time" in wandb_run.summary else 0
+         run = Run(
+             start_date=date,
+             version=wandb_run.tags[1][8:],
+             uid=uid,
+             name=VALIDATOR_IDENTITIES.get(uid, hotkey),
+             hotkey=hotkey,
+             status=status,
+             average_benchmark_time=average_benchmark_time,
+             eta=int(average_benchmark_time * (len(submission_info) - len(submissions) - len(invalid_submissions))) if average_benchmark_time else 0,
+             winner_uid=winner_uid,
+             baseline_metrics=baseline_metrics,
+             total_submissions=len(submission_info),
+             submissions=submissions,
+             invalid_submissions=invalid_submissions,
+         )
+
+         if run_id not in RUNS:
+             RUNS[run_id] = [run]
+         else:
+             present = False
+             for i, existing_run in enumerate(RUNS[run_id]):
+                 if existing_run.uid == run.uid:
+                     RUNS[run_id][i] = run
+                     present = True
+                     break
+
+             if not present:
+                 RUNS[run_id].append(run)
+
+
+ def _fetch_history(wandb_api: wandb.Api):
+     wandb_runs = wandb_api.runs(
+         WANDB_RUN_PATH,
+         filters={"config.type": "validator", "created_at": {'$gt': str(START_DATE)}},
+         order="+created_at",
+     )
+     _add_runs(wandb_runs)
+
+
+ def _fetch_current_runs(wandb_api: wandb.Api, now: datetime):
+     contest_start = now.replace(hour=0, minute=0, second=0, microsecond=0)
+     wandb_runs = wandb_api.runs(
+         WANDB_RUN_PATH,
+         filters={"config.type": "validator", "created_at": {'$gt': str(contest_start)}},
+         order="+created_at",
+     )
+     _add_runs(wandb_runs)
+
+
+ last_sync: datetime = datetime.fromtimestamp(0, TIMEZONE)
+
+
+ def sync():
+     global last_sync
+     now = datetime.now(TIMEZONE)
+     if now - last_sync < timedelta(seconds=30):
+         return
+
+     print("Syncing runs...")
+     last_sync = now
+     wandb_api = wandb.Api()
+     if not RUNS:
+         _fetch_history(wandb_api)
+     else:
+         _fetch_current_runs(wandb_api, now)
+
+
+ def get_current_runs() -> list[Run]:
+     sync()
+     from chain_data import sync_metagraph
+     sync_metagraph()
+
+     now = datetime.now(TIMEZONE)
+     today = now.replace(hour=0, minute=0, second=0, microsecond=0)
+     if now.hour < 12:
+         today -= timedelta(days=1)
+
+     current_runs: list[Run] = []
+
+     for runs in RUNS.values():
+         for run in runs:
+             if run.start_date >= today:
+                 current_runs.append(run)
+
+     return current_runs
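A minimal usage sketch for this module, assuming `WANDB_RUN_PATH` is set before import, the wandb client can reach the project, and the subtensor endpoint used by chain_data is reachable (get_current_runs also syncs the metagraph):

```python
from wandb_data import sync, get_current_runs

sync()                           # no-op if the last sync was under 30 seconds ago
for run in get_current_runs():   # runs belonging to the current contest window
    print(run.uid, run.name, run.status.name(), len(run.submissions), run.winner_uid)
```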