Commit · 8cdce17
1 Parent(s): dbf670f

Add submissions tab

- src/app.py +6 -0
- src/chain_data.py +82 -5
- src/leaderboard.py +1 -0
- src/network_commitments.py +48 -0
- src/submissions.py +48 -0
- src/validator_states.py +1 -0
- src/validator_weights.py +1 -0
- src/wandb_data.py +3 -3
src/app.py
CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 
 from chain_data import sync_metagraph
 from leaderboard import create_leaderboard, create_dropdown
+from submissions import create_submissions
 from validator_states import create_validator_states
 from validator_weights import create_weights
 from wandb_data import sync
@@ -35,6 +36,11 @@ def main():
 
             include_inactive_checkbox.change(lambda include_inactive: create_weights(include_inactive), [include_inactive_checkbox], [validator_weights_dataframe])
 
+        with gr.Tab("Submissions") as submissions_tab:
+            submissions_dataframe = gr.Dataframe()
+            submissions_dataframe.attach_load_event(lambda: create_submissions(), None)
+            submissions_tab.select(lambda: create_submissions(), [], [submissions_dataframe])
+
         with gr.Tab("Model Demo"):
             gr.Label("Coming soon!", show_label=False)
     app.launch()
src/chain_data.py
CHANGED
@@ -1,21 +1,58 @@
 from concurrent.futures import ThreadPoolExecutor
+from dataclasses import dataclass
 from datetime import datetime, timedelta
+from enum import Enum
 from math import ceil
 from typing import TypeAlias
 
 from fiber.chain.interface import get_substrate
 from fiber.chain.metagraph import Metagraph
 from fiber.chain.models import Node
+from fiber.chain.commitments import _deserialize_commitment_field
 from substrateinterface.storage import StorageKey
 
+from network_commitments import Decoder
 from wandb_data import Hotkey, Uid, TIMEZONE
 
 Weight: TypeAlias = float
 Incentive: TypeAlias = float
 
+class ContestId(Enum):
+    SDXL_APPLE_SILICON = 0
+    SDXL_NEWDREAM_NVIDIA_4090 = 1
+    FLUX_NVIDIA_4090 = 2
+
+@dataclass
+class Commitment:
+    provider: str
+    repository: str
+    revision: str
+    contest: ContestId
+    block: int
+
+    @classmethod
+    def decode(cls, decoder: Decoder, block: int):
+        provider = decoder.read_str()
+        repository = decoder.read_str()
+        revision = decoder.read_sized_str(7)
+        contest_id = ContestId(decoder.read_uint16())
+
+        return cls(
+            provider=provider,
+            repository=repository,
+            revision=revision,
+            contest=contest_id,
+            block=block
+        )
+
+    def get_repo_link(self):
+        return f"https://{self.provider}/{self.repository}"
+
+SPEC_VERSION = 7
 NET_UID = 39
 WEIGHTS_BY_MINER: dict[Hotkey, list[tuple[Hotkey, Weight]]] = {}
 VALIDATOR_IDENTITIES: dict[Hotkey, str] = {}
+COMMITMENTS: dict[Hotkey, Commitment] = {}
 UPDATED: dict[Hotkey, int] = {}
 
 UIDS_BY_HOTKEY: dict[Hotkey, Uid] = {}
@@ -96,9 +133,44 @@ def fetch_identities(block: int):
                 VALIDATOR_IDENTITIES[hotkey] = info.value["name"]
             break
 
+def fetch_commitments(block: int):
+    COMMITMENTS.clear()
+    storage_keys: list[StorageKey] = []
+    for hotkey, node in metagraph.nodes.items():
+        if is_validator(node): continue
+        storage_keys.append(substrate.create_storage_key(
+            "Commitments",
+            "CommitmentOf",
+            [metagraph.netuid, hotkey]
+        ))
+
+    commitments = query_subtensor(storage_keys, block)
+    for storage, commitment in commitments:
+        try:
+            if not commitment:
+                continue
+
+            fields = commitment.value["info"]["fields"]
+            if not fields:
+                continue
+
+            field = _deserialize_commitment_field(fields[0])
+            if field is None:
+                continue
+
+            decoder = Decoder(field[1])
+            spec_version = decoder.read_uint16()
+            if spec_version != SPEC_VERSION:
+                continue
+
+            COMMITMENTS[storage.params[1]] = Commitment.decode(decoder, int(commitment.value["block"]))
+        except:
+            continue
+
 
 last_sync: datetime = datetime.fromtimestamp(0, TIMEZONE)
 last_identity_sync: datetime = datetime.fromtimestamp(0, TIMEZONE)
+last_commitment_sync: datetime = datetime.fromtimestamp(0, TIMEZONE)
 
 
 def sync_metagraph(timeout: int = 10):
@@ -121,11 +193,16 @@ def sync_metagraph(timeout: int = 10):
         fetch_updated(block)
 
         global last_identity_sync
-        if now - last_identity_sync
-
-
-
-
+        if now - last_identity_sync > timedelta(days=1):
+            print("Syncing identities...")
+            last_identity_sync = now
+            fetch_identities(block)
+
+        global last_commitment_sync
+        if now - last_commitment_sync > timedelta(hours=12):
+            print("Syncing commitments...")
+            last_commitment_sync = now
+            fetch_commitments(block)
 
     with ThreadPoolExecutor(max_workers=1) as executor:
         future = executor.submit(sync_task)
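For reference, Commitment.decode together with the SPEC_VERSION check in fetch_commitments implies the following payload layout, all big-endian: a uint16 spec version, a length-prefixed provider string, a length-prefixed repository string, a fixed 7-byte revision, and a uint16 contest id. The sketch below builds such a payload purely for illustration; encode_commitment is a hypothetical helper name, not code from this commit.

# Illustrative sketch only: produces bytes in the layout Decoder/Commitment.decode read.
# encode_commitment is a hypothetical name, not part of this diff.
from struct import pack

SPEC_VERSION = 7  # must match chain_data.SPEC_VERSION for fetch_commitments to accept it

def _length_prefixed(value: str) -> bytes:
    data = value.encode()
    return bytes([len(data)]) + data  # 1-byte length prefix, matching Decoder.read_str

def encode_commitment(provider: str, repository: str, revision: str, contest_id: int) -> bytes:
    assert len(revision.encode()) == 7  # Decoder.read_sized_str(7) consumes exactly 7 bytes
    return (
        pack(">H", SPEC_VERSION)       # uint16, big-endian
        + _length_prefixed(provider)
        + _length_prefixed(repository)
        + revision.encode()            # e.g. a 7-character git short hash
        + pack(">H", contest_id)       # ContestId value, e.g. 2 for FLUX_NVIDIA_4090
    )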
src/leaderboard.py
CHANGED
@@ -55,4 +55,5 @@ def create_leaderboard(validator_uid) -> gr.Dataframe:
         pd.DataFrame(data, columns=["UID", "Model", "Tier", "Score", "Gen Time", "Similarity", "Size", "VRAM Usage", "Power Usage", "Load Time", "Block", "Revision", "Hotkey"]),
         datatype=["number", "markdown", "number", "number", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown"],
         interactive=False,
+        max_height=800,
     )
src/network_commitments.py
ADDED
@@ -0,0 +1,48 @@
+from struct import pack, unpack
+
+
+_UINT_16_SIZE = 2
+_UINT_32_SIZE = 4
+
+
+class Decoder:
+    _position: int
+    _data: bytes
+
+    def __init__(self, data: bytes):
+        self._position = 0
+        self._data = data
+
+    def read_uint16(self):
+        value = int.from_bytes(self._data[self._position:self._position + _UINT_16_SIZE], "big")
+        self._position += _UINT_16_SIZE
+
+        return value
+
+    def read_uint32(self):
+        value = int.from_bytes(self._data[self._position:self._position + _UINT_32_SIZE], "big")
+        self._position += _UINT_32_SIZE
+
+        return value
+
+    def read_float(self) -> float:
+        return unpack(">f", pack(">L", self.read_uint32()))[0]
+
+    def read_str(self):
+        length = self._data[self._position]
+        self._position += 1
+
+        value = self._data[self._position:self._position + length]
+        self._position += length
+
+        return value.decode()
+
+    def read_sized_str(self, length: int):
+        value = self._data[self._position:self._position + length]
+        self._position += length
+
+        return value.decode()
+
+    @property
+    def eof(self):
+        return self._position >= len(self._data)
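As a quick sanity check of the reader above, the sketch below decodes a hand-built payload in the same order that Commitment.decode reads it; the byte string and repository name are fabricated for illustration and are not real chain data.

# Usage sketch for Decoder, mirroring the read order in chain_data's Commitment.decode.
from network_commitments import Decoder

payload = (
    bytes([0, 7])                    # spec version 7, uint16 big-endian
    + bytes([11]) + b"huggingface"   # length-prefixed provider
    + bytes([9]) + b"user/repo"      # length-prefixed repository (hypothetical)
    + b"abc1234"                     # fixed 7-byte revision
    + bytes([0, 2])                  # contest id 2 -> ContestId.FLUX_NVIDIA_4090
)

decoder = Decoder(payload)
assert decoder.read_uint16() == 7       # spec version check, as in fetch_commitments
provider = decoder.read_str()           # "huggingface"
repository = decoder.read_str()         # "user/repo"
revision = decoder.read_sized_str(7)    # "abc1234"
contest_id = decoder.read_uint16()      # 2
assert decoder.eof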
src/submissions.py
ADDED
@@ -0,0 +1,48 @@
+import os
+
+import gradio as gr
+import pandas as pd
+
+from chain_data import sync_metagraph, COMMITMENTS, UIDS_BY_HOTKEY
+from wandb_data import get_current_runs, Run, Uid
+
+DEFAULT_VALIDATOR_UID = int(os.environ["DEFAULT_VALIDATOR_UID"])
+
+def get_status(run: Run, uid: Uid, block: int) -> tuple[str, str]:
+    if all(not submission.get(uid) or block > submission[uid].info.block for submission in [run.submissions, run.invalid_submissions]):
+        return "Pending", "orange"
+
+    if uid in run.submissions:
+        return "Benchmarked", "springgreen"
+    elif uid in run.invalid_submissions:
+        return "Invalid", "red"
+    else:
+        return "Pending", "orange"
+
+def create_submissions() -> gr.Dataframe:
+    data: list[list] = []
+    sync_metagraph()
+    for run in get_current_runs():
+        if not run.uid == DEFAULT_VALIDATOR_UID:
+            continue
+        for hotkey, commitment in COMMITMENTS.items():
+            uid = UIDS_BY_HOTKEY[hotkey]
+            status, color = get_status(run, uid, commitment.block)
+            data.append([
+                uid,
+                f"[{'/'.join(commitment.get_repo_link().split('/')[-2:])}]({commitment.get_repo_link()})",
+                f"[{commitment.block}](https://taostats.io/block/{commitment.block})",
+                f"[{commitment.revision}]({commitment.get_repo_link()}/commit/{commitment.revision})",
+                f"[{hotkey[:6]}...](https://taostats.io/hotkey/{hotkey})",
+                commitment.contest.name,
+                f"<span style='color: {color}'>{status}</span>"
+            ])
+
+    data.sort(key=lambda x: int(x[2].split('[')[1].split(']')[0]), reverse=True)
+
+    return gr.Dataframe(
+        pd.DataFrame(data, columns=["UID", "Model", "Block", "Revision", "Hotkey", "Contest", "Status"]),
+        datatype=["number", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown"],
+        interactive=False,
+        max_height=800,
+    )
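One detail worth noting: the data.sort(...) line orders rows by block number, which it recovers from the markdown link already rendered into column index 2 of each row. A tiny illustration, using an example block number rather than real data:

cell = "[4012345](https://taostats.io/block/4012345)"   # example value from the Block column
block = int(cell.split('[')[1].split(']')[0])           # -> 4012345; rows are sorted newest first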
src/validator_states.py
CHANGED
@@ -54,4 +54,5 @@ def create_validator_states() -> gr.Dataframe:
         pd.DataFrame(data, columns=["UID", "Name", "Version", "Status", "Winner", "Tested", "Invalid", "Avg. Benchmark Time", "ETA", "VTrust", "Updated"]),
         datatype=["number", "markdown", "markdown", "markdown", "markdown", "number", "number", "markdown", "markdown", "markdown", "markdown"],
         interactive=False,
+        max_height=800,
     )
src/validator_weights.py
CHANGED
@@ -66,4 +66,5 @@ def create_weights(include_inactive: bool) -> gr.Dataframe:
         pd.DataFrame(data, columns=headers),
         datatype=datatype,
         interactive=False,
+        max_height=800,
     )
src/wandb_data.py
CHANGED
@@ -154,7 +154,7 @@ def _add_runs(wandb_runs: list[wapi.Run]):
         if "submissions" in metrics:
             for uid, submission in metrics["submissions"].items():
                 submission_info[uid] = SubmissionInfo(
-                    uid=uid,
+                    uid=int(uid),
                     hotkey=submission["hotkey"],
                     repository=submission["repository"],
                     revision=submission["revision"],
@@ -166,7 +166,7 @@ def _add_runs(wandb_runs: list[wapi.Run]):
             model = benchmark["model"]
             if uid not in submission_info:
                 continue
-            submissions[uid] = Submission(
+            submissions[int(uid)] = Submission(
                 info=submission_info[uid],
                 metrics=MetricData(
                     generation_time=float(model["generation_time"]),
@@ -185,7 +185,7 @@ def _add_runs(wandb_runs: list[wapi.Run]):
         for uid, reason in metrics["invalid"].items():
             if not uid in submission_info:
                 continue
-            invalid_submissions[uid] = InvalidSubmission(
+            invalid_submissions[int(uid)] = InvalidSubmission(
                 info=submission_info[uid],
                 reason=reason,
             )