Spaces · Running

Commit 5bbddad · Parent(s): b04831c

Use taostats for chain data

Browse files

Changed files:
- requirements.txt +2 -1
- src/app.py +2 -2
- src/chain_data.py +67 -42
- src/submissions.py +12 -7
- src/validator_states.py +4 -4
- src/validator_weights.py +8 -7
- src/wandb_data.py +25 -19
requirements.txt
CHANGED
@@ -5,4 +5,5 @@ substrate-interface==1.7.10
 plotly==5.24.1
 pandas==2.2.3
 packaging==24.2
-netaddr==1.3.0
\ No newline at end of file
+netaddr==1.3.0
+cachetools==5.5.0
src/app.py
CHANGED
@@ -1,6 +1,6 @@
 import gradio as gr
 
-from chain_data import sync_metagraph
+from chain_data import sync_chain
 from leaderboard import create_leaderboard, create_dropdown, create_baseline
 from model_demo import create_demo
 from submissions import create_submissions, DROPDOWN_OPTIONS
@@ -10,7 +10,7 @@ from wandb_data import sync
 
 
 def main():
-    sync_metagraph()
+    sync_chain()
     sync()
     with gr.Blocks(css=".typewriter {font-family: 'JMH Typewriter', sans-serif;}", fill_height=True, fill_width=True) as app:
         with gr.Tab("Leaderboard") as leaderboard_tab:
src/chain_data.py
CHANGED
@@ -3,20 +3,21 @@ import traceback
 from dataclasses import dataclass
 from datetime import datetime, timedelta
 from enum import Enum
-from math import ceil
 from typing import TypeAlias
 
+import requests
+from cachetools import TTLCache, cached
 from fiber import constants
 from fiber.chain.commitments import _deserialize_commitment_field
 from fiber.chain.interface import get_substrate
-from fiber.chain.metagraph import Metagraph
-from fiber.chain.models import Node
+from pydantic import BaseModel
 from substrateinterface import SubstrateInterface
 from substrateinterface.storage import StorageKey
 
 from network_commitments import Decoder
 from src import Key, Uid, TIMEZONE
 
+TAOSTATS_API_KEY = os.getenv("TAOSTATS_API_KEY")
 DISABLE_COMMITMENTS_FETCH = int(os.getenv("DISABLE_COMMITMENTS_FETCH") or 0) > 0
 
 Weight: TypeAlias = float
@@ -28,6 +29,17 @@ class ContestId(Enum):
     SDXL_NEWDREAM_NVIDIA_4090 = 1
 
 
+class Neuron(BaseModel):
+    hotkey: str
+    coldkey: str
+    validator_trust: float
+    validator_permit: bool
+    incentive: float
+    updated: int
+    uid: int
+    block_number: int
+
+
 @dataclass
 class Commitment:
     provider: str
@@ -60,13 +72,10 @@ NET_UID = 39
 WEIGHTS_BY_MINER: dict[Key, list[tuple[Key, Weight]]] = {}
 VALIDATOR_IDENTITIES: dict[Key, str] = {}
 COMMITMENTS: dict[Key, Commitment] = {}
-UPDATED: dict[Key, int] = {}
 
-UIDS_BY_HOTKEY: dict[Key, Uid] = {}
 HOTKEYS_BY_UID: dict[Uid, Key] = {}
 
 substrate = get_substrate(subtensor_address=constants.FINNEY_SUBTENSOR_ADDRESS)
-metagraph = Metagraph(substrate, netuid=str(NET_UID), load_old_nodes=False)
 
 
 def query_subtensor(storage_keys: list[StorageKey], block: int) -> list:
@@ -81,61 +90,47 @@ def query_subtensor(storage_keys: list[StorageKey], block: int) -> list:
         raise
 
 
-def is_validator(node: Node) -> bool:
-    return node.vtrust > 0 or node.stake > 10_000
-
-
-def get_nodes() -> dict[Key, Node]:
-    return metagraph.nodes
-
-
 def fetch_weights(block: int):
     WEIGHTS_BY_MINER.clear()
     storage_keys: list[StorageKey] = []
-    for hotkey, node in metagraph.nodes.items():
-        if not is_validator(node): continue
+    for hotkey, neuron in get_neurons().items():
+        if not neuron.validator_permit: continue
         storage_keys.append(substrate.create_storage_key(
             "SubtensorModule",
             "Weights",
-            [NET_UID, UIDS_BY_HOTKEY[hotkey]]
+            [NET_UID, neuron.uid]
         ))
 
     weights = query_subtensor(storage_keys, block)
 
-    for hotkey, node in metagraph.nodes.items():
+    for hotkey, neuron in get_neurons().items():
         for storage, validator_weights in weights:
             validator_hotkey = HOTKEYS_BY_UID[storage.params[1]]
             if hotkey not in WEIGHTS_BY_MINER:
                 WEIGHTS_BY_MINER[hotkey] = []
             weight = 0.0
             for miner_weight in validator_weights:
-                if miner_weight[0].value == UIDS_BY_HOTKEY[hotkey]:
+                if miner_weight[0].value == neuron.uid:
                     weight = miner_weight[1].value / 2 ** 16
                     break
             WEIGHTS_BY_MINER[hotkey].append((validator_hotkey, weight))
 
 
-def fetch_updated(block: int):
-    UPDATED.clear()
-    for hotkey, node in metagraph.nodes.items():
-        UPDATED[hotkey] = ceil(block - node.last_updated)
-
-
 def fetch_identities(block: int):
     VALIDATOR_IDENTITIES.clear()
     storage_keys: list[StorageKey] = []
-    for hotkey, node in metagraph.nodes.items():
-        if not is_validator(node): continue
+    for hotkey, neuron in get_neurons().items():
+        if not neuron.validator_permit: continue
         storage_keys.append(substrate.create_storage_key(
             "SubtensorModule",
             "Identities",
-            [node.coldkey]
+            [neuron.coldkey]
         ))
 
     identities = query_subtensor(storage_keys, block)
-    for hotkey, node in metagraph.nodes.items():
+    for hotkey, neuron in get_neurons().items():
         for storage, info in identities:
-            if node.coldkey != storage.params[0]: continue
+            if neuron.coldkey != storage.params[0]: continue
             if info != None:  # noqa
                 VALIDATOR_IDENTITIES[hotkey] = info.value["name"]
                 break
@@ -147,12 +142,12 @@ def fetch_commitments(block: int):
 
     COMMITMENTS.clear()
     storage_keys: list[StorageKey] = []
-    for hotkey, node in metagraph.nodes.items():
-        if is_validator(node): continue
+    for hotkey, neuron in get_neurons().items():
+        if neuron.validator_permit: continue
        storage_keys.append(substrate.create_storage_key(
            "Commitments",
            "CommitmentOf",
-            [NET_UID, node.hotkey]
+            [NET_UID, hotkey]
        ))
 
     commitments = query_subtensor(storage_keys, block)
@@ -184,7 +179,39 @@ last_identity_sync: datetime = datetime.fromtimestamp(0, TIMEZONE)
 last_commitment_sync: datetime = datetime.fromtimestamp(0, TIMEZONE)
 
 
-def sync_metagraph():
+@cached(cache=TTLCache(maxsize=1, ttl=120))
+def get_neurons() -> dict[Key, Neuron]:
+    response = requests.get(
+        "https://api.taostats.io/api/metagraph/latest/v1",
+        headers={
+            "accept": "application/json",
+            "Authorization": TAOSTATS_API_KEY,
+        },
+        params={
+            "netuid": 39,
+        }
+    )
+
+    response.raise_for_status()
+
+    return {
+        neuron["hotkey"]["ss58"]: Neuron(
+            hotkey=neuron["hotkey"]["ss58"],
+            coldkey=neuron["coldkey"]["ss58"],
+            validator_trust=float(neuron["validator_trust"]),
+            validator_permit=bool(neuron["validator_permit"]),
+            incentive=float(neuron["incentive"]),
+            updated=int(neuron["updated"]),
+            uid=int(neuron["uid"]),
+            block_number=int(neuron["block_number"]),
+        ) for neuron in response.json()["data"]
+    }
+
+
+def get_latest_block():
+    return max([neuron.block_number for neuron in get_neurons().values()])
+
+
+def sync_chain():
     global substrate
     global last_sync
     now = datetime.now(TIMEZONE)
@@ -193,16 +220,14 @@ def sync_metagraph():
     last_sync = now
 
     try:
-        print("Syncing metagraph...")
-        metagraph.sync_nodes()
-        block = substrate.get_block_number(substrate.get_chain_head())
+        print("Syncing chain...")
+
+        block = get_latest_block()
 
-        for uid, node in enumerate(metagraph.nodes.values()):
-            UIDS_BY_HOTKEY[node.hotkey] = uid
-            HOTKEYS_BY_UID[uid] = node.hotkey
+        for hotkey, neuron in get_neurons().items():
+            HOTKEYS_BY_UID[neuron.uid] = hotkey
 
         fetch_weights(block)
-        fetch_updated(block)
 
         global last_identity_sync
         if now - last_identity_sync > timedelta(days=1):
@@ -216,6 +241,6 @@ def sync_metagraph():
         last_commitment_sync = now
         fetch_commitments(block)
     except Exception:
-        print(f"Error occurred while syncing metagraph")
+        print(f"Error occurred while syncing chain")
         traceback.print_exc()
         substrate = SubstrateInterface(substrate.url)
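Note on the new data path: get_neurons() replaces fiber's on-chain metagraph sync with a single taostats HTTP call, and @cached with a one-slot TTLCache memoizes the parsed result for two minutes, so every helper above that calls get_neurons() inside that window shares one response. A minimal sketch of the caching behavior (the counter is illustrative, not part of this commit):

    from cachetools import TTLCache, cached

    calls = 0

    @cached(cache=TTLCache(maxsize=1, ttl=120))  # one cached value, expires after 120 s
    def fetch() -> int:
        global calls
        calls += 1  # stands in for the requests.get(...) round trip
        return calls

    fetch(); fetch()
    assert calls == 1  # the second call is served from the cache until the TTL lapses

One consequence: get_latest_block() is only as fresh as the cached taostats snapshot, so "latest" can lag the chain by up to the TTL.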
src/submissions.py
CHANGED
@@ -3,9 +3,10 @@ from enum import Enum
 import gradio as gr
 import pandas as pd
 
-from chain_data import sync_metagraph, COMMITMENTS, UIDS_BY_HOTKEY, get_nodes
+from chain_data import sync_chain, COMMITMENTS
 from src import Key
-from wandb_data import get_current_runs, Run, BLACKLISTED_HOTKEYS, BLACKLISTED_COLDKEYS, DUPLICATE_SUBMISSIONS
+from src.chain_data import get_neurons
+from wandb_data import get_current_runs, Run, get_blacklisted_keys
 
 
 class SubmissionStatus(Enum):
@@ -17,12 +18,13 @@ class SubmissionStatus(Enum):
 
     @staticmethod
     def get_status(run: Run, hotkey: Key, coldkey: Key, block: int, revision: str) -> "SubmissionStatus":
-        if hotkey in BLACKLISTED_HOTKEYS or coldkey in BLACKLISTED_COLDKEYS:
+        blacklisted_keys = get_blacklisted_keys()
+        if hotkey in blacklisted_keys.hotkeys or coldkey in blacklisted_keys.coldkeys:
             return SubmissionStatus.BLACKLISTED
 
         if any(
             submission.hotkey == hotkey and submission.revision == revision
-            for submission in DUPLICATE_SUBMISSIONS
+            for submission in blacklisted_keys.duplicates
         ):
             return SubmissionStatus.DUPLICATE
 
@@ -43,14 +45,17 @@ DROPDOWN_OPTIONS = [status.value[0] for status in SubmissionStatus]
 
 def create_submissions(submission_filters: list[str]) -> gr.Dataframe:
     data: list[list] = []
-    sync_metagraph()
+    sync_chain()
     runs = sorted(get_current_runs(), key=lambda run: run.uid)
 
     for hotkey, commitment in COMMITMENTS.items():
-        coldkey = get_nodes()[hotkey].coldkey
+        neuron = get_neurons().get(hotkey)
+        if not neuron:
+            continue
+        coldkey = neuron.coldkey
 
         row = [
-            UIDS_BY_HOTKEY[hotkey],
+            neuron.uid,
             f"[{'/'.join(commitment.get_repo_link().split('/')[-2:])}]({commitment.get_repo_link()})",
             f"[{commitment.block}](https://taostats.io/block/{commitment.block}/extrinsics)",
            f"[{commitment.revision}]({commitment.get_repo_link()}/commit/{commitment.revision})",
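Note: get_status() now derives both the blacklist and duplicate checks from one cached Blacklist object (defined in src/wandb_data.py below) instead of module-level sets. A hedged walkthrough with invented keys, using a plain stand-in for the model:

    from types import SimpleNamespace

    # Stand-in for the Blacklist model; all values here are made up.
    blacklist = SimpleNamespace(
        hotkeys={"5HotBad"},
        coldkeys=set(),
        duplicates=[SimpleNamespace(hotkey="5HotDup", revision="abc123")],
    )

    def status(hotkey: str, coldkey: str, revision: str) -> str:
        if hotkey in blacklist.hotkeys or coldkey in blacklist.coldkeys:
            return "BLACKLISTED"
        if any(d.hotkey == hotkey and d.revision == revision for d in blacklist.duplicates):
            return "DUPLICATE"
        return "OK"  # the remaining statuses are elided here

    assert status("5HotDup", "5ColdAny", "abc123") == "DUPLICATE"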
src/validator_states.py
CHANGED
@@ -4,7 +4,7 @@ import gradio as gr
 import pandas as pd
 from packaging import version
 
-from chain_data import get_nodes, UPDATED
+from chain_data import get_neurons
 from wandb_data import get_current_runs, Run
 
 AVERAGE_BENCHMARKING_TIME_WARNING_THRESHOLD = 180  # 3 minutes
@@ -31,10 +31,10 @@ def create_validator_states() -> gr.Dataframe:
     runs = sorted(get_current_runs(), key=lambda run: run.uid)
     latest_version = get_latest_version(runs)
     for run in runs:
-        if not run.hotkey in get_nodes():
+        if not run.hotkey in get_neurons():
             continue
-        vtrust = get_nodes()[run.hotkey].vtrust
-        updated = UPDATED[run.hotkey]
+        vtrust = get_neurons()[run.hotkey].validator_trust
+        updated = get_neurons()[run.hotkey].updated
         data.append([
             run.uid,
             run.name,
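Note: create_validator_states() now calls get_neurons() several times per run; because the function is memoized by the TTLCache above, calls within one TTL window return the identical cached dict, so each extra call costs a dict lookup rather than an HTTP request:

    # Assuming the cache has not expired between the two calls:
    assert get_neurons() is get_neurons()

Hoisting the result into a local, as src/validator_weights.py below does with neurons = get_neurons(), is therefore a readability choice more than a performance fix.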
src/validator_weights.py
CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 import pandas as pd
 
-from chain_data import WEIGHTS_BY_MINER, UIDS_BY_HOTKEY, get_nodes, sync_metagraph, Weight
+from chain_data import WEIGHTS_BY_MINER, get_neurons, sync_chain, Weight
 from wandb_data import Key, get_current_runs
 
 
@@ -39,27 +39,28 @@ def get_active_weights() -> dict[Key, list[tuple[Key, Weight]]]:
 
 def create_weights(include_inactive: bool) -> gr.Dataframe:
     data: list[list] = []
-    sync_metagraph()
+    sync_chain()
 
     headers = ["Miner UID", "Incentive"]
     datatype = ["number", "markdown"]
 
     weights = WEIGHTS_BY_MINER if include_inactive else get_active_weights()
 
+    neurons = get_neurons()
     validator_uids = set()
     for _, validator_weights in weights.items():
-        for validator_hotkey, _ in validator_weights:
-            validator_uids.add(UIDS_BY_HOTKEY[validator_hotkey])
+        for hotkey, _ in validator_weights:
+            validator_uids.add(neurons[hotkey].uid)
 
     for validator_uid in sorted(validator_uids):
         headers.append(str(validator_uid))
         datatype.append("markdown")
 
     for hotkey, validator_weights in weights.items():
-        if not hotkey in UIDS_BY_HOTKEY:
+        if not hotkey in neurons:
             continue
-        incentive = get_nodes()[hotkey].incentive
-        row = [UIDS_BY_HOTKEY[hotkey], f"<span style='color: {get_color_by_weight(incentive)}'>{incentive:.{3}f}</span>"]
+        incentive = neurons[hotkey].incentive
+        row = [neurons[hotkey].uid, f"<span style='color: {get_color_by_weight(incentive)}'>{incentive:.{3}f}</span>"]
         for _, weight in validator_weights:
             row.append(f"<span style='color: {get_color_by_weight(weight)}'>{weight:.{3}f}</span>")
         data.append(row)
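Note: the nested format spec f"{weight:.{3}f}" used above just parameterizes the precision; it renders identically to the literal form:

    w = 0.512345
    assert f"{w:.{3}f}" == f"{w:.3f}" == "0.512"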
src/wandb_data.py
CHANGED
@@ -5,12 +5,14 @@ from enum import Enum
 
 import requests
 import wandb
+from cachetools import TTLCache, cached
 import wandb.apis.public as wapi
 from pydantic import BaseModel, RootModel
 from substrateinterface import Keypair
 
-from chain_data import VALIDATOR_IDENTITIES, UIDS_BY_HOTKEY, sync_metagraph
+from chain_data import VALIDATOR_IDENTITIES, sync_chain
 from src import TIMEZONE, Key
+from src.chain_data import get_neurons
 
 WANDB_RUN_PATH = os.environ["WANDB_RUN_PATH"]
 
@@ -19,8 +21,7 @@ OFFSET_DAYS = 0
 
 BLACKLIST_ENDPOINT = "https://edge-inputs.api.wombo.ai/blacklist"
 DUPLICATE_SUBMISSIONS_ENDPOINT = "https://edge-inputs.api.wombo.ai/duplicate_submissions"
-BLACKLISTED_COLDKEYS = set()
-BLACKLISTED_HOTKEYS = set()
+
 
 class DuplicateSubmission(BaseModel):
     hotkey: Key
@@ -28,7 +29,10 @@ class DuplicateSubmission(BaseModel):
     revision: str
     copy_of: Key
 
-DUPLICATE_SUBMISSIONS: list[DuplicateSubmission] = []
+class Blacklist(BaseModel):
+    coldkeys: set[Key]
+    hotkeys: set[Key]
+    duplicates: list[DuplicateSubmission]
 
 
 class BenchmarkStatus(Enum):
@@ -160,11 +164,11 @@ def _add_runs(wandb_runs: list[wapi.Run]):
 
     if "submissions" in metrics:
         for hotkey, submission in metrics["submissions"].items():
-            uid = UIDS_BY_HOTKEY.get(hotkey)
-            if not uid:
+            neuron = get_neurons().get(hotkey)
+            if not neuron:
                 continue
             submission_info[hotkey] = SubmissionInfo(
-                uid=uid,
+                uid=neuron.uid,
                 repository=submission["repository_info"]["url"],
                 revision=submission["repository_info"]["revision"],
                 block=submission["block"],
@@ -275,22 +279,25 @@ def _fetch_current_runs(wandb_api: wandb.Api):
     )
     _add_runs(wandb_runs)
 
-
-def _fetch_blacklisted_keys():
+@cached(cache=TTLCache(maxsize=1, ttl=300))
+def get_blacklisted_keys() -> Blacklist:
     response = requests.get(BLACKLIST_ENDPOINT)
     response.raise_for_status()
     data = response.json()
-    BLACKLISTED_HOTKEYS.clear()
-    BLACKLISTED_COLDKEYS.clear()
-    for hotkey in data["hotkeys"]:
-        BLACKLISTED_HOTKEYS.add(hotkey)
-    for coldkey in data["coldkeys"]:
-        BLACKLISTED_COLDKEYS.add(coldkey)
+
+    blacklist_hotkeys = set(data["hotkeys"])
+    blacklist_coldkeys = set(data["coldkeys"])
 
     response = requests.get(DUPLICATE_SUBMISSIONS_ENDPOINT)
     response.raise_for_status()
-    DUPLICATE_SUBMISSIONS.clear()
-    DUPLICATE_SUBMISSIONS.extend(RootModel[list[DuplicateSubmission]].model_validate_json(response.text).root)
+
+    duplicate_submissions = RootModel[list[DuplicateSubmission]].model_validate_json(response.text).root
+
+    return Blacklist(
+        hotkeys=blacklist_hotkeys,
+        coldkeys=blacklist_coldkeys,
+        duplicates=duplicate_submissions
+    )
 
 
 last_sync: datetime = datetime.fromtimestamp(0, TIMEZONE)
@@ -309,11 +316,10 @@ def sync():
         _fetch_history(wandb_api)
     else:
         _fetch_current_runs(wandb_api)
-    _fetch_blacklisted_keys()
 
 
 def get_current_runs() -> list[Run]:
-    sync_metagraph()
+    sync_chain()
     sync()
 
     contest_start = _get_contest_start() - timedelta(days=OFFSET_DAYS)
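Note: RootModel[list[DuplicateSubmission]] is the pydantic v2 idiom for validating a bare JSON array, which is what the duplicate-submissions endpoint evidently returns. A self-contained sketch with an invented payload and a trimmed stand-in model:

    from pydantic import BaseModel, RootModel

    class Dup(BaseModel):  # stand-in for DuplicateSubmission, fields trimmed
        hotkey: str
        revision: str
        copy_of: str

    payload = '[{"hotkey": "5F...", "revision": "abc123", "copy_of": "5G..."}]'
    dups = RootModel[list[Dup]].model_validate_json(payload).root
    assert dups[0].revision == "abc123"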