AlexNijjar committed
Commit c638797 · 1 Parent(s): 63f8f2e

Remove ranks

README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🏆
 colorFrom: purple
 colorTo: gray
 sdk: gradio
-sdk_version: 5.8.0
+sdk_version: 5.9.1
 app_file: src/app.py
 pinned: true
 ---
requirements.txt CHANGED
@@ -1,8 +1,8 @@
-fiber @ git+https://github.com/rayonlabs/fiber.git@1.0.0#egg=fiber[chain]
-gradio==5.8.0
-wandb==0.18.5
+fiber @ git+https://github.com/rayonlabs/fiber.git@2.0.1#egg=fiber[chain]
+gradio==5.9.1
+wandb==0.19.1
 substrate-interface==1.7.10
 plotly==5.24.1
 pandas==2.2.3
-packaging==24.1
+packaging==24.2
 netaddr==1.3.0
src/leaderboard.py CHANGED
@@ -39,7 +39,6 @@ def create_leaderboard(validator_uid: Uid) -> gr.Dataframe:
         data.append([
             submission.info.uid,
             f"[{'/'.join(submission.info.repository.split('/')[-2:])}]({submission.info.repository})",
-            submission.rank + 1,
             round(submission.score, 5),
             f"{submission.metrics.generation_time:.4f}s",
             f"{submission.average_similarity * 100:.4f}%",
@@ -53,11 +52,11 @@
             f"[{hotkey[:6]}...](https://taostats.io/hotkey/{hotkey})",
         ])
 
-    data.sort(key=lambda x: (x[2], int(x[11].split('[')[1].split(']')[0])))
+    data.sort(key=lambda x: x[2], reverse=True)
 
     return gr.Dataframe(
-        pd.DataFrame(data, columns=["UID", "Model", "Rank", "Score", "Gen Time", "Similarity", "Size", "VRAM Used", "RAM Used", "Power Used", "Load Time", "Block", "Revision", "Hotkey"]),
-        datatype=["number", "markdown", "number", "number", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown"],
+        pd.DataFrame(data, columns=["UID", "Model", "Score", "Gen Time", "Similarity", "Size", "VRAM Used", "RAM Used", "Power Used", "Load Time", "Block", "Revision", "Hotkey"]),
+        datatype=["number", "markdown", "number", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown"],
         interactive=False,
         max_height=800,
     )
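
With the Rank column removed, the score itself becomes the sort key: index 2 now points at Score rather than Rank, and reverse=True puts the best score first. A minimal sketch of the new ordering, using hypothetical three-column rows in place of the real thirteen-column layout:

# Hypothetical rows mirroring the new column order: UID, Model, Score, ...
rows = [
    [12, "[org/model-a](https://huggingface.co/org/model-a)", 0.91234],
    [7, "[org/model-b](https://huggingface.co/org/model-b)", 0.95412],
    [31, "[org/model-c](https://huggingface.co/org/model-c)", 0.88710],
]

# New sort: descending by score (index 2). The old key sorted ascending
# by rank, then by a block number parsed out of a markdown link.
rows.sort(key=lambda x: x[2], reverse=True)

print([row[0] for row in rows])  # UIDs best-first: [7, 12, 31]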
src/validator_states.py CHANGED
@@ -58,4 +58,5 @@ def create_validator_states() -> gr.Dataframe:
         datatype=["number", "markdown", "markdown", "markdown", "markdown", "number", "number", "markdown", "markdown", "number", "markdown", "markdown"],
         interactive=False,
         max_height=800,
+        row_count=1,
     )
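
In Gradio's Dataframe API, row_count controls how many rows the component displays (an int sets the row count directly), so row_count=1 presumably keeps the validator-states table from rendering empty filler rows. A minimal sketch of the construction, with hypothetical three-column data standing in for the real twelve-column frame:

import gradio as gr
import pandas as pd

# Hypothetical stand-in data; the real table uses the twelve columns
# listed in the diff above.
df = pd.DataFrame(
    [[1, "[validator-a](https://example.com)", "1.2.3"]],
    columns=["UID", "Name", "Version"],
)

with gr.Blocks() as demo:
    gr.Dataframe(
        df,
        datatype=["number", "markdown", "markdown"],
        interactive=False,
        max_height=800,
        row_count=1,  # render a single-row table rather than filler rows
    )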
src/wandb_data.py CHANGED
@@ -65,7 +65,6 @@ class Submission:
     metrics: Metrics
     average_similarity: float
     min_similarity: float
-    rank: int
     score: float
 
 
@@ -162,14 +161,11 @@ def _add_runs(wandb_runs: list[wapi.Run]):
             block=submission["block"],
         )
 
-    if "benchmarks" in metrics and "ranks" in metrics:
+    if "benchmarks" in metrics:
         for hotkey, benchmark in metrics["benchmarks"].items():
             benchmark_metrics = benchmark["metrics"]
             if hotkey not in submission_info:
                 continue
-            ranks = metrics["ranks"]
-            if hotkey not in ranks:
-                continue
             scores = metrics["scores"]
             if hotkey not in scores:
                 continue
@@ -185,7 +181,6 @@
                 ),
                 average_similarity=float(benchmark["average_similarity"]),
                 min_similarity=float(benchmark["min_similarity"]),
-                rank=int(ranks[hotkey]),
                 score=float(scores[hotkey]),
             )
 
@@ -197,9 +192,9 @@
         ...
 
     status = _status_from_run(wandb_run)
-    winner_submission = min(
+    winner_submission = max(
         submissions.values(),
-        key=lambda submission: (submission.rank, submission.info.block),
+        key=lambda submission: submission.score,
         default=None
     )
     winner_uid = winner_submission.info.uid if winner_submission else None
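
The winner-selection flip is the semantic core of the commit: with ranks gone, the best submission is simply the one with the highest score, picked via max with a default for the empty case. A self-contained sketch, using a trimmed stand-in for the real Submission dataclass:

from dataclasses import dataclass

# Trimmed stand-in for the real Submission: only the fields the new
# selection key reads are kept here.
@dataclass
class Submission:
    uid: int
    score: float

submissions = {
    "hk-a": Submission(uid=12, score=0.912),
    "hk-b": Submission(uid=7, score=0.954),
}

# Old: min(..., key=lambda s: (s.rank, s.info.block)), i.e. lowest rank
# wins with earlier block breaking ties. New: highest score wins outright.
winner = max(submissions.values(), key=lambda s: s.score, default=None)
winner_uid = winner.uid if winner else None
print(winner_uid)  # 7

# default=None means an empty submissions dict yields None instead of the
# ValueError that max() raises on an empty sequence.
assert max({}.values(), key=lambda s: s.score, default=None) is None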