AlexNijjar committed
Commit 163756c · 1 Parent(s): 77979e4

Add ram metric

Files changed (2)
  1. src/leaderboard.py +4 -3
  2. src/wandb_data.py +9 -6
src/leaderboard.py CHANGED
@@ -44,6 +44,7 @@ def create_leaderboard(validator_uid) -> gr.Dataframe:
             f"{submission.average_similarity * 100:.4f}%",
             f"{submission.metrics.size / 1024 ** 3:.4f}GB",
             f"{submission.metrics.vram_used / 1024 ** 3:.4f}GB",
+            f"{submission.metrics.ram_used / 1024 ** 3:.4f}GB",
             f"{submission.metrics.watts_used:.3f}W",
             f"{submission.metrics.load_time:.3f}s",
             f"[{submission.info.block}](https://taostats.io/block/{submission.info.block})",
@@ -51,11 +52,11 @@ def create_leaderboard(validator_uid) -> gr.Dataframe:
             f"[{hotkey[:6]}...](https://taostats.io/hotkey/{hotkey})",
         ])

-    data.sort(key=lambda x: (x[2], int(x[10].split('[')[1].split(']')[0])))
+    data.sort(key=lambda x: (x[2], int(x[11].split('[')[1].split(']')[0])))

     return gr.Dataframe(
-        pd.DataFrame(data, columns=["UID", "Model", "Rank", "Score", "Gen Time", "Similarity", "Size", "VRAM Usage", "Power Usage", "Load Time", "Block", "Revision", "Hotkey"]),
-        datatype=["number", "markdown", "number", "number", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown"],
+        pd.DataFrame(data, columns=["UID", "Model", "Rank", "Score", "Gen Time", "Similarity", "Size", "VRAM Used", "RAM Used", "Power Used", "Load Time", "Block", "Revision", "Hotkey"]),
+        datatype=["number", "markdown", "number", "number", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown"],
         interactive=False,
         max_height=800,
     )
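Note on the sort key: inserting the new RAM column shifts the markdown Block link from index 10 to index 11, which is why the lambda now reads x[11]. A minimal sketch of how that key recovers the block number from the link (the value below is illustrative, not taken from a real run):

# Illustrative sketch: parsing the block number out of the markdown link
# that sits at index 11 of each leaderboard row after the RAM column was added.
block_link = "[4102930](https://taostats.io/block/4102930)"  # example value, not real data
block_number = int(block_link.split('[')[1].split(']')[0])
print(block_number)  # 4102930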
src/wandb_data.py CHANGED
@@ -42,12 +42,13 @@ class BenchmarkStatus(Enum):


 @dataclass
-class MetricData:
+class Metrics:
     generation_time: float
     vram_used: float
     watts_used: float
     load_time: float
     size: int
+    ram_used: float


 @dataclass
@@ -61,7 +62,7 @@ class SubmissionInfo:
 @dataclass
 class Submission:
     info: SubmissionInfo
-    metrics: MetricData
+    metrics: Metrics
     average_similarity: float
     min_similarity: float
     rank: int
@@ -80,7 +81,7 @@ class Run:
     step: int
     eta: int
     winner_uid: int | None
-    baseline_metrics: MetricData | None
+    baseline_metrics: Metrics | None
     total_submissions: int
     submissions: dict[Key, Submission]
     invalid_submissions: set[Key]
@@ -137,12 +138,13 @@ def _add_runs(wandb_runs: list[wapi.Run]):
     submissions: dict[Key, Submission] = {}
     invalid_submissions: set[Key] = set()

-    baseline_metrics: MetricData | None = None
+    baseline_metrics: Metrics | None = None
     if "baseline" in metrics:
         baseline = metrics["baseline"]
-        baseline_metrics = MetricData(
+        baseline_metrics = Metrics(
             generation_time=float(baseline["generation_time"]),
             vram_used=float(baseline["vram_used"]),
+            ram_used=float(baseline.get("ram_used", 0)),
             watts_used=float(baseline["watts_used"]),
             load_time=float(baseline["load_time"]),
             size=int(baseline["size"]),
@@ -173,9 +175,10 @@ def _add_runs(wandb_runs: list[wapi.Run]):
                 continue
             submissions[hotkey] = Submission(
                 info=submission_info[hotkey],
-                metrics=MetricData(
+                metrics=Metrics(
                     generation_time=float(benchmark_metrics["generation_time"]),
                     vram_used=float(benchmark_metrics["vram_used"]),
+                    ram_used=float(benchmark_metrics.get("ram_used", 0)),
                     watts_used=float(benchmark_metrics["watts_used"]),
                     load_time=float(benchmark_metrics["load_time"]),
                     size=int(benchmark_metrics["size"]),
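The .get("ram_used", 0) fallback keeps older runs parseable: runs logged before this commit have no ram_used key, so their metric defaults to 0 instead of raising KeyError. A minimal, self-contained sketch of that behaviour, assuming a metrics dict shaped like the ones read above (the field layout comes from the diff; the values are illustrative, not real run data):

from dataclasses import dataclass

@dataclass
class Metrics:
    generation_time: float
    vram_used: float
    watts_used: float
    load_time: float
    size: int
    ram_used: float

# A run logged before the ram metric existed: no "ram_used" key.
old_run = {
    "generation_time": 12.5,
    "vram_used": 8.2e9,
    "watts_used": 210.0,
    "load_time": 4.1,
    "size": 6_500_000_000,
}

metrics = Metrics(
    generation_time=float(old_run["generation_time"]),
    vram_used=float(old_run["vram_used"]),
    ram_used=float(old_run.get("ram_used", 0)),  # missing key -> 0.0
    watts_used=float(old_run["watts_used"]),
    load_time=float(old_run["load_time"]),
    size=int(old_run["size"]),
)
print(metrics.ram_used)  # 0.0 for old runs, the logged value for new ones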