AlexNijjar committed
Commit 2e5ce99 · 1 Parent(s): b343c97
Files changed (2)
  1. src/validator_states.py +3 -2
  2. src/wandb_data.py +2 -0
src/validator_states.py CHANGED
@@ -46,13 +46,14 @@ def create_validator_states() -> gr.Dataframe:
             len(run.invalid_submissions),
             colorize(f"{timedelta(seconds=int(run.average_benchmarking_time))}", "orange" if run.average_benchmarking_time > AVERAGE_BENCHMARKING_TIME_WARNING_THRESHOLD else "springgreen" if run.average_benchmarking_time > 0 else "gray"),
             colorize(f"{timedelta(seconds=run.eta)}", "orange" if run.eta > ETA_WARNING_THRESHOLD else "springgreen" if run.eta > 0 else "gray"),
+            run.step,
             colorize(f"{vtrust:.4f}", "springgreen" if vtrust > VTRUST_WARNING_THRESHOLD else "red"),
             colorize(updated, "springgreen" if updated < UPDATED_WARNING_THRESHOLD else "red"),
         ])
 
     return gr.Dataframe(
-        pd.DataFrame(data, columns=["UID", "Name", "Version", "Status", "Winner", "Tested", "Invalid", "Avg. Benchmark Time", "ETA", "VTrust", "Updated"]),
-        datatype=["number", "markdown", "markdown", "markdown", "markdown", "number", "number", "markdown", "markdown", "markdown", "markdown"],
+        pd.DataFrame(data, columns=["UID", "Name", "Version", "Status", "Winner", "Tested", "Invalid", "Avg. Benchmark Time", "ETA", "Step", "VTrust", "Updated"]),
+        datatype=["number", "markdown", "markdown", "markdown", "markdown", "number", "number", "markdown", "markdown", "number", "markdown", "markdown"],
         interactive=False,
         max_height=800,
     )
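For reference, the new "Step" column slots between "ETA" and "VTrust", and its datatype entry is "number" rather than "markdown" because `run.step` is not passed through `colorize`. A minimal standalone sketch of the resulting table layout; the sample row values below are invented for illustration only:

import gradio as gr
import pandas as pd

# Hypothetical sample row, in the same column order as create_validator_states();
# every value here is made up for illustration.
data = [
    [0, "validator-0", "1.2.3", "Benchmarking", "-", 12, 1, "0:01:40", "0:25:00", 4200, "0.9981", "12"],
]

dataframe = gr.Dataframe(
    pd.DataFrame(data, columns=[
        "UID", "Name", "Version", "Status", "Winner", "Tested", "Invalid",
        "Avg. Benchmark Time", "ETA", "Step", "VTrust", "Updated",
    ]),
    # "Step" is a plain number; the colorized columns are rendered as markdown.
    datatype=["number", "markdown", "markdown", "markdown", "markdown", "number",
              "number", "markdown", "markdown", "number", "markdown", "markdown"],
    interactive=False,
    max_height=800,
)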
src/wandb_data.py CHANGED
@@ -71,6 +71,7 @@ class Run:
     hotkey: str
     status: BenchmarkStatus
     average_benchmarking_time: float
+    step: int
     eta: int
     winner_uid: int | None
     baseline_metrics: MetricData | None
@@ -200,6 +201,7 @@ def _add_runs(wandb_runs: list[wapi.Run]):
         hotkey=hotkey,
         status=status,
         average_benchmarking_time=average_benchmarking_time,
+        step=int(metrics["step"]),
         eta=max(int(average_benchmarking_time * (len(submission_info) - len(submissions) - len(invalid_submissions))) if average_benchmarking_time else 0, 0) if status != BenchmarkStatus.FINISHED else 0,
         winner_uid=winner_uid,
         baseline_metrics=baseline_metrics,
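The new `step` value flows straight from the run's logged metrics into the `Run` dataclass and is then displayed under the plain-number "Step" column added above. A minimal sketch of that data flow, under assumptions: the `Run` fields are trimmed to the ones touched by this commit, and `build_run` is a hypothetical stand-in for the relevant part of `_add_runs`.

from dataclasses import dataclass

@dataclass
class Run:
    # Trimmed to the fields touched by this commit; the real class has more.
    average_benchmarking_time: float
    step: int  # new: latest step reported in the validator's logged metrics
    eta: int

def build_run(metrics: dict, average_benchmarking_time: float, eta: int) -> Run:
    # Hypothetical helper mirroring the new line in _add_runs(): the logged value
    # may arrive as a float, so it is coerced to int before display.
    return Run(
        average_benchmarking_time=average_benchmarking_time,
        step=int(metrics["step"]),
        eta=eta,
    )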