import os

import gradio as gr
import pandas as pd

from src import Uid
from wandb_data import get_current_runs
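
# UID of the validator selected by default in the dropdown, read from the environment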
DEFAULT_VALIDATOR_UID = int(os.environ["DEFAULT_VALIDATOR_UID"])


def create_dropdown() -> gr.Dropdown:
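    """Build the source-validator dropdown, with one entry per currently tracked run."""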
    choices: list[tuple[str, int]] = []
    runs = get_current_runs()

    for run in runs:
        pretty_name = f"{run.uid} - {run.name} ({run.status.name()})"
        choices.append((pretty_name, run.uid))

    choices = sorted(choices, key=lambda x: x[1])

    default = DEFAULT_VALIDATOR_UID
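    # Fall back to the first listed validator if the default UID has no current run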
    if default not in [uid for _, uid in choices]:
        default = choices[0][1]

    return gr.Dropdown(
        choices,
        value=default,
        interactive=True,
        label="Source Validator"
    )


def create_leaderboard(validator_uid: Uid) -> gr.Dataframe:
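    """Build the leaderboard table of submissions from the selected validator's current run."""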
    data: list[list] = []
    runs = get_current_runs()

    for run in runs:
        if run.uid != validator_uid:
            continue

        for hotkey, submission in run.submissions.items():
            data.append([
                submission.info.uid,
                f"[{'/'.join(submission.info.repository.split('/')[-2:])}]({submission.info.repository})",
                round(submission.score, 5),
                f"{submission.metrics.generation_time:.4f}s",
                f"{submission.average_similarity * 100:.4f}%",
                f"{submission.metrics.size / 1024 ** 3:.4f}GB",
                f"{submission.metrics.vram_used / 1024 ** 3:.4f}GB",
                f"{submission.metrics.ram_used / 1024 ** 3:.4f}GB",
                f"{submission.metrics.watts_used:.3f}W",
                f"{submission.metrics.load_time:.3f}s",
                f"[{submission.info.block}](https://taostats.io/block/{submission.info.block}/extrinsics)",
                f"[{submission.info.revision}]({submission.info.repository}/commit/{submission.info.revision})",
                f"[{hotkey[:6]}...](https://taostats.io/hotkey/{hotkey})",
            ])
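
    # Show the highest-scoring submissions first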
    data.sort(key=lambda x: x[2], reverse=True)

    return gr.Dataframe(
        pd.DataFrame(data, columns=["UID", "Model", "Score", "Gen Time", "Similarity", "Size", "VRAM Used", "RAM Used", "Power Used", "Load Time", "Block", "Revision", "Hotkey"]),
        datatype=["number", "markdown", "number", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown"],
        interactive=False,
        max_height=800,
    )


def create_baseline(validator_uid: Uid) -> gr.Dataframe:
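    """Build the baseline-metrics table for the selected validator's current run."""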
    data: list[list] = []
    runs = get_current_runs()

    for run in runs:
        if run.uid != validator_uid:
            continue

        data.append([
            f"{run.baseline_metrics.generation_time:.4f}s",
            f"{run.baseline_metrics.size / 1024 ** 3:.4f}GB",
            f"{run.baseline_metrics.vram_used / 1024 ** 3:.4f}GB",
            f"{run.baseline_metrics.ram_used / 1024 ** 3:.4f}GB",
            f"{run.baseline_metrics.watts_used:.3f}W",
            f"{run.baseline_metrics.load_time:.3f}s",
        ])

    return gr.Dataframe(
        pd.DataFrame(data, columns=["Gen Time", "Size", "VRAM Used", "RAM Used", "Power Used", "Load Time"]),
        datatype=["number", "markdown", "markdown", "markdown", "markdown", "markdown"],
        interactive=False,
        label="Baseline",
    )