import os

import gradio as gr
import pandas as pd

from src import Uid
from wandb_data import get_current_runs

# UID of the validator shown by default; read from the environment at import time.
DEFAULT_VALIDATOR_UID = int(os.environ["DEFAULT_VALIDATOR_UID"])


def create_dropdown() -> gr.Dropdown:
    """Build the validator-selection dropdown from the current W&B runs."""
    choices: list[tuple[str, int]] = []
    runs = get_current_runs()
    for run in runs:
        pretty_name = f"{run.uid} - {run.name} ({run.status.name()})"
        choices.append((pretty_name, run.uid))

    # Sort the dropdown entries by validator UID.
    choices = sorted(choices, key=lambda x: x[1])

    # Fall back to the first available validator if the default isn't running.
    default = DEFAULT_VALIDATOR_UID
    if default not in [uid for _, uid in choices]:
        default = choices[0][1]
    return gr.Dropdown(
        choices,
        value=default,
        interactive=True,
        label="Source Validator"
    )


def create_leaderboard(validator_uid: Uid) -> gr.Dataframe:
    """Build the leaderboard table from the selected validator's submissions."""
    data: list[list] = []
    runs = get_current_runs()
    for run in runs:
        if run.uid != validator_uid:
            continue
        for hotkey, submission in run.submissions.items():
            data.append([
                submission.info.uid,
                f"[{'/'.join(submission.info.repository.split('/')[-2:])}]({submission.info.repository})",
                round(submission.score, 5),
                f"{submission.metrics.generation_time:.4f}s",
                f"{submission.average_similarity * 100:.4f}%",
                f"{submission.metrics.size / 1024 ** 3:.4f}GB",
                f"{submission.metrics.vram_used / 1024 ** 3:.4f}GB",
                f"{submission.metrics.ram_used / 1024 ** 3:.4f}GB",
                f"{submission.metrics.watts_used:.3f}W",
                f"{submission.metrics.load_time:.3f}s",
                f"[{submission.info.block}](https://taostats.io/block/{submission.info.block}/extrinsics)",
                f"[{submission.info.revision}]({submission.info.repository}/commit/{submission.info.revision})",
                f"[{hotkey[:6]}...](https://taostats.io/hotkey/{hotkey})",
            ])

    # Rank submissions by score, highest first.
    data.sort(key=lambda x: x[2], reverse=True)

    return gr.Dataframe(
        pd.DataFrame(data, columns=["UID", "Model", "Score", "Gen Time", "Similarity", "Size", "VRAM Used", "RAM Used", "Power Used", "Load Time", "Block", "Revision", "Hotkey"]),
        datatype=["number", "markdown", "number", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown"],
        interactive=False,
        max_height=800,
    )


def create_baseline(validator_uid: Uid) -> gr.Dataframe:
    """Build the baseline-metrics table for the selected validator's run."""
    data: list[list] = []
    runs = get_current_runs()
    for run in runs:
        if run.uid != validator_uid:
            continue
        data.append([
            f"{run.baseline_metrics.generation_time:.4f}s",
            f"{run.baseline_metrics.size / 1024 ** 3:.4f}GB",
            f"{run.baseline_metrics.vram_used / 1024 ** 3:.4f}GB",
            f"{run.baseline_metrics.ram_used / 1024 ** 3:.4f}GB",
            f"{run.baseline_metrics.watts_used:.3f}W",
            f"{run.baseline_metrics.load_time:.3f}s",
        ])

    return gr.Dataframe(
        pd.DataFrame(data, columns=["Gen Time", "Size", "VRAM Used", "RAM Used", "Power Used", "Load Time"]),
        datatype=["number", "markdown", "markdown", "markdown", "markdown", "markdown"],
        interactive=False,
        label="Baseline",
    )
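

# A minimal sketch (an addition, not part of the original module) of how these
# builders could be wired into a Gradio Blocks app. The layout and event wiring
# below are assumptions; the Space's real entry point may differ.
if __name__ == "__main__":
    with gr.Blocks() as demo:
        dropdown = create_dropdown()
        baseline = create_baseline(DEFAULT_VALIDATOR_UID)
        leaderboard = create_leaderboard(DEFAULT_VALIDATOR_UID)
        # Rebuild both tables whenever a different validator is selected.
        dropdown.change(create_baseline, inputs=dropdown, outputs=baseline)
        dropdown.change(create_leaderboard, inputs=dropdown, outputs=leaderboard)
    demo.launch()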