import json
import os

import pandas as pd

from src.about import Tasks
from src.display.formatting import make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.read_evals import get_raw_eval_results


# Map each task's display column name back to its benchmark metadata.
TASK_NAME_INVERSE_MAP = {
    task.value.col_name: {
        "name": task.value.benchmark,
        "type": task.value.type,
        "source": task.value.source,
    }
    for task in Tasks
}
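# e.g., for a hypothetical task whose display column is "My Bench":
#   TASK_NAME_INVERSE_MAP["My Bench"] == {"name": "my_bench", "type": ..., "source": ...}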

EMPTY_SYMBOL = "--"


def get_inspect_log_url(model_name: str, benchmark_name: str) -> str:
    """Returns the URL to the Inspect log file for a given model and benchmark."""
    with open("./inspect_log_file_names.json", "r") as f:
        inspect_log_files = json.load(f)
    log_file_name = inspect_log_files.get(model_name, {}).get(benchmark_name, None)
    if log_file_name is None:
        return ""
    # the log viewer serves .eval files, so swap the .json extension recorded in the mapping
    log_file_name = log_file_name.replace(".json", ".eval")
    return f"https://storage.googleapis.com/inspect-evals/eval/{model_name}/index.html?log_file=logs/logs/{log_file_name}"


def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
    """Creates a dataframe from all the individual experiment results."""
    raw_data = get_raw_eval_results(results_path, requests_path)
    all_data_json = [v.to_dict() for v in raw_data]

    df = pd.DataFrame.from_records(all_data_json)
    df = df[cols].round(decimals=2)

    # subset for model and benchmark cols
    df = df[[AutoEvalColumn.model.name] + benchmark_cols]
    df = df.fillna(EMPTY_SYMBOL)

    # turn each score into a markdown link pointing at its Inspect log file
    def linkify(row: pd.Series, col: str) -> str:
        if row[col] == EMPTY_SYMBOL:
            return row[col]
        # the model cell is an HTML anchor, so extract the plain model name from it
        model_name = row[AutoEvalColumn.model.name].split(">")[1].split("<")[0]
        url = get_inspect_log_url(model_name=model_name, benchmark_name=TASK_NAME_INVERSE_MAP[col]["name"])
        return f"[{row[col]}]({url})"

    for col in benchmark_cols:
        df[col] = df[[AutoEvalColumn.model.name, col]].apply(linkify, axis=1, col=col)

    return df
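
# Example (a sketch; the paths and column lists are placeholders for whatever
# the surrounding app configures):
#
#   benchmark_cols = [t.value.col_name for t in Tasks]
#   cols = [AutoEvalColumn.model.name] + benchmark_cols
#   leaderboard_df = get_leaderboard_df("./eval-results", "./eval-requests", cols, benchmark_cols)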


def get_evaluation_queue_df(save_path: str, cols: list) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Creates the different dataframes for the evaluation queue requests."""
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if ".json" in entry:
            file_path = os.path.join(save_path, entry)
            with open(file_path) as fp:
                data = json.load(fp)

            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"], data["model_sha"])
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")

            all_evals.append(data)
        elif ".md" not in entry:
            # this is a folder
            sub_entries = [
                e
                for e in os.listdir(os.path.join(save_path, entry))
                if os.path.isfile(os.path.join(save_path, entry, e)) and not e.startswith(".")
            ]
            for sub_entry in sub_entries:
                file_path = os.path.join(save_path, entry, sub_entry)
                with open(file_path) as fp:
                    data = json.load(fp)

                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"], data["model_sha"])
                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                all_evals.append(data)

    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished, df_running, df_pending
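
# Example (a sketch; the queue directory is hypothetical):
#
#   finished_df, running_df, pending_df = get_evaluation_queue_df(
#       "./eval-queue", [EvalQueueColumn.model.name, EvalQueueColumn.revision.name, "status"]
#   )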