import json
import os

import pandas as pd

from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.read_evals import get_raw_eval_results
from src.about import Tasks, N_Tasks, Detail_Tasks


def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list, version="1_correct") -> pd.DataFrame:
    """Creates a dataframe from all the individual experiment results"""
    # Copy the column lists so the caller's lists are not mutated below
    cols = cols.copy()
    benchmark_cols = benchmark_cols.copy()
    raw_data = get_raw_eval_results(os.path.join(results_path, version), requests_path)

    # The "n_correct" version also reports the fine-grained detail tasks
    tasks = list(N_Tasks) + list(Detail_Tasks) if "n_" in version else list(Tasks)
    if version == "1_correct_var":
        tasks = [t for t in tasks if t.value.col_name != "VCR"]
    all_data_json = [v.to_dict(tasks) for v in raw_data]

    df = pd.DataFrame.from_records(all_data_json)
    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)

    # Drop the columns that are not scored for this dataset version
    if version != "1_correct":
        cols.remove("VCR")
        if version != "1_correct_var":
            benchmark_cols.remove("VCR")
    if version != "n_correct":
        for task in Detail_Tasks:
            cols.remove(task.value.col_name)
    df = df[cols].round(decimals=2)

    # filter out if any of the benchmarks have not been produced
    df = df[has_no_nan_values(df, benchmark_cols)]
    return df


def get_evaluation_queue_df(save_path: str, cols: list) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Creates the different dataframes for the evaluation queue requests"""
    versions = ["1_correct"]  # "1_correct_var", "n_correct"
    entries = [
        os.path.join(v, entry)
        for v in versions
        for entry in os.listdir(os.path.join(save_path, v))
        if not entry.startswith(".")
    ]
    all_evals = []

    for entry in entries:
        if ".txt" not in entry:
            file_path = os.path.join(save_path, entry, "eval_request.json")
            with open(file_path) as fp:
                data = json.load(fp)

            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")
            data[EvalQueueColumn.output_format.name] = data.get("output_format")
            # The dataset version is the directory two levels above eval_request.json
            data[EvalQueueColumn.dataset_version.name] = file_path.split("/")[-3]

            all_evals.append(data)

    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished[cols], df_running[cols], df_pending[cols]
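

# A minimal usage sketch, assuming the module is run from the repository root
# with locally synced result/request folders. "eval-results", "eval-queue" and
# the column lists below are hypothetical placeholders; the real values come
# from the app's configuration (e.g. the display-column definitions in
# src.display.utils).
if __name__ == "__main__":
    example_cols = ["Model", "Average", "VCR"]  # hypothetical display columns
    example_benchmark_cols = ["VCR"]  # hypothetical benchmark columns

    leaderboard_df = get_leaderboard_df(
        "eval-results",  # hypothetical path to the downloaded results
        "eval-queue",  # hypothetical path to the downloaded requests
        cols=example_cols,
        benchmark_cols=example_benchmark_cols,
        version="1_correct",
    )
    print(leaderboard_df.head())

    # The queue helper returns the finished, running and pending requests, in that order
    finished_df, running_df, pending_df = get_evaluation_queue_df(
        "eval-queue", cols=["model", "status"]  # hypothetical queue columns
    )
    print(len(finished_df), len(running_df), len(pending_df))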