import json
import os

import pandas as pd

from src.about import Detail_Tasks, N_Tasks, Tasks
from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.read_evals import get_raw_eval_results


def get_leaderboard_df(
    results_path: str, requests_path: str, cols: list, benchmark_cols: list, version: str = "1_correct"
) -> pd.DataFrame:
    """Creates a dataframe from all the individual experiment results."""
    # Copy the column lists so the `remove` calls below do not mutate the
    # caller's lists across repeated calls.
    cols = cols.copy()
    benchmark_cols = benchmark_cols.copy()

    raw_data = get_raw_eval_results(os.path.join(results_path, version), requests_path)

    # The "1_correct" version reports the main tasks; every other version
    # reports the N-shot and detail tasks instead.
    tasks = Tasks if version == "1_correct" else list(N_Tasks) + list(Detail_Tasks)
    all_data_json = [v.to_dict(tasks) for v in raw_data]

    df = pd.DataFrame.from_records(all_data_json)
    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)

    if version != "1_correct":
        # VCR is only scored in the "1_correct" setting.
        cols.remove("VCR")
        benchmark_cols.remove("VCR")
    else:
        # The detail-task columns are not part of the "1_correct" leaderboard.
        for task in Detail_Tasks:
            cols.remove(task.value.col_name)

    df = df[cols].round(decimals=2)

    # Filter out rows for which any of the benchmarks has not been produced.
    df = df[has_no_nan_values(df, benchmark_cols)]
    return df
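
# A minimal usage sketch (illustrative only: the paths and the COLS /
# BENCHMARK_COLS names are assumptions, not values defined in this file):
#
#   leaderboard_df = get_leaderboard_df(
#       results_path="eval-results",
#       requests_path="eval-queue",
#       cols=COLS,
#       benchmark_cols=BENCHMARK_COLS,
#       version="1_correct",
#   )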


def get_evaluation_queue_df(save_path: str, cols: list) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Creates the different dataframes for the evaluation queue requests."""
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if entry.endswith(".json"):
            file_path = os.path.join(save_path, entry)
            with open(file_path) as fp:
                data = json.load(fp)

            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")
            all_evals.append(data)
        elif ".md" not in entry:
            # This is a folder: load every request file it contains, skipping
            # anything that is not a JSON file.
            sub_entries = [
                e for e in os.listdir(os.path.join(save_path, entry)) if not e.startswith(".") and e.endswith(".json")
            ]
            for sub_entry in sub_entries:
                file_path = os.path.join(save_path, entry, sub_entry)
                with open(file_path) as fp:
                    data = json.load(fp)

                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                all_evals.append(data)
    # Split the requests by status into the three queue views.
    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]

    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished[cols], df_running[cols], df_pending[cols]
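
# A minimal usage sketch (illustrative only: "eval-queue" and EVAL_COLS are
# assumptions, not values defined in this file):
#
#   finished_df, running_df, pending_df = get_evaluation_queue_df("eval-queue", EVAL_COLS)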