# kaz-llm-lb/src/leaderboard/build_leaderboard.py
import json
import logging
import os
import time

import pandas as pd
from huggingface_hub import snapshot_download

from src.envs import DATA_PATH, HF_TOKEN_PRIVATE

# Configure logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")


def time_diff_wrapper(func):
    """Decorator that logs how long the wrapped function took to run."""

    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        diff = end_time - start_time
        logging.info("Time taken for %s: %s seconds", func.__name__, diff)
        return result

    return wrapper


def chmod_recursive(path, mode):
    """Recursively apply `mode` to `path` and everything beneath it."""
    os.chmod(path, mode)
    for root, dirs, files in os.walk(path):
        for dir_name in dirs:
            os.chmod(os.path.join(root, dir_name), mode)
        for file_name in files:
            os.chmod(os.path.join(root, file_name), mode)


@time_diff_wrapper
def download_dataset(repo_id, local_dir, repo_type="dataset", max_attempts=3, backoff_factor=1.5):
    """Download a dataset snapshot with exponential backoff retries."""
    # The mode was previously passed as the decimal literal 777; use an octal literal
    # so the intended permissive permissions are actually applied.
    os.makedirs(local_dir, mode=0o777, exist_ok=True)
    os.makedirs("./tmp", mode=0o777, exist_ok=True)
    attempt = 0
    while attempt < max_attempts:
        try:
            logging.info("Downloading %s to %s", repo_id, local_dir)
            snapshot_download(
                repo_id=repo_id,
                local_dir=local_dir,
                cache_dir="./tmp",
                repo_type=repo_type,
                tqdm_class=None,
                token=HF_TOKEN_PRIVATE,
                etag_timeout=30,
                max_workers=8,
                force_download=True,
                local_dir_use_symlinks=False,
            )
            logging.info("Download successful")
            return
        except Exception as e:
            wait_time = backoff_factor**attempt
            logging.error("Error downloading %s: %s, retrying in %ss", repo_id, e, wait_time)
            time.sleep(wait_time)
            attempt += 1
    logging.error("Failed to download %s after %s attempts", repo_id, max_attempts)


def download_openbench():
    """Fetch the leaderboard metadata and the trusted model evaluation answers."""
    # Download the previously auto-generated leaderboard files
    download_dataset("kz-transformers/kaz-llm-lb-metainfo", DATA_PATH)
    # Download answers from reference models that we trust
    download_dataset("kz-transformers/s-openbench-eval", "m_data")


def build_leadearboard_df():
    """Load the stored leaderboard state and return it as a scored, sorted DataFrame."""
    initial_file_path = f"{os.path.abspath(DATA_PATH)}/leaderboard.json"
    print(f"READING INITIAL LB STATE FROM: {initial_file_path}")
    with open(initial_file_path, "r", encoding="utf-8") as eval_file:
        records = json.load(eval_file)
    print(records)
    df = pd.DataFrame.from_records(records)
    print("FIRST DF READING: ", df.columns, df.shape)

    score_cols = [
        "mmlu_translated_kk", "kk_constitution_mc", "kk_dastur_mc", "kazakh_and_literature_unt_mc",
        "kk_geography_unt_mc", "kk_world_history_unt_mc", "kk_history_of_kazakhstan_unt_mc",
        "kk_english_unt_mc", "kk_biology_unt_mc", "kk_human_society_rights_unt_mc",
    ]
    # Copy the column slice so the assignments below do not raise SettingWithCopyWarning
    leaderboard_df = df[["model", *score_cols, "model_dtype", "ppl"]].copy()
    leaderboard_df["avg"] = leaderboard_df[score_cols].mean(axis=1)
    leaderboard_df.sort_values(by="avg", ascending=False, inplace=True)

    numeric_cols = leaderboard_df.select_dtypes(include=["number"]).columns
    print("NUMERIC COLS: ", numeric_cols)
    leaderboard_df[numeric_cols] = leaderboard_df[numeric_cols].round(3)
    print("LEADERBOARD DF AFTER ROUND: ", leaderboard_df)
    return leaderboard_df.copy()
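

# A minimal usage sketch, not part of the original module: it assumes the datasets
# referenced above are reachable and that HF_TOKEN_PRIVATE grants access to them.
# It shows the typical call order (refresh cached data, then build the DataFrame);
# the guard keeps it from running when the module is imported by the Space app.
if __name__ == "__main__":
    download_openbench()
    leaderboard = build_leadearboard_df()
    print(leaderboard.head())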