import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download
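
# Project-local modules: page copy, CSS, column definitions, env/config,
# and the results loader.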
from src.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    EVAL_COLS,
    EVAL_TYPES,
    AutoEvalColumn,
    fields,
)
from src.envs import (
    API,
    EVAL_DETAILED_RESULTS_PATH,
    EVAL_RESULTS_PATH,
    EVAL_DETAILED_RESULTS_REPO,
    REPO_ID,
    RESULTS_REPO,
    TOKEN,
)
from src.populate import get_leaderboard_df
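

# Restart the Space via the Hub API. Used as a recovery path when the
# dataset downloads below fail, and on a timer so fresh results are
# re-cloned at every boot.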
def restart_space():
    API.restart_space(repo_id=REPO_ID)


### Space initialisation
try:
    print(EVAL_DETAILED_RESULTS_REPO)
    snapshot_download(
        repo_id=EVAL_DETAILED_RESULTS_REPO,
        local_dir=EVAL_DETAILED_RESULTS_PATH,
        repo_type="dataset",
        tqdm_class=None,
        etag_timeout=30,
        token=TOKEN,
    )
except Exception:
    restart_space()
try:
    print(EVAL_RESULTS_PATH)
    snapshot_download(
        repo_id=RESULTS_REPO,
        local_dir=EVAL_RESULTS_PATH,
        repo_type="dataset",
        tqdm_class=None,
        etag_timeout=30,
        token=TOKEN,
    )
except Exception:
    restart_space()
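
# get_leaderboard_df is expected to return a dict of DataFrames keyed by
# dataset subset name; init_leaderboard below indexes it that way.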
LEADERBOARD_DF = get_leaderboard_df(RESULTS_REPO, EVAL_RESULTS_PATH)


def init_leaderboard(dataframes):
    if dataframes is None or not dataframes:
        raise ValueError("Leaderboard data is empty or None.")

    def create_leaderboard(df):
        return Leaderboard(
            value=df,
            datatype=[c.type for c in fields(AutoEvalColumn)],
            select_columns=SelectColumns(
                default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
                cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
                label="Select Columns to Display:",
            ),
            search_columns=[AutoEvalColumn.model.name],
            hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
            filter_columns=[],
            interactive=False,
        )
    subset_names = list(dataframes.keys())
    selected_subset = gr.Dropdown(choices=subset_names, label="Select Dataset Subset", value=subset_names[0])
    # gr.Dynamic is not a Gradio component; render the first subset directly
    # and rebuild the Leaderboard whenever the dropdown selection changes.
    leaderboard = create_leaderboard(dataframes[subset_names[0]])
    selected_subset.change(
        fn=lambda x: create_leaderboard(dataframes[x]),
        inputs=[selected_subset],
        outputs=leaderboard,
    )
    return leaderboard
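

# Assemble the UI: title, intro markdown, and tabbed results/about views.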
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 LiveBench Results", elem_id="llm-benchmark-tab-table", id=0):
            leaderboard = init_leaderboard(LEADERBOARD_DF)

        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

    # with gr.Row():
    #     with gr.Accordion("📙 Citation", open=False):
    #         citation_button = gr.Textbox(
    #             value=CITATION_BUTTON_TEXT,
    #             label=CITATION_BUTTON_LABEL,
    #             lines=20,
    #             elem_id="citation-button",
    #             show_copy_button=True,
    #         )
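
# Restart the Space every 30 minutes (1800 s) so the result datasets are
# re-downloaded and the leaderboard stays current.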
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
demo.queue(default_concurrency_limit=40).launch()