File size: 3,035 Bytes
737fa2d
 
 
 
 
1a8bc79
737fa2d
 
 
 
 
 
 
 
 
 
 
 
 
 
ff8b196
1a8bc79
737fa2d
 
 
 
 
1a8bc79
737fa2d
 
 
 
1a8bc79
 
 
 
 
 
737fa2d
 
 
 
1a8bc79
f19f8d1
737fa2d
 
 
 
 
 
 
 
fce7d31
 
 
 
 
35eacb4
fce7d31
 
35eacb4
fce7d31
 
737fa2d
 
 
 
 
 
 
1a8bc79
737fa2d
 
fc8fa64
737fa2d
 
8931773
1a8bc79
737fa2d
 
3af1f54
737fa2d
 
 
35eacb4
737fa2d
 
 
 
 
 
 
1a8bc79
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download
from datasets import load_dataset

from src.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    INTRODUCTION_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    COLS,
    AutoEvalColumn,
    fields,
)
from src.envs import API, EVAL_REQUESTS_PATH, QUEUE_REPO, REPO_ID, TOKEN
from src.populate import get_leaderboard_df


def restart_space() -> None:
    """Restart this Hugging Face Space via the Hub API.

    Used both as the error-recovery path during initialisation and as the
    periodic refresh job registered with the background scheduler.
    """
    API.restart_space(repo_id=REPO_ID)


### Space initialisation
# Mirror the evaluation-request queue dataset locally so the leaderboard
# DataFrame can be built from it.  If the download fails for any reason,
# restart the Space and let initialisation retry from a clean state.
try:
    print(EVAL_REQUESTS_PATH)
    snapshot_download(
        repo_id=QUEUE_REPO,
        local_dir=EVAL_REQUESTS_PATH,
        repo_type="dataset",
        tqdm_class=None,  # suppress the tqdm progress bar in Space logs
        etag_timeout=30,
        token=TOKEN,
    )
except Exception:
    restart_space()

# Benchmark size, displayed in the introduction text below.
# NOTE(review): assumes the dataset exposes a "test" split — confirm upstream.
total_issues = load_dataset("dtcxzyw/llvm-apr-benchmark").num_rows["test"]
LEADERBOARD_DF = get_leaderboard_df(EVAL_REQUESTS_PATH, COLS)


def init_leaderboard(dataframe):
    """Build the read-only ``Leaderboard`` component from *dataframe*.

    Column visibility, search, and filter behaviour are all derived from the
    ``AutoEvalColumn`` field definitions.

    Raises:
        ValueError: if *dataframe* is ``None`` or has no rows.
    """
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")

    # Enumerate the column metadata once and derive each view from it.
    columns = fields(AutoEvalColumn)
    shown_by_default = [col.name for col in columns if col.displayed_by_default]
    always_shown = [col.name for col in columns if col.never_hidden]
    never_shown = [col.name for col in columns if col.hidden]

    return Leaderboard(
        value=dataframe,
        datatype=[col.type for col in columns],
        select_columns=SelectColumns(
            default_selection=shown_by_default,
            cant_deselect=always_shown,
            label="Select Columns to Display:",
        ),
        search_columns=[AutoEvalColumn.method_name.name],
        hide_columns=never_shown,
        filter_columns=[
            ColumnFilter(AutoEvalColumn.with_hint.name, type="checkboxgroup", label="Hint"),
        ],
        bool_checkboxgroup_label="Hide models",
        interactive=False,
    )


# --- Gradio UI -------------------------------------------------------------
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT + f"\nTotal issues: {total_issues}\n", elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        # Tab 0: the leaderboard table itself.
        with gr.TabItem("πŸ… Leaderboard", elem_id="llm-benchmark-tab-table", id=0):
            leaderboard = init_leaderboard(LEADERBOARD_DF)

        # Tab 1: static instructions for submitting results.
        with gr.TabItem("πŸš€ Submission", elem_id="llm-benchmark-tab-table", id=1):
            gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")

    # Collapsible citation box with copy-to-clipboard support.
    with gr.Row():
        with gr.Accordion("πŸ“™ Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                lines=6,
                elem_id="citation-button",
                show_copy_button=True,
            )

# Restart the Space every 30 minutes so the leaderboard re-reads fresh data
# (the DataFrame is only built once, at module import time).
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
demo.queue(default_concurrency_limit=40).launch()