|
|
import gradio as gr |
|
import pandas as pd |
|
from apscheduler.schedulers.background import BackgroundScheduler |
|
from huggingface_hub import snapshot_download |
|
|
|
from src.about import ( |
|
CITATION_BUTTON_LABEL, |
|
CITATION_BUTTON_TEXT, |
|
EVALUATION_QUEUE_TEXT, |
|
INTRODUCTION_TEXT, |
|
LLM_BENCHMARKS_TEXT, |
|
TITLE, |
|
nc_tasks, |
|
nr_tasks, |
|
lp_tasks, |
|
) |
|
from src.display.css_html_js import custom_css |
|
from src.display.utils import ( |
|
BENCHMARK_COLS, |
|
COLS, |
|
COLS_NC, |
|
COLS_NR, |
|
COLS_LP, |
|
EVAL_COLS, |
|
EVAL_TYPES, |
|
NUMERIC_INTERVALS, |
|
TYPES, |
|
AutoEvalColumn_NodeClassification, |
|
AutoEvalColumn_NodeRegression, |
|
    AutoEvalColumn_LinkPrediction,

    ModelType,
|
TASK_LIST, |
|
OFFICIAL, |
|
HONOR, |
|
fields, |
|
WeightType, |
|
Precision |
|
) |
|
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN |
|
from src.populate import get_evaluation_queue_df, get_leaderboard_df |
|
from src.submission.submit import add_new_eval |
|
|
|
|
|
def restart_space(): |
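    # Restart the Space; used on a schedule below and as a recovery path when
    # the snapshot downloads fail.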
|
API.restart_space(repo_id=REPO_ID) |
|
|
|
try: |
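    # Sync the pending-submission queue from the Hub into the local cache.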
|
print(EVAL_REQUESTS_PATH) |
|
snapshot_download( |
|
repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN |
|
) |
|
except Exception: |
|
restart_space() |
|
try: |
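    # Sync the published evaluation results from the Hub into the local cache.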
|
print(EVAL_RESULTS_PATH) |
|
snapshot_download( |
|
repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN |
|
) |
|
except Exception: |
|
restart_space() |
|
|
|
|
|
|
|
def update_table( |
|
hidden_df: pd.DataFrame, |
|
columns: list, |
|
query: str, |
|
): |
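    # Gradio callback: apply the search query, then narrow to the selected columns.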
|
|
|
filtered_df = filter_queries(query, hidden_df) |
|
df = select_columns(filtered_df, columns) |
|
return df |
|
|
|
|
|
def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
    # Search the always-visible "Model" column. The AutoEvalColumn global is
    # rebound by each tab at build time, so referencing it here would apply
    # the last tab's schema to every search.
    return df[df["Model"].str.contains(query, case=False)]
|
|
|
|
|
def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
    always_here_cols = [
        "Model"
    ]

    # Filter on the requested columns directly rather than the module-level
    # COLS global, which is rebound by each tab and would otherwise leak the
    # last tab's column set into every leaderboard.
    filtered_df = df[
        always_here_cols + [c for c in columns if c in df.columns and c not in always_here_cols]
    ]
    return filtered_df
|
|
|
|
|
def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
    # Support several ";"-separated queries: collect the rows matching each
    # one, then concatenate and drop duplicate models.
    final_df = []
    if query != "":
        queries = [q.strip() for q in query.split(";")]
        for _q in queries:
            if _q != "":
                temp_filtered_df = search_table(filtered_df, _q)
                if len(temp_filtered_df) > 0:
                    final_df.append(temp_filtered_df)
        if len(final_df) > 0:
            filtered_df = pd.concat(final_df)
            filtered_df = filtered_df.drop_duplicates(subset=["Model"])

    return filtered_df
|
|
|
|
|
def filter_models(
    df: pd.DataFrame, size_query: list, show_deleted: bool
) -> pd.DataFrame:
    # Note: currently unused -- no size or "show deleted" widgets are wired up
    # below. It relies on the module-level AutoEvalColumn set by the last tab.
    if show_deleted:
        filtered_df = df
    else:
        filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]

    # Keep only models whose parameter counts fall inside the selected size
    # buckets; filter the already-filtered frame, not the original df.
    numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
    params_column = pd.to_numeric(filtered_df[AutoEvalColumn.params.name], errors="coerce")
    mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
    filtered_df = filtered_df.loc[mask]

    return filtered_df
|
|
|
demo = gr.Blocks(css=custom_css) |
|
with demo: |
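    # Layout: one leaderboard tab per task track, plus a submission tab.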
|
gr.HTML(TITLE) |
|
gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text") |
|
|
|
with gr.Tabs(elem_classes="tab-buttons") as tabs: |
|
with gr.TabItem("π
Entity Classification Leaderboard", elem_id="llm-benchmark-tab-table", id=0): |
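            # Module-level rebinds used while building this tab; they execute
            # once at startup, so runtime callbacks must not depend on them.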
|
COLS = COLS_NC |
|
AutoEvalColumn = AutoEvalColumn_NodeClassification |
|
original_df = get_leaderboard_df(EVAL_REQUESTS_PATH, "Node Classification") |
|
leaderboard_df = original_df.copy() |
|
|
|
with gr.Row(): |
|
with gr.Column(): |
|
with gr.Row(): |
|
search_bar = gr.Textbox( |
|
placeholder=" π Search for your model (separate multiple queries with `;`) and press ENTER...", |
|
show_label=False, |
|
elem_id="search-bar", |
|
) |
|
with gr.Row(): |
|
shown_columns = gr.CheckboxGroup( |
|
choices=[ |
|
c.name |
|
for c in fields(AutoEvalColumn) |
|
if not c.hidden and not c.never_hidden |
|
], |
|
value=[ |
|
c.name |
|
for c in fields(AutoEvalColumn) |
|
if c.displayed_by_default and not c.hidden and not c.never_hidden |
|
], |
|
label="Select columns to show", |
|
elem_id="column-select", |
|
interactive=True, |
|
) |
|
|
|
print(leaderboard_df) |
|
print(fields(AutoEvalColumn)) |
|
leaderboard_table = gr.components.Dataframe( |
|
value=leaderboard_df[ |
|
[c.name for c in fields(AutoEvalColumn) if c.never_hidden] |
|
+ shown_columns.value |
|
], |
|
headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value, |
|
datatype=TYPES, |
|
elem_id="leaderboard-table", |
|
interactive=False, |
|
visible=True, |
|
) |
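
            # Hidden, unfiltered copy of the full table; the search and column
            # callbacks filter from this source rather than from the visible table.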
|
|
|
|
|
hidden_leaderboard_table_for_search = gr.components.Dataframe( |
|
value=original_df[COLS], |
|
headers=COLS, |
|
datatype=TYPES, |
|
visible=False, |
|
) |
|
search_bar.submit( |
|
update_table, |
|
[ |
|
hidden_leaderboard_table_for_search, |
|
shown_columns, |
|
search_bar, |
|
], |
|
leaderboard_table, |
|
) |
|
for selector in [shown_columns]: |
|
selector.change( |
|
update_table, |
|
[ |
|
hidden_leaderboard_table_for_search, |
|
shown_columns, |
|
search_bar, |
|
], |
|
leaderboard_table, |
|
queue=True, |
|
) |
|
gr.Markdown("Evaluation metric: AUROC β¬οΈ") |
|
|
|
|
|
with gr.TabItem("π
Entity Regression Leaderboard", elem_id="llm-benchmark-tab-table", id=1): |
|
COLS = COLS_NR |
|
AutoEvalColumn = AutoEvalColumn_NodeRegression |
|
original_df = get_leaderboard_df(EVAL_REQUESTS_PATH, "Node Regression") |
|
leaderboard_df = original_df.copy() |
|
with gr.Row(): |
|
with gr.Column(): |
|
with gr.Row(): |
|
search_bar = gr.Textbox( |
|
placeholder=" π Search for your model (separate multiple queries with `;`) and press ENTER...", |
|
show_label=False, |
|
elem_id="search-bar", |
|
) |
|
with gr.Row(): |
|
shown_columns = gr.CheckboxGroup( |
|
choices=[ |
|
c.name |
|
for c in fields(AutoEvalColumn) |
|
if not c.hidden and not c.never_hidden |
|
], |
|
value=[ |
|
c.name |
|
for c in fields(AutoEvalColumn) |
|
if c.displayed_by_default and not c.hidden and not c.never_hidden |
|
], |
|
label="Select columns to show", |
|
elem_id="column-select", |
|
interactive=True, |
|
) |
|
|
|
print(leaderboard_df) |
|
print(fields(AutoEvalColumn)) |
|
leaderboard_table = gr.components.Dataframe( |
|
value=leaderboard_df[ |
|
[c.name for c in fields(AutoEvalColumn) if c.never_hidden] |
|
+ shown_columns.value |
|
], |
|
headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value, |
|
datatype=TYPES, |
|
elem_id="leaderboard-table", |
|
interactive=False, |
|
visible=True, |
|
) |
|
|
|
|
|
hidden_leaderboard_table_for_search = gr.components.Dataframe( |
|
value=original_df[COLS], |
|
headers=COLS, |
|
datatype=TYPES, |
|
visible=False, |
|
) |
|
search_bar.submit( |
|
update_table, |
|
[ |
|
hidden_leaderboard_table_for_search, |
|
shown_columns, |
|
search_bar, |
|
], |
|
leaderboard_table, |
|
) |
|
for selector in [shown_columns]: |
|
selector.change( |
|
update_table, |
|
[ |
|
hidden_leaderboard_table_for_search, |
|
shown_columns, |
|
search_bar, |
|
], |
|
leaderboard_table, |
|
queue=True, |
|
) |
|
gr.Markdown("Evaluation metric: MAE β¬οΈ") |
|
|
|
with gr.TabItem("π
Recommendation Leaderboard", elem_id="llm-benchmark-tab-table", id=2): |
|
COLS = COLS_LP |
|
AutoEvalColumn = AutoEvalColumn_LinkPrediction |
|
original_df = get_leaderboard_df(EVAL_REQUESTS_PATH, "Link Prediction") |
|
leaderboard_df = original_df.copy() |
|
with gr.Row(): |
|
with gr.Column(): |
|
with gr.Row(): |
|
search_bar = gr.Textbox( |
|
placeholder=" π Search for your model (separate multiple queries with `;`) and press ENTER...", |
|
show_label=False, |
|
elem_id="search-bar", |
|
) |
|
with gr.Row(): |
|
shown_columns = gr.CheckboxGroup( |
|
choices=[ |
|
c.name |
|
for c in fields(AutoEvalColumn) |
|
if not c.hidden and not c.never_hidden |
|
], |
|
value=[ |
|
c.name |
|
for c in fields(AutoEvalColumn) |
|
if c.displayed_by_default and not c.hidden and not c.never_hidden |
|
], |
|
label="Select columns to show", |
|
elem_id="column-select", |
|
interactive=True, |
|
) |
|
|
|
print(leaderboard_df) |
|
print(fields(AutoEvalColumn)) |
|
leaderboard_table = gr.components.Dataframe( |
|
value=leaderboard_df[ |
|
[c.name for c in fields(AutoEvalColumn) if c.never_hidden] |
|
+ shown_columns.value |
|
], |
|
headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value, |
|
datatype=TYPES, |
|
elem_id="leaderboard-table", |
|
interactive=False, |
|
visible=True, |
|
) |
|
|
|
|
|
hidden_leaderboard_table_for_search = gr.components.Dataframe( |
|
value=original_df[COLS], |
|
headers=COLS, |
|
datatype=TYPES, |
|
visible=False, |
|
) |
|
search_bar.submit( |
|
update_table, |
|
[ |
|
hidden_leaderboard_table_for_search, |
|
shown_columns, |
|
search_bar, |
|
], |
|
leaderboard_table, |
|
) |
|
for selector in [shown_columns]: |
|
selector.change( |
|
update_table, |
|
[ |
|
hidden_leaderboard_table_for_search, |
|
shown_columns, |
|
search_bar, |
|
], |
|
leaderboard_table, |
|
queue=True, |
|
) |
|
gr.Markdown("Evaluation metric: MAP β¬οΈ") |
|
|
|
|
|
with gr.TabItem("π Submit here! ", elem_id="llm-benchmark-tab-table", id=3): |
|
with gr.Column(): |
|
with gr.Row(): |
|
gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text") |
|
|
|
with gr.Row(): |
|
gr.Markdown("# βοΈβ¨ Submit your model here!", elem_classes="markdown-text") |
|
|
|
with gr.Row(): |
|
with gr.Column(): |
|
author_name_textbox = gr.Textbox(label="Your name") |
|
email_textbox = gr.Textbox(label="Your email") |
|
relbench_version_textbox = gr.Textbox(label="RelBench version") |
|
|
|
model_name_textbox = gr.Textbox(label="Model name") |
|
|
|
''' |
|
dataset_name_textbox = gr.Dropdown( |
|
choices=[t.value.name for t in TASK_LIST], |
|
label="Task name (e.g. rel-amazon-user-churn)", |
|
multiselect=False, |
|
value=None, |
|
interactive=True, |
|
) |
|
''' |
|
|
|
official_or_not = gr.Dropdown( |
|
choices=[i.value.name for i in OFFICIAL], |
|
label="Is it an official submission?", |
|
multiselect=False, |
|
value=None, |
|
interactive=True, |
|
) |
|
paper_url_textbox = gr.Textbox(label="Paper URL Link") |
|
github_url_textbox = gr.Textbox(label="GitHub URL Link") |
|
|
|
task_track = gr.Dropdown( |
|
choices=['Entity Classification', 'Entity Regression', 'Recommendation'], |
|
label="Choose the task track", |
|
multiselect=False, |
|
value=None, |
|
interactive=True, |
|
) |
|
honor_code = gr.Dropdown( |
|
choices=[i.value.name for i in HONOR], |
|
label="Do you agree to the honor code?", |
|
multiselect=False, |
|
value=None, |
|
interactive=True, |
|
) |
|
|
|
|
|
with gr.Column(): |
|
                        test_performance = gr.Textbox(lines=16, label="Test set performance, use {task: [mean,std]} format e.g. {'rel-amazon/user-churn': [0.352,0.023], 'rel-amazon/user-ltv': [0.304,0.022], ...}")

                        valid_performance = gr.Textbox(lines=16, label="Validation set performance, use {task: [mean,std]} format e.g. {'rel-amazon/user-churn': [0.352,0.023], 'rel-amazon/user-ltv': [0.304,0.022], ...}")
|
|
|
|
|
submit_button = gr.Button("Submit Eval") |
|
submission_result = gr.Markdown() |
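
            # Wire the form fields to add_new_eval; the Markdown component
            # shows the returned status message.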
|
submit_button.click( |
|
add_new_eval, |
|
[ |
|
author_name_textbox, |
|
email_textbox, |
|
relbench_version_textbox, |
|
model_name_textbox, |
|
official_or_not, |
|
test_performance, |
|
valid_performance, |
|
paper_url_textbox, |
|
github_url_textbox, |
|
|
|
honor_code, |
|
task_track |
|
], |
|
submission_result, |
|
) |
|
|
|
|
|
scheduler = BackgroundScheduler() |
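# Restart the Space every 30 minutes so it re-downloads fresh queue and results data.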
|
scheduler.add_job(restart_space, "interval", seconds=1800) |
|
scheduler.start() |
|
demo.queue(default_concurrency_limit=40).launch() |