import pandas as pd
import gradio as gr
|
|
def make_default_md_1():
    link_color = "#1976D2"
    leaderboard_md = f"""
# 🏆 EffiBench Leaderboard 🏆
<a href='https://arxiv.org/abs/2402.02037' style='color: {link_color}; text-decoration: none;'>Paper</a> |
<a href='https://github.com/huangd1999/EffiBench' style='color: {link_color}; text-decoration: none;'>GitHub</a> |
<a href='https://github.com/huangd1999/EffiBench/tree/main/data' style='color: {link_color}; text-decoration: none;'>Dataset</a>
"""
    return leaderboard_md
|
|
def make_default_md_2():
    leaderboard_md = """
🤗 **Consider [filing a request](https://github.com/huangd1999/EffiBench/issues/new?assignees=&labels=model+eval&projects=&template=model_eval_request.yml&title=%F0%9F%92%A1+%5BREQUEST%5D+-+%3CMODEL_NAME%3E) to add your model to our leaderboard!**
"""
    return leaderboard_md
|
|
leaderboard_md = """
Three benchmarks are displayed: **Arena Elo**, **MT-Bench**, and **MMLU**.
- [Chatbot Arena](https://chat.lmsys.org/?arena) - a crowdsourced, randomized battle platform. We use 500K+ user votes to compute model strength.
- [MT-Bench](https://arxiv.org/abs/2306.05685): a set of challenging multi-turn questions. We use GPT-4 to grade the model responses.
- [MMLU](https://arxiv.org/abs/2009.03300) (5-shot): a test to measure a model's multitask accuracy on 57 tasks.

💻 Code: The MT-Bench scores (single-answer grading on a scale of 10) are computed by [fastchat.llm_judge](https://github.com/lm-sys/FastChat/tree/main/fastchat/llm_judge).
The MMLU scores are mostly computed by [InstructEval](https://github.com/declare-lab/instruct-eval).
Higher values are better for all benchmarks. Empty cells mean the score is not available.
"""
|
|
acknowledgment_md = """
### Terms of Service

Users are required to agree to the following terms before using the service:

The service is a research preview. It only provides limited safety measures and may generate offensive content.
It must not be used for any illegal, harmful, violent, racist, or sexual purposes.
Please do not upload any private information.
The service collects user dialogue data, including both text and images, and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) or a similar license.

### Acknowledgment

We thank [UC Berkeley SkyLab](https://sky.cs.berkeley.edu/), [Kaggle](https://www.kaggle.com/), [MBZUAI](https://mbzuai.ac.ae/), [a16z](https://www.a16z.com/), [Together AI](https://www.together.ai/), [Hyperbolic](https://hyperbolic.xyz/), [Anyscale](https://www.anyscale.com/), and [HuggingFace](https://huggingface.co/) for their generous [sponsorship](https://lmsys.org/donations/).

<div class="sponsor-image-about">
    <img src="https://storage.googleapis.com/public-arena-asset/skylab.png" alt="SkyLab">
    <img src="https://storage.googleapis.com/public-arena-asset/kaggle.png" alt="Kaggle">
    <img src="https://storage.googleapis.com/public-arena-asset/mbzuai.jpeg" alt="MBZUAI">
    <img src="https://storage.googleapis.com/public-arena-asset/a16z.jpeg" alt="a16z">
    <img src="https://storage.googleapis.com/public-arena-asset/together.png" alt="Together AI">
    <img src="https://storage.googleapis.com/public-arena-asset/hyperbolic_logo.png" alt="Hyperbolic">
    <img src="https://storage.googleapis.com/public-arena-asset/anyscale.png" alt="AnyScale">
    <img src="https://storage.googleapis.com/public-arena-asset/huggingface.png" alt="HuggingFace">
</div>
"""
|
|
citation_md = """
### Citation

Please cite the following paper if you find our leaderboard or dataset helpful.
```
@misc{chiang2024chatbot,
    title={Chatbot Arena: An Open Platform for Evaluating LLMs by Human Preference},
    author={Wei-Lin Chiang and Lianmin Zheng and Ying Sheng and Anastasios Nikolas Angelopoulos and Tianle Li and Dacheng Li and Hao Zhang and Banghua Zhu and Michael Jordan and Joseph E. Gonzalez and Ion Stoica},
    year={2024},
    eprint={2403.04132},
    archivePrefix={arXiv},
    primaryClass={cs.AI}
}
```
"""
|
|
def build_leaderboard_tab(leaderboard_table_file):
    gr.Markdown(make_default_md_1(), elem_id="leaderboard_markdown")
    gr.Markdown(make_default_md_2(), elem_id="leaderboard_markdown")
|
df = pd.read_csv(leaderboard_table_file) |
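    # Expected CSV layout (an assumption, inferred from the filtering below):
    # one row per result, with literal "Dataset" and "Timeout" columns plus
    # whatever identifier and metric columns the leaderboard reports.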
|
    def filter_leaderboard(dataset, timeout):
        # Keep only the rows matching the selected dataset/timeout pair, and
        # drop the filter columns themselves from the displayed table.
        filtered_df = df[(df['Timeout'] == timeout) & (df['Dataset'] == dataset)]
        return filtered_df.drop(columns=['Timeout', 'Dataset'])

    # Dropdown choices come straight from the data.
    datasets = df['Dataset'].unique().tolist()
    timeouts = df['Timeout'].unique().tolist()
|
    with gr.Tab("Leaderboard"):
        gr.Markdown(leaderboard_md, elem_id="leaderboard_markdown")
        with gr.Row():
            dataset_dropdown = gr.Dropdown(label="Dataset", choices=datasets, value=datasets[0])
            timeout_dropdown = gr.Dropdown(label="Timeout", choices=timeouts, value=timeouts[0])

        # Seed the table with the first dataset/timeout combination.
        initial_data = filter_leaderboard(datasets[0], timeouts[0])
        leaderboard = gr.Dataframe(value=initial_data)

        # Re-filter the table whenever either dropdown changes.
        dataset_dropdown.change(fn=filter_leaderboard, inputs=[dataset_dropdown, timeout_dropdown], outputs=leaderboard)
        timeout_dropdown.change(fn=filter_leaderboard, inputs=[dataset_dropdown, timeout_dropdown], outputs=leaderboard)
|
    with gr.Accordion("Citation", open=True):
        gr.Markdown(citation_md, elem_id="leaderboard_markdown")

    gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
|
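# Minimal usage sketch (not part of the original module), assuming the CSV
# described above is saved as "leaderboard.csv" next to this file; the path
# and the Blocks title are placeholders.
if __name__ == "__main__":
    with gr.Blocks(title="EffiBench Leaderboard") as demo:
        build_leaderboard_tab("leaderboard.csv")
    demo.launch()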