import gradio as gr
from gradio_leaderboard import Leaderboard, SelectColumns, ColumnFilter
from pathlib import Path
from utils import (
    LLM_BENCHMARKS_ABOUT_TEXT, LLM_BENCHMARKS_SUBMIT_TEXT, custom_css,
    jsonl_to_dataframe, add_average_column_to_df, apply_markdown_format_for_columns,
    submit, PART_LOGO, sort_dataframe_by_column,
)
abs_path = Path(__file__).parent
# Any pandas-compatible data
leaderboard_df = jsonl_to_dataframe(str(abs_path / "leaderboard_data.jsonl"))
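# Each line of leaderboard_data.jsonl is assumed to be one JSON object per model,
# keyed by the column names listed in `all_columns` below (minus the computed average).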
average_column_name = "Average Accuracy"
columns_to_average = [
    "Part Multiple Choice", "ARC Easy", "ARC Challenge", "MMLU Pro",
    "AUT Multiple Choice Persian", "GSM8K", "Homograph Hard", "PIQA",
    "Verb Tense", "Proverb", "Homegraph Easy", "Winogrande", "General Knowledge",
]
all_columns = ["Model", average_column_name, "Precision", "#Params (B)"] + columns_to_average
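# NOTE: these labels must match the keys in leaderboard_data.jsonl exactly;
# "Homegraph Easy" is kept with the spelling assumed to be used in the data file.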
leaderboard_df = add_average_column_to_df(leaderboard_df, columns_to_average, index=3, average_column_name=average_column_name)
leaderboard_df = apply_markdown_format_for_columns(df=leaderboard_df, model_column_name="Model")
leaderboard_df = sort_dataframe_by_column(leaderboard_df, column_name=average_column_name)
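# The helpers above live in utils.py; assumed behaviour based on how they are called here:
#   add_average_column_to_df          -> inserts the mean of `columns_to_average` as a new
#                                        column at position `index`
#   apply_markdown_format_for_columns -> presumably turns the "Model" cells into markdown
#                                        links so they render in the table
#   sort_dataframe_by_column          -> sorts rows by "Average Accuracy" (presumably
#                                        descending, so the best model is on top)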
# Supported datatypes: "str", "number", "bool", "date", "markdown"
columns_data_type = ["markdown" for _ in range(len(leaderboard_df.columns))]
# columns_data_type[0] = "markdown"
NUM_MODELS = len(leaderboard_df)
with gr.Blocks(
    css=custom_css,
    theme=gr.themes.Default(
        font=["sans-serif", "ui-sans-serif", "system-ui"],
        font_mono=["monospace", "ui-monospace", "Consolas"],
    ),
) as demo:
    # gr.HTML(PART_LOGO)
    gr.Markdown("# Open Persian LLM Leaderboard")
    gr.Markdown(f"- **Total Models**: {NUM_MODELS}")
    with gr.Tab("Persian Leaderboard"):
        # Main results table; "model_name_for_query" is presumably a plain-text copy of
        # the model name, used only by the search box and hidden from display.
        Leaderboard(
            value=leaderboard_df,
            datatype=columns_data_type,
            select_columns=SelectColumns(
                default_selection=all_columns,
                cant_deselect=["Model"],
                label="Select Columns to Show",
            ),
            search_columns=["model_name_for_query"],
            hide_columns=["model_name_for_query"],
            filter_columns=["Precision", "#Params (B)"],
        )
    with gr.Tab("About"):
        gr.Markdown(LLM_BENCHMARKS_ABOUT_TEXT)
    with gr.Tab("Submit"):
        gr.Markdown(LLM_BENCHMARKS_SUBMIT_TEXT)
        model_name = gr.Textbox(label="Model name")
        model_id = gr.Textbox(label="username/model-id, e.g. PartAI/Dorna-Llama3-8B-Instruct")
        contact_email = gr.Textbox(label="Contact E-Mail")
        submit_btn = gr.Button("Submit")
        submit_btn.click(submit, inputs=[model_name, model_id, contact_email], outputs=[])
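        # `submit` comes from utils; it is assumed to record the submission (model name,
        # repo id, contact e-mail) for the maintainers. outputs=[] means the click
        # handler does not update any UI component.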
gr.Markdown("""
Please find more information about Part DP AI on [partdp.ai](https://partdp.ai)""")
if __name__ == "__main__":
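    # Launch-option sketch (assumptions, not required here): demo.launch(share=True)
    # would also publish a temporary public link, and server_name="0.0.0.0" exposes
    # the app on the local network.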
    demo.launch()