import os

import gradio as gr
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler

from src.assets.text_content import TITLE, INTRODUCTION_TEXT
from src.assets.css_html_js import custom_css, get_window_url_params
from src.utils import restart_space, load_dataset_repo, make_clickable_model
# Hub repos backing the leaderboard, and the token used to access them
LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
LLM_PERF_DATASET_REPO = "optimum/llm-perf-dataset"
OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN")

llm_perf_dataset_repo = load_dataset_repo(LLM_PERF_DATASET_REPO, OPTIMUM_TOKEN)
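# load_dataset_repo (from src.utils) is assumed to return a handle to a local
# clone of the dataset repo (e.g. a huggingface_hub.Repository), or None when
# no token is available; calling git_pull() on it below refreshes the reports.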
def get_vanilla_benchmark_df():
    # Pull the latest benchmark results before loading the report
    if llm_perf_dataset_repo:
        llm_perf_dataset_repo.git_pull()

    df = pd.read_csv(
        "./llm-perf-dataset/reports/cuda_1_100/inference_report.csv")
    # Keep only the columns displayed on the leaderboard
    df = df[["model", "backend.name", "backend.torch_dtype", "backend.quantization",
             "generate.latency(s)", "generate.throughput(tokens/s)"]]
    # Link each model name to its page on the Hugging Face Hub
    df["model"] = df["model"].apply(make_clickable_model)
    df.rename(columns={
        "model": "Model",
        "backend.name": "Backend 🏭",
        "backend.torch_dtype": "Load dtype",
        "backend.quantization": "Quantization 🗜️",
        "generate.latency(s)": "Latency (s) ⬇️",
        "generate.throughput(tokens/s)": "Throughput (tokens/s) ⬆️",
    }, inplace=True)
    # Rank models by decoding throughput, best first
    df.sort_values(by=["Throughput (tokens/s) ⬆️"],
                   ascending=False, inplace=True)
    return df
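
# For reference, make_clickable_model (imported from src.utils) is assumed to
# wrap a model id in a link to its Hub page, roughly:
#
#     def make_clickable_model(model_name):
#         link = f"https://huggingface.co/{model_name}"
#         return f'<a target="_blank" href="{link}">{model_name}</a>'
#
# (a sketch of the assumed helper, not the actual src.utils implementation)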

# Define demo interface
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("Vanilla Benchmark", elem_id="vanilla-benchmark", id=0):
            vanilla_benchmark_df = get_vanilla_benchmark_df()
            leaderboard_table_lite = gr.components.Dataframe(
                value=vanilla_benchmark_df,
                headers=vanilla_benchmark_df.columns.tolist(),
                elem_id="vanilla-benchmark",
            )
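            # Note: depending on the pinned Gradio version, rendering the
            # links produced by make_clickable_model may require passing a
            # datatype list marking the Model column as "markdown" (an
            # assumption; otherwise the links can display as raw text).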

# Restart the Space every hour so the leaderboard picks up fresh benchmark results
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=3600,
                  args=[LLM_PERF_LEADERBOARD_REPO, OPTIMUM_TOKEN])
scheduler.start()
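
# restart_space (from src.utils) is assumed to call the Hub API, e.g.
# huggingface_hub.HfApi().restart_space(repo_id, token=token), so each hourly
# restart re-clones the dataset repo and rebuilds the cached dataframe.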
# Launch demo
demo.queue(concurrency_count=40).launch()
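# Note: concurrency_count is the Gradio 3.x queue API; Gradio 4 renamed it to
# default_concurrency_limit (a version assumption, adjust for the pinned gradio).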