Spaces
BenchmarkBot committed · 1cbd09d
1 Parent(s): c8763bd

minimal

app.py CHANGED
@@ -6,14 +6,13 @@ from huggingface_hub import Repository
 from apscheduler.schedulers.background import BackgroundScheduler
 
 from src.assets.text_content import *
-from src.assets.css_html_js import custom_css
+from src.assets.css_html_js import custom_css
 
 OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN", None)
 
 LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
 LLM_PERF_DATASET_REPO = "optimum/llm-perf"
 
-
 api = HfApi()
 
 
@@ -26,6 +25,7 @@ def restart_space():
 def load_all_info_from_hub():
     llm_perf_repo = None
     if OPTIMUM_TOKEN:
+        print("Loading LLM-Perf-Dataset from Hub...")
         llm_perf_repo = Repository(
             local_dir="./llm-perf/",
             clone_from=LLM_PERF_DATASET_REPO,
@@ -53,6 +53,7 @@ def get_leaderboard_df():
     llm_perf_repo.git_pull()
 
     df = pd.read_csv("./llm-perf/reports/cuda_1_100/inference_report.csv")
+    print(df.columns)
 
     return df
 
@@ -72,36 +73,16 @@ with demo:
     gr.HTML(TITLE)
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
 
+    print("rendering tab...")
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("🖥️ LLM-Perf Benchmark", elem_id="llm-perf-benchmark-tab-table", id=0):
             leaderboard_table_lite = gr.components.Dataframe(
                 value=leaderboard_df,
                 headers=leaderboard_df.columns.tolist(),
-                # datatype=TYPES_LITE,
                 max_rows=None,
                 elem_id="leaderboard-table-lite",
             )
 
-    with gr.Row():
-        with gr.Column():
-            with gr.Accordion("📙 Citation", open=False):
-                citation_button = gr.Textbox(
-                    value=CITATION_BUTTON_TEXT,
-                    label=CITATION_BUTTON_LABEL,
-                    elem_id="citation-button",
-                ).style(show_copy_button=True)
-        with gr.Column():
-            with gr.Accordion("✨ CHANGELOG", open=False):
-                changelog = gr.Markdown(
-                    CHANGELOG_TEXT, elem_id="changelog-text")
-
-    dummy = gr.Textbox(visible=False)
-    demo.load(
-        dummy,
-        tabs,
-        _js=get_window_url_params,
-    )
-
 scheduler = BackgroundScheduler()
 scheduler.add_job(restart_space, "interval", seconds=3600)
 scheduler.start()
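For context, here is a rough sketch of the data-loading path touched by the second and third hunks, assuming load_all_info_from_hub clones the dataset repo with OPTIMUM_TOKEN and get_leaderboard_df pulls before reading the report CSV. The repo_type and token keyword arguments, and passing the repo handle explicitly, are assumptions; the hunks cut off before the end of the Repository(...) call.

# Hedged sketch (not part of this commit) of the loading path around
# load_all_info_from_hub() and get_leaderboard_df().
import os
import pandas as pd
from huggingface_hub import Repository

OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN", None)
LLM_PERF_DATASET_REPO = "optimum/llm-perf"

def load_all_info_from_hub():
    # Clone the benchmark dataset repo locally when a token is available.
    llm_perf_repo = None
    if OPTIMUM_TOKEN:
        print("Loading LLM-Perf-Dataset from Hub...")
        llm_perf_repo = Repository(
            local_dir="./llm-perf/",
            clone_from=LLM_PERF_DATASET_REPO,
            repo_type="dataset",  # assumption, not visible in the diff
            token=OPTIMUM_TOKEN,  # assumption, not visible in the diff
        )
    return llm_perf_repo

def get_leaderboard_df(llm_perf_repo):
    # Refresh the local clone, then load the inference report into a DataFrame.
    llm_perf_repo.git_pull()
    df = pd.read_csv("./llm-perf/reports/cuda_1_100/inference_report.csv")
    print(df.columns)
    return df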
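The unchanged scheduler lines at the end of the last hunk are what keep the leaderboard fresh. restart_space itself is not shown in this diff; the sketch below assumes it wraps HfApi.restart_space on the leaderboard Space, with the repo_id/token wiring being a guess.

# Hedged sketch of the hourly refresh loop; restart_space's body is not shown
# in this diff and is assumed to call HfApi.restart_space.
import os
from huggingface_hub import HfApi
from apscheduler.schedulers.background import BackgroundScheduler

OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN", None)
LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"

api = HfApi()

def restart_space():
    # Restarting the Space re-runs app.py, which re-clones/pulls the dataset
    # repo and rebuilds the leaderboard table.
    api.restart_space(repo_id=LLM_PERF_LEADERBOARD_REPO, token=OPTIMUM_TOKEN)

scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=3600)  # refresh every hour
scheduler.start()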