Update app.py
app.py CHANGED
@@ -158,6 +158,14 @@ with gr.Blocks(css=LEADERBOARD_CSS) as demo:
         with gr.TabItem("π Whisper Model Leaderboard", elem_id="whisper-backends-tab", id=1):
             gr.Markdown("## Whisper Model Performance Across Different Backends", elem_classes="markdown-text")
             gr.Markdown("This table shows how different Whisper model implementations compare in terms of performance and speed.", elem_classes="markdown-text")
+
+            gr.Markdown(
+                """
+                * For CUDA tests, we used an **NVIDIA A100-SXM4-40GB GPU** with **CUDA 12.6** with a batch size of 64.
+                * For Metal tests, we used a **macOS ARM64 192GB 76-core Mac Studio M2-Ultra** with **macOS 15** with a batch size of 1.
+                """,
+                elem_classes="markdown-text"
+            )

             with gr.Row():
                 backend_filter = gr.Dropdown(
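The hunk cuts off as `backend_filter = gr.Dropdown(` opens, so the dropdown's arguments and how it drives the table are not part of this diff. Purely as a hedged sketch of the usual Gradio pattern, the example below wires a backend dropdown to a leaderboard dataframe; the choices, sample data, and the `filter_by_backend` helper are illustrative assumptions, not code from this app.

```python
# Hypothetical sketch only: shows the common Gradio pattern of filtering a
# leaderboard table from a dropdown. Everything except the name
# `backend_filter` (which appears in the diff) is an assumption.
import gradio as gr
import pandas as pd

# Assumed placeholder data; the real app loads its own benchmark results.
WHISPER_DF = pd.DataFrame({
    "Model": ["whisper-large-v3", "whisper-large-v3"],
    "Backend": ["CUDA", "Metal"],
    "RTF": [0.05, 0.18],
})

def filter_by_backend(backend: str) -> pd.DataFrame:
    """Return only rows for the selected backend; 'All' keeps every row."""
    if backend == "All":
        return WHISPER_DF
    return WHISPER_DF[WHISPER_DF["Backend"] == backend]

with gr.Blocks() as demo:
    with gr.Row():
        backend_filter = gr.Dropdown(
            choices=["All", "CUDA", "Metal"],
            value="All",
            label="Backend",
        )
    table = gr.Dataframe(value=WHISPER_DF)
    # Re-run the filter and refresh the table whenever the selection changes.
    backend_filter.change(filter_by_backend, inputs=backend_filter, outputs=table)

if __name__ == "__main__":
    demo.launch()
```

With this pattern, the `change` event simply recomputes the dataframe on every selection, which keeps the UI logic stateless and easy to extend with additional filters.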