galb-dai committed · Commit d7db717 · 1 Parent(s): 9dcf0eb

Plotly. Landing page.

Files changed (3):
  1. app.py +94 -3
  2. requirements.txt +2 -1
  3. src/display/css_html_js.py +4 -0
app.py CHANGED
@@ -2,6 +2,7 @@
 
 import gradio as gr
 import pandas as pd
+import plotly.graph_objects as go  # NEW: for interactive chart
 from apscheduler.schedulers.background import BackgroundScheduler
 from gradio_leaderboard import Leaderboard, SelectColumns
 from huggingface_hub import whoami
@@ -205,6 +206,69 @@ def _select_example_tab(choice: str):
     )
 
 
+# === Static, made-up results for the landing chart (not tied to leaderboard) ===
+TIER_TOTALS = {"Warmup": 100, "Tier 1": 100, "Tier 2": 20}  # dataset sizes
+MODELS_ORDER = ["GPT-5", "Gemini 2.5 Pro", "Grok 4", "Claude Opus 4", "o3 Pro"]
+STATIC_RESULTS = {
+    "Warmup": {
+        "GPT-5": 95,
+        "Gemini 2.5 Pro": 90,
+        "Grok 4": 84,
+        "Claude Opus 4": 92,
+        "o3 Pro": 88,
+    },
+    "Tier 1": {
+        "GPT-5": 38,
+        "Gemini 2.5 Pro": 30,
+        "Grok 4": 24,
+        "Claude Opus 4": 35,
+        "o3 Pro": 28,
+    },
+    "Tier 2": {
+        "GPT-5": 1,
+        "Gemini 2.5 Pro": 0,
+        "Grok 4": 0,
+        "Claude Opus 4": 0,
+        "o3 Pro": 0,
+    },
+}
+
+
+def build_accuracy_figure(tier: str):
+    """Create an interactive bar chart with hover text showing 'solved / total'."""
+    results = STATIC_RESULTS.get(tier, {})
+    total = TIER_TOTALS[tier]
+    x = MODELS_ORDER
+    y = [results[m] for m in x]
+    hover = [f"{m}<br><b>{v}/{total}</b> problems solved" for m, v in zip(x, y)]
+
+    fig = go.Figure(
+        data=[
+            go.Bar(
+                x=x,
+                y=y,
+                text=[f"{v}/{total}" for v in y],
+                textposition="auto",
+                hovertext=hover,
+                hoverinfo="text",
+                marker_line_width=0.5,
+            )
+        ]
+    )
+    fig.update_layout(
+        template="plotly_white",
+        margin=dict(l=30, r=20, t=10, b=40),
+        yaxis=dict(title="# Problems Solved", range=[0, total], dtick=max(5, total // 10)),
+        xaxis=dict(title=None),
+        height=420,
+    )
+    return fig
+
+
+# Precompute initial figure (Warmup)
+_initial_accuracy_fig = build_accuracy_figure("Warmup")
+
+
 # Force light theme even if HF user prefers dark
 blocks = gr.Blocks(
     css=custom_css,
@@ -214,7 +278,34 @@ blocks = gr.Blocks(
 with blocks:
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.TabItem("What is FormulaOne", id=0, elem_id="what-is-tab"):
+        # === NEW LANDING TAB (first) ===
+        with gr.TabItem("Model Accuracy on FormulaOne", id=0, elem_id="landing-accuracy-tab"):
+            gr.Markdown(
+                "The chart below summarizes static (non-live) results for model performance on FormulaOne.",
+                elem_classes="markdown-text",
+            )
+
+            # Selector aligned to the top-right (see CSS)
+            with gr.Row(elem_id="f1-tier-select-row"):
+                tier_selector = gr.Radio(
+                    choices=list(TIER_TOTALS.keys()),
+                    value="Warmup",
+                    label=None,
+                    show_label=False,
+                    elem_id="f1-tier-select",
+                )
+
+            accuracy_plot = gr.Plot(value=_initial_accuracy_fig)
+
+            # Wire selector → plot
+            tier_selector.change(
+                lambda t: build_accuracy_figure(t),
+                inputs=tier_selector,
+                outputs=accuracy_plot,
+            )
+
+        # Existing "What is FormulaOne" tab
+        with gr.TabItem("What is FormulaOne", id=1, elem_id="what-is-tab"):
 
             gr.Image(
                 "assets/banner.png",
@@ -336,7 +427,7 @@ with blocks:
            gr.HTML(WHAT_IS_F1_HTML_AFTER_TIER1FIG_TAIL)
 
        # Rename tab to "Leaderboard" and cap at 800px width
-        with gr.TabItem("Leaderboard", elem_id="formulaone-leaderboard-tab-table", id=1):
+        with gr.TabItem("Leaderboard", elem_id="formulaone-leaderboard-tab-table", id=2):
             gr.Markdown(
                 """
                 Welcome to the FormulaOne leaderboard. This table tracks the performance of various systems on the FormulaOne benchmark.
@@ -348,7 +439,7 @@ with blocks:
             assert leaderboard_df is not None
             leaderboard_component = init_leaderboard(leaderboard_df)
 
-        with gr.TabItem("Submit Solutions", elem_id="formulaone-submit-tab-table", id=2):
+        with gr.TabItem("Submit Solutions", elem_id="formulaone-submit-tab-table", id=3):
             logger.info("Tab submission")
             with gr.Column():
                 with gr.Row():
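The hover pattern used above (per-bar `hovertext` plus `hoverinfo="text"`) can be checked outside the Space. A minimal, self-contained sketch with placeholder labels and counts (not benchmark data), assuming only that plotly is installed:

```python
import plotly.graph_objects as go

# Placeholder data for a quick local check; not FormulaOne results.
labels = ["Model A", "Model B", "Model C"]
solved = [7, 4, 1]
total = 10

fig = go.Figure(
    go.Bar(
        x=labels,
        y=solved,
        text=[f"{v}/{total}" for v in solved],
        textposition="auto",
        hovertext=[f"{m}<br><b>{v}/{total}</b> problems solved" for m, v in zip(labels, solved)],
        hoverinfo="text",
    )
)
fig.update_layout(template="plotly_white", yaxis=dict(title="# Problems Solved", range=[0, total]))
fig.write_html("hover_check.html")  # or fig.show() in an interactive session
```

This mirrors the trace options `build_accuracy_figure` sets; the real tier data and layout live in app.py above.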
requirements.txt CHANGED
@@ -14,4 +14,5 @@ python-dateutil
 tqdm
 transformers
 tokenizers>=0.15.0
-sentencepiece
+sentencepiece
+plotly>=5
src/display/css_html_js.py CHANGED
@@ -18,6 +18,10 @@ custom_css = """
 /* requested 710px */
 #f1-examples { max-width: 710px; margin: 0 auto; }
 
+/* NEW: landing tab width + tier selector alignment */
+#landing-accuracy-tab { max-width: 800px; margin-left: auto; margin-right: auto; }
+#f1-tier-select-row { justify-content: flex-end; margin-bottom: 6px; }
+
 /* Text */
 .f1-p, .f1-li { line-height: 1.75; color: #374151; text-wrap: pretty; overflow-wrap: break-word; hyphens: auto; }
 
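The `#f1-tier-select-row` rule works because Gradio rows render as flex containers, so `justify-content: flex-end` pushes the radio selector to the right edge. A minimal sketch of the elem_id-to-CSS hookup, using a placeholder id rather than the ones above:

```python
import gradio as gr

# "demo-row" is a placeholder elem_id, not one used by the Space.
css = "#demo-row { justify-content: flex-end; }"

with gr.Blocks(css=css) as demo:
    with gr.Row(elem_id="demo-row"):  # the Row becomes a flex container in the DOM
        gr.Button("Right-aligned")

demo.launch()
```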