fix submission closed
app.py
CHANGED
@@ -296,105 +296,105 @@ with demo:
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
             gr.Markdown(FAQ_TEXT, elem_classes="markdown-text")
 
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"❌ Failed Evaluations ({len(failed_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=failed_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    private = gr.Checkbox(False, label="Private", visible=not IS_PUBLIC)
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=ModelType.FT.to_str(" : "),
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Evalulation!")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    private,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
-            )
+        # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+        #     with gr.Column():
+        #         with gr.Row():
+        #             gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
+        #         with gr.Column():
+        #             with gr.Accordion(
+        #                 f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     finished_eval_table = gr.components.Dataframe(
+        #                         value=finished_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #             with gr.Accordion(
+        #                 f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     running_eval_table = gr.components.Dataframe(
+        #                         value=running_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+
+        #             with gr.Accordion(
+        #                 f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     pending_eval_table = gr.components.Dataframe(
+        #                         value=pending_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #             with gr.Accordion(
+        #                 f"❌ Failed Evaluations ({len(failed_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     pending_eval_table = gr.components.Dataframe(
+        #                         value=failed_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #     with gr.Row():
+        #         gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+
+        #     with gr.Row():
+        #         with gr.Column():
+        #             model_name_textbox = gr.Textbox(label="Model name")
+        #             revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+        #             private = gr.Checkbox(False, label="Private", visible=not IS_PUBLIC)
+        #             model_type = gr.Dropdown(
+        #                 choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+        #                 label="Model type",
+        #                 multiselect=False,
+        #                 value=ModelType.FT.to_str(" : "),
+        #                 interactive=True,
+        #             )
+
+        #         with gr.Column():
+        #             precision = gr.Dropdown(
+        #                 choices=[i.value.name for i in Precision if i != Precision.Unknown],
+        #                 label="Precision",
+        #                 multiselect=False,
+        #                 value="float16",
+        #                 interactive=True,
+        #             )
+        #             weight_type = gr.Dropdown(
+        #                 choices=[i.value.name for i in WeightType],
+        #                 label="Weights type",
+        #                 multiselect=False,
+        #                 value="Original",
+        #                 interactive=True,
+        #             )
+        #             base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+
+        #     submit_button = gr.Button("Submit Evalulation!")
+        #     submission_result = gr.Markdown()
+        #     submit_button.click(
+        #         add_new_eval,
+        #         [
+        #             model_name_textbox,
+        #             base_model_name_textbox,
+        #             revision_name_textbox,
+        #             precision,
+        #             private,
+        #             weight_type,
+        #             model_type,
+        #         ],
+        #         submission_result,
+        #     )
 
     with gr.Row():
         with gr.Accordion("📙 Citation", open=False):
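The commit closes submissions by commenting out the entire "Submit here!" tab. Because Gradio registers components as the `with` blocks execute at startup, the same effect can be had by gating the tab behind a single flag, which makes reopening submissions a config change instead of a 99-line uncomment. The sketch below is not the Space's actual code: the SUBMISSIONS_OPEN env var and the placeholder tab contents are assumptions for illustration.

    # Minimal sketch (assumed names, not the original app.py): toggle the
    # submission tab at build time instead of commenting it in and out.
    import os

    import gradio as gr

    # Reopen submissions by setting SUBMISSIONS_OPEN=1 on the Space;
    # no code edit or redeploy of app.py logic is needed.
    SUBMISSIONS_OPEN = os.getenv("SUBMISSIONS_OPEN", "0") == "1"

    demo = gr.Blocks()
    with demo:
        with gr.Tabs(elem_classes="tab-buttons"):
            with gr.TabItem("🏅 Leaderboard", id=0):
                gr.Markdown("Leaderboard table goes here.", elem_classes="markdown-text")
            if SUBMISSIONS_OPEN:
                # Gradio builds the UI as these blocks run, so a plain `if`
                # includes or omits the whole tab at startup.
                with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
                    gr.Markdown("Submission form goes here.", elem_classes="markdown-text")
            else:
                with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
                    gr.Markdown("Submissions are currently closed.", elem_classes="markdown-text")

    if __name__ == "__main__":
        demo.launch()

One design note: keeping a stub tab in the closed branch (rather than omitting the tab entirely) preserves the tab ids and tells visitors explicitly that submissions are paused.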