jihoo-kim committed
Commit f0f23f3 • 1 Parent(s): 7776cef

fix submission closed

Files changed (1)
  1. app.py +99 -99
app.py CHANGED
@@ -296,105 +296,105 @@ with demo:
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
             gr.Markdown(FAQ_TEXT, elem_classes="markdown-text")
 
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"❌ Failed Evaluations ({len(failed_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            failed_eval_table = gr.components.Dataframe(
-                                value=failed_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    private = gr.Checkbox(False, label="Private", visible=not IS_PUBLIC)
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=ModelType.FT.to_str(" : "),
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Evaluation!")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    private,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
-            )
+        # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+        #     with gr.Column():
+        #         with gr.Row():
+        #             gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
+        #         with gr.Column():
+        #             with gr.Accordion(
+        #                 f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     finished_eval_table = gr.components.Dataframe(
+        #                         value=finished_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #             with gr.Accordion(
+        #                 f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     running_eval_table = gr.components.Dataframe(
+        #                         value=running_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+
+        #             with gr.Accordion(
+        #                 f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     pending_eval_table = gr.components.Dataframe(
+        #                         value=pending_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #             with gr.Accordion(
+        #                 f"❌ Failed Evaluations ({len(failed_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     failed_eval_table = gr.components.Dataframe(
+        #                         value=failed_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #     with gr.Row():
+        #         gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+
+        #     with gr.Row():
+        #         with gr.Column():
+        #             model_name_textbox = gr.Textbox(label="Model name")
+        #             revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+        #             private = gr.Checkbox(False, label="Private", visible=not IS_PUBLIC)
+        #             model_type = gr.Dropdown(
+        #                 choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+        #                 label="Model type",
+        #                 multiselect=False,
+        #                 value=ModelType.FT.to_str(" : "),
+        #                 interactive=True,
+        #             )
+
+        #         with gr.Column():
+        #             precision = gr.Dropdown(
+        #                 choices=[i.value.name for i in Precision if i != Precision.Unknown],
+        #                 label="Precision",
+        #                 multiselect=False,
+        #                 value="float16",
+        #                 interactive=True,
+        #             )
+        #             weight_type = gr.Dropdown(
+        #                 choices=[i.value.name for i in WeightType],
+        #                 label="Weights type",
+        #                 multiselect=False,
+        #                 value="Original",
+        #                 interactive=True,
+        #             )
+        #             base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+
+        #     submit_button = gr.Button("Submit Evaluation!")
+        #     submission_result = gr.Markdown()
+        #     submit_button.click(
+        #         add_new_eval,
+        #         [
+        #             model_name_textbox,
+        #             base_model_name_textbox,
+        #             revision_name_textbox,
+        #             precision,
+        #             private,
+        #             weight_type,
+        #             model_type,
+        #         ],
+        #         submission_result,
+        #     )
 
     with gr.Row():
         with gr.Accordion("📙 Citation", open=False):
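The commit closes submissions by commenting out the entire "🚀 Submit here!" tab. As a minimal sketch of an alternative (not part of this commit), the same behavior could be gated behind a module-level flag, so reopening submissions later is a one-line edit rather than uncommenting ~100 lines. The SUBMISSIONS_OPEN flag and the placeholder notice below are hypothetical names, not identifiers from app.py:

import gradio as gr

SUBMISSIONS_OPEN = False  # hypothetical flag, not in app.py; flip to True to reopen

demo = gr.Blocks()
with demo:
    with gr.Tabs(elem_classes="tab-buttons"):
        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
            if SUBMISSIONS_OPEN:
                # build the queue accordions and submission form from the diff above
                pass
            else:
                # shown while submissions are closed
                gr.Markdown("⛔ Submissions are temporarily closed.", elem_classes="markdown-text")

Either approach removes the form from the UI; the flag variant keeps the tab visible with a notice and keeps the submission code on a live path, so it is still parsed and checked rather than sitting in comments.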