abhinav-joshi committed on
Commit 34230e7 • 1 Parent(s): 7686c55

remove additional sub sections

Files changed (2)
  1. app.py +78 -78
  2. src/about.py +0 -1
app.py CHANGED
@@ -247,88 +247,88 @@ with demo:
                 queue=True,
             )

-        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
-            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
-
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            # with gr.Row():
-            #     gr.Markdown("# ✉️✨ Submit your Results here!", elem_classes="markdown-text")
-
-            # with gr.Row():
-            #     with gr.Column():
-            #         model_name_textbox = gr.Textbox(label="Model name")
-            #         revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-            #         model_type = gr.Dropdown(
-            #             choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-            #             label="Model type",
-            #             multiselect=False,
-            #             value=None,
-            #             interactive=True,
-            #         )
-
-            #     with gr.Column():
-            #         precision = gr.Dropdown(
-            #             choices=[i.value.name for i in Precision if i != Precision.Unknown],
-            #             label="Precision",
-            #             multiselect=False,
-            #             value="float16",
-            #             interactive=True,
-            #         )
-            #         weight_type = gr.Dropdown(
-            #             choices=[i.value.name for i in WeightType],
-            #             label="Weights type",
-            #             multiselect=False,
-            #             value="Original",
-            #             interactive=True,
-            #         )
-            #         base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+        # with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
+        #     gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
+
+        # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+        #     with gr.Column():
+        #         with gr.Row():
+        #             gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
+        #         with gr.Column():
+        #             with gr.Accordion(
+        #                 f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     finished_eval_table = gr.components.Dataframe(
+        #                         value=finished_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #             with gr.Accordion(
+        #                 f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     running_eval_table = gr.components.Dataframe(
+        #                         value=running_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+
+        #             with gr.Accordion(
+        #                 f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     pending_eval_table = gr.components.Dataframe(
+        #                         value=pending_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        # with gr.Row():
+        #     gr.Markdown("# ✉️✨ Submit your Results here!", elem_classes="markdown-text")
+
+        # with gr.Row():
+        #     with gr.Column():
+        #         model_name_textbox = gr.Textbox(label="Model name")
+        #         revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+        #         model_type = gr.Dropdown(
+        #             choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+        #             label="Model type",
+        #             multiselect=False,
+        #             value=None,
+        #             interactive=True,
+        #         )
+
+        #     with gr.Column():
+        #         precision = gr.Dropdown(
+        #             choices=[i.value.name for i in Precision if i != Precision.Unknown],
+        #             label="Precision",
+        #             multiselect=False,
+        #             value="float16",
+        #             interactive=True,
+        #         )
+        #         weight_type = gr.Dropdown(
+        #             choices=[i.value.name for i in WeightType],
+        #             label="Weights type",
+        #             multiselect=False,
+        #             value="Original",
+        #             interactive=True,
+        #         )
+        #         base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")

    with gr.Accordion("Submit a new model for evaluation"):
        with gr.Row():
            with gr.Column():
                method_name_textbox = gr.Textbox(label="Method name")
                # llama, phi
-                model_family_radio = gr.Radio(["llama", "phi"], value="llama", label="Model family")
-                forget_rate_radio = gr.Radio(["1%", "5%", "10%"], value="10%", label="Forget rate")
+                # model_family_radio = gr.Radio(["llama", "phi"], value="llama", label="Model family")
+                # forget_rate_radio = gr.Radio(["1%", "5%", "10%"], value="10%", label="Forget rate")
                url_textbox = gr.Textbox(label="Url to model information")
            with gr.Column():
                organisation = gr.Textbox(label="Organisation")
@@ -341,8 +341,8 @@ with demo:
        add_new_eval,
        [
            method_name_textbox,
-            model_family_radio,
-            forget_rate_radio,
+            # model_family_radio,
+            # forget_rate_radio,
            url_textbox,
            file_output,
            organisation,
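
For context, here is a minimal, self-contained sketch of the submission flow that survives this commit. The component names and the `add_new_eval` inputs list come from the diff above; the `add_new_eval` stub, the `gr.File` type for `file_output`, the status output, and the `Blocks` scaffolding are illustrative assumptions, not the Space's actual code.

```python
import gradio as gr


def add_new_eval(method_name, url, results_file, organisation):
    # Stub for illustration: the real callback in this Space validates the
    # upload and queues it for evaluation.
    return f"Received submission '{method_name}' from {organisation}"


with gr.Blocks() as demo:
    with gr.Accordion("Submit a new model for evaluation"):
        with gr.Row():
            with gr.Column():
                method_name_textbox = gr.Textbox(label="Method name")
                # model_family_radio and forget_rate_radio were removed by
                # this commit, so only these inputs remain.
                url_textbox = gr.Textbox(label="Url to model information")
            with gr.Column():
                organisation = gr.Textbox(label="Organisation")
                file_output = gr.File(label="Results file")  # assumed widget type
        status = gr.Markdown()  # assumed output slot for the callback's message
        submit_button = gr.Button("Submit")
        submit_button.click(
            add_new_eval,
            [method_name_textbox, url_textbox, file_output, organisation],
            status,
        )

demo.launch()
```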
src/about.py CHANGED
@@ -34,7 +34,6 @@ TITLE = """<h1 align="center" id="space-title">IL-TUR Leaderboard</h1>"""

# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
-Intro text
"""

# Which evaluations are you running? how can people reproduce what you have?
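
With the placeholder gone, `INTRODUCTION_TEXT` now contains only a newline, so the landing page renders a blank introduction block. A minimal sketch of how `app.py` consumes these constants, assuming the usual leaderboard-template pattern (the import path is inferred from the file location):

```python
import gradio as gr

from src.about import INTRODUCTION_TEXT, TITLE

with gr.Blocks() as demo:
    gr.HTML(TITLE)
    # INTRODUCTION_TEXT is effectively empty after this commit, so this
    # Markdown component renders nothing visible.
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

demo.launch()
```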