MirakramAghalarov committed on
Commit 254da86 · 1 Parent(s): 8526c25

added about and changed column names

Files changed (2)
  1. app.py +10 -1
  2. src/display/about.py +20 -4
app.py CHANGED

@@ -8,6 +8,8 @@ os.environ['CURL_CA_BUNDLE'] = ''
 from src.display.about import (
     EVALUATION_QUEUE_TEXT,
     INTRODUCTION_TEXT,
+    LLM_BENCHMARKS_TEXT,
+    LLM_DATASET_TEXT,
     TITLE,
 )
 from src.display.css_html_js import custom_css
@@ -232,6 +234,13 @@ with demo:
                 datatype=EVAL_TYPES,
                 row_count=5,
             )
+        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
+            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
+
+        with gr.TabItem("📝 Evaluation Datasets", elem_id="llm-benchmark-tab-table", id=4):
+            gr.Markdown(LLM_DATASET_TEXT, elem_classes="markdown-text")
+
+
         with gr.Row():
             gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
 
@@ -246,7 +255,7 @@ with demo:
                 choices=['safetensors', 'gguf'],
                 label="Weights type",
                 multiselect=False,
-                value='safetensors',
+                value='safgit petensors',
                 interactive=True,
             )
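For context on what these hunks do: the commit imports the two new markdown constants and renders each in its own Gradio tab, and the last hunk touches the default of the weights-type selector. Below is a minimal runnable sketch of the same pattern, assuming placeholder markdown contents and a `gr.Dropdown` for the selector (the component type and its placement are guesses from this diff, not the leaderboard's actual layout):

```python
import gradio as gr

# Stand-ins for the constants app.py imports from src.display.about.
LLM_BENCHMARKS_TEXT = "## How it works"
LLM_DATASET_TEXT = "## Evaluation Datasets"

with gr.Blocks() as demo:
    with gr.Tabs():
        # Each new constant gets its own tab, as in the second hunk above.
        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
        with gr.TabItem("📝 Evaluation Datasets", elem_id="llm-benchmark-tab-table", id=4):
            gr.Markdown(LLM_DATASET_TEXT, elem_classes="markdown-text")

    # The weights-type selector touched by the last hunk: a single-select
    # dropdown whose default value should be one of the listed choices.
    weight_type = gr.Dropdown(
        choices=['safetensors', 'gguf'],
        label="Weights type",
        multiselect=False,
        value='safetensors',
        interactive=True,
    )

if __name__ == "__main__":
    demo.launch()
```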
src/display/about.py CHANGED

@@ -12,10 +12,14 @@ class Task:
 class Tasks(Enum):
     # task_key in the json file, metric_key in the json file, name to display in the leaderboard
     task0 = Task("MMLU", "metric_name", "MMLU")
-    task1 = Task("task_1", "metric_name", "task_1")
-    task2 = Task("task_2", "metric_name", "task_2")
-    task3 = Task("task_3", "metric_name", "task_3")
-    task4 = Task("task_4", "metric_name", "task_4")
+    task1 = Task("Synthetic_QA", "metric_name", "Synthetic_QA")
+    task2 = Task("Support_MC", "metric_name", "Support_MC")
+    task3 = Task("Context_QA", "metric_name", "Context_QA")
+    task4 = Task("Banking_MC", "metric_name", "Banking_MC")
+    task5 = Task("ARC", "metric_name", "ARC")
+    task6 = Task("Binary_QA", "metric_name", "Binary_QA")
+    task7 = Task("ANL_Quad", "metric_name", "ANL_Quad")
+
 
 
 # Your leaderboard name
@@ -31,6 +35,18 @@ If you have a fine-tuned Azerbaijani LLM, submit it for evaluation!
 
 """
 
+LLM_BENCHMARKS_TEXT = f"""
+## How it works
+## Reproducibility
+HERE we have about part
+"""
+
+LLM_DATASET_TEXT = f"""
+## How it works
+## Reproducibility
+HERE we have about part
+"""
+
 
 EVALUATION_QUEUE_TEXT = """
 ## Some good practices before submitting a model
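The renamed `Task` entries are what drive the leaderboard's column headers. Here is a small self-contained sketch of how such an enum is typically consumed; the `Task` field names follow the comment in the diff, and the exact dataclass in this repo may differ:

```python
from dataclasses import dataclass
from enum import Enum

@dataclass
class Task:
    benchmark: str  # task_key in the json file
    metric: str     # metric_key in the json file
    col_name: str   # name to display in the leaderboard

class Tasks(Enum):
    # A subset of the columns renamed in this commit.
    task0 = Task("MMLU", "metric_name", "MMLU")
    task1 = Task("Synthetic_QA", "metric_name", "Synthetic_QA")
    task4 = Task("Banking_MC", "metric_name", "Banking_MC")

# Leaderboard code typically iterates the enum to build the column list:
print([task.value.col_name for task in Tasks])
# -> ['MMLU', 'Synthetic_QA', 'Banking_MC']
```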