xeon27 committed · Commit 6410971 · 1 Parent(s): 159e996

[WIP] Add task link in description

Files changed (1):
  1. src/about.py +5 -2
src/about.py CHANGED
@@ -44,14 +44,17 @@ TITLE = """<h1 align="center" id="space-title">LLM Evaluation Leaderboard</h1>"""
 
 # What does your leaderboard evaluate?
 INTRODUCTION_TEXT = """
-This leaderboard presents the performance of selected LLM models on a set of tasks. The tasks are divided into two categories: base and agentic. The base tasks are: [ARC-Easy](https://github.com/UKGovernmentBEIS/inspect_evals/tree/main/src/inspect_evals/arc), ARC-Challenge, DROP, WinoGrande, GSM8K, HellaSwag, HumanEval, IFEval, MATH, MMLU, MMLU-Pro, GPQA-Diamond. The agentic tasks are GAIA and GDM-InterCode-CTF.
-"""
+This leaderboard presents the performance of selected LLM models on a set of tasks. The tasks are divided into two categories: base and agentic. The base tasks are:
+""" + ", ".join([f"[{task.value.col_name}]({task.value.source})" for task in Tasks if task.value.type == "base"]) + """. The agentic tasks are:
+""" + ", ".join([f"[{task.value.col_name}]({task.value.source})" for task in Tasks if task.value.type == "agentic"]) + """."""
 
 # Which evaluations are you running? how can people reproduce what you have?
 LLM_BENCHMARKS_TEXT = f"""
 ## How it works
 The following benchmarks are included:
+
 Base: [ARC-Easy](https://github.com/UKGovernmentBEIS/inspect_evals/tree/main/src/inspect_evals/arc), ARC-Challenge, DROP, WinoGrande, GSM8K, HellaSwag, HumanEval, IFEval, MATH, MMLU, MMLU-Pro, GPQA-Diamond
+
 Agentic: GAIA, GDM-InterCode-CTF
 
 ## Reproducibility
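
The new `INTRODUCTION_TEXT` builds its task links from the `Tasks` enum rather than a hardcoded string. The enum itself is not shown in this diff, so the sketch below is only an assumption about its shape: a `Task` value carrying the `col_name`, `source`, and `type` fields that the join expression references. Field names other than those three, the class layout, and the example entries and URLs are illustrative, not taken from the commit.

```python
# Hypothetical sketch of the Tasks enum this change appears to assume.
# The real definitions live elsewhere in src/about.py and may differ;
# only col_name, source, and type are referenced by the new INTRODUCTION_TEXT.
from dataclasses import dataclass
from enum import Enum

@dataclass
class Task:
    benchmark: str   # internal key (assumed field)
    metric: str      # reported metric (assumed field)
    col_name: str    # display name used as the markdown link text
    source: str      # URL used as the markdown link target
    type: str        # "base" or "agentic"

class Tasks(Enum):
    # Illustrative entries only; the real enum lists every benchmark.
    arc_easy = Task(
        "arc_easy", "accuracy", "ARC-Easy",
        "https://github.com/UKGovernmentBEIS/inspect_evals/tree/main/src/inspect_evals/arc",
        "base",
    )
    gaia = Task(
        "gaia", "accuracy", "GAIA",
        "https://github.com/UKGovernmentBEIS/inspect_evals",  # placeholder URL
        "agentic",
    )

# The join expression from the diff then renders markdown such as
# "[ARC-Easy](https://github.com/.../arc), ..." for the selected category.
base_links = ", ".join(
    f"[{task.value.col_name}]({task.value.source})"
    for task in Tasks
    if task.value.type == "base"
)
print(base_links)
```

Generating the links from the enum keeps the leaderboard description in sync with the task definitions, so adding or removing a benchmark no longer requires editing the introduction text by hand.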