clefourrier (HF Staff) committed
Commit 6876448 · 1 Parent(s): bf7fe4d

typo for lsat_qa

Files changed (1)
  1. train.jsonl +5 -5
train.jsonl CHANGED
@@ -179,11 +179,11 @@
  {"name": "lextreme_lener_br", "hf_repo": "lighteval/lextreme", "hf_subset": "lener_br", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 338, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "f1_score"], "suite": ["helm", "lextreme_scenario"], "prompt_function": "lextreme_lener_br"}
  {"name": "lextreme_mapa_coarse", "hf_repo": "lighteval/lextreme", "hf_subset": "mapa_coarse", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 274, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "f1_score"], "suite": ["helm", "lextreme_scenario"], "prompt_function": "lextreme_mapa_coarse"}
  {"name": "lextreme_mapa_fine", "hf_repo": "lighteval/lextreme", "hf_subset": "mapa_fine", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 274, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "f1_score"], "suite": ["helm", "lextreme_scenario"], "prompt_function": "lextreme_mapa_fine"}
- {"name": "lsat_qa_grouping", "hf_repo": "ligheval/lsat_qa", "hf_subset": "grouping", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm", "lsat_qa_scenario"], "prompt_function": "lsat_qa"}
- {"name": "lsat_qa_ordering", "hf_repo": "ligheval/lsat_qa", "hf_subset": "ordering", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm", "lsat_qa_scenario"], "prompt_function": "lsat_qa"}
- {"name": "lsat_qa_assignment", "hf_repo": "ligheval/lsat_qa", "hf_subset": "assignment", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm", "lsat_qa_scenario"], "prompt_function": "lsat_qa"}
- {"name": "lsat_qa_miscellaneous", "hf_repo": "ligheval/lsat_qa", "hf_subset": "miscellaneous", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm", "lsat_qa_scenario"], "prompt_function": "lsat_qa"}
- {"name": "lsat_qa_all", "hf_repo": "ligheval/lsat_qa", "hf_subset": "all", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm", "lsat_qa_scenario"], "prompt_function": "lsat_qa"}
+ {"name": "lsat_qa_grouping", "hf_repo": "lighteval/lsat_qa", "hf_subset": "grouping", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm", "lsat_qa_scenario"], "prompt_function": "lsat_qa"}
+ {"name": "lsat_qa_ordering", "hf_repo": "lighteval/lsat_qa", "hf_subset": "ordering", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm", "lsat_qa_scenario"], "prompt_function": "lsat_qa"}
+ {"name": "lsat_qa_assignment", "hf_repo": "lighteval/lsat_qa", "hf_subset": "assignment", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm", "lsat_qa_scenario"], "prompt_function": "lsat_qa"}
+ {"name": "lsat_qa_miscellaneous", "hf_repo": "lighteval/lsat_qa", "hf_subset": "miscellaneous", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm", "lsat_qa_scenario"], "prompt_function": "lsat_qa"}
+ {"name": "lsat_qa_all", "hf_repo": "lighteval/lsat_qa", "hf_subset": "all", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm", "lsat_qa_scenario"], "prompt_function": "lsat_qa"}
  {"name": "math_algebra", "hf_repo": "lighteval/math", "hf_subset": "algebra", "hf_avail_splits": ["train", "test"], "evaluation_splits": ["test"], "generation_size": 20, "stop_sequence": ["$"], "metric": ["toxicity", "bias", "math"], "suite": ["helm", "math_scenario"], "prompt_function": "math_helm"}
  {"name": "math_counting_and_probability", "hf_repo": "lighteval/math", "hf_subset": "counting_and_probability", "hf_avail_splits": ["train", "test"], "evaluation_splits": ["test"], "generation_size": 20, "stop_sequence": ["$"], "metric": ["toxicity", "bias", "math"], "suite": ["helm", "math_scenario"], "prompt_function": "math_helm"}
  {"name": "math_geometry", "hf_repo": "lighteval/math", "hf_subset": "geometry", "hf_avail_splits": ["train", "test"], "evaluation_splits": ["test"], "generation_size": 20, "stop_sequence": ["$"], "metric": ["toxicity", "bias", "math"], "suite": ["helm", "math_scenario"], "prompt_function": "math_helm"}