clefourrier HF Staff committed on
Commit bb9b23a · 1 Parent(s): eedd038

removed some datasets

Files changed (1)
  1. train.jsonl +0 -4
train.jsonl CHANGED
@@ -201,10 +201,6 @@
  {"name": "ms_marco_regular", "hf_repo": "lighteval/ms_marco", "hf_subset": "regular", "hf_avail_splits": ["train", "validation"], "evaluation_splits": ["validation"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["trec"], "suite": ["helm"], "prompt_function": "ms_marco"}
  {"name": "ms_marco_trec", "hf_repo": "lighteval/ms_marco", "hf_subset": "trec", "hf_avail_splits": ["train", "validation"], "evaluation_splits": ["validation"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["trec"], "suite": ["helm"], "prompt_function": "ms_marco"}
  {"name": "narrativeqa", "hf_repo": "narrativeqa", "hf_subset": "default","hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 100, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "f1_score", "rouge_l", "bleu_1", "bleu_4", "toxicity", "bias"], "suite": ["helm"], "prompt_function": "narrativeqa"}
- {"name": "natural_qa_openbook_wiki", "hf_repo": "lighteval/natural_questions", "hf_subset": "default", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 300, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "f1_score", "toxicity", "bias"], "suite": ["helm"], "prompt_function": "natural_qa_openbook_wiki"}
- {"name": "natural_qa_openbook_longans", "hf_repo": "lighteval/natural_questions", "hf_subset": "default", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 300, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "f1_score", "toxicity", "bias"], "suite": ["helm"], "prompt_function": "natural_qa_openbook_longans"}
- {"name": "natural_qa_closedbook", "hf_repo": "lighteval/natural_questions", "hf_subset": "default", "hf_avail_splits": ["train", "validation"], "evaluation_splits": ["validation"], "generation_size": 300, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "f1_score", "toxicity", "bias"], "suite": ["helm"], "prompt_function": "natural_qa_closedbook"}
- {"name": "newsqa", "hf_repo": "lighteval/newsqa", "hf_subset": "default","hf_avail_splits": ["train", "validation"], "evaluation_splits": ["validation"], "generation_size": 50, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "f1_score", "bias", "toxicity"], "suite": ["helm"], "prompt_function": "newsqa"}
  {"name": "numeracy_linear_example", "hf_repo": "lighteval/numeracy", "hf_subset": "linear_example", "hf_avail_splits": ["train", "test"], "evaluation_splits": ["test"], "generation_size": 20, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "absolute_value_difference"], "suite": ["helm"], "prompt_function": "numeracy"}
  {"name": "numeracy_linear_standard", "hf_repo": "lighteval/numeracy", "hf_subset": "linear_standard", "hf_avail_splits": ["train", "test"], "evaluation_splits": ["test"], "generation_size": 20, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "absolute_value_difference"], "suite": ["helm"], "prompt_function": "numeracy"}
  {"name": "numeracy_parabola_example", "hf_repo": "lighteval/numeracy", "hf_subset": "parabola_example", "hf_avail_splits": ["train", "test"], "evaluation_splits": ["test"], "generation_size": 20, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "absolute_value_difference"], "suite": ["helm"], "prompt_function": "numeracy"}
 