clefourrier HF Staff committed on
Commit d5bd359 · 1 Parent(s): 9ca6b68

Update train.jsonl

Files changed (1):
  1. train.jsonl +2 -2
train.jsonl CHANGED
@@ -191,8 +191,8 @@
  {"name": "med_paragraph_simplification", "hf_repo": "lighteval/med_paragraph_simplification", "hf_subset": "default","hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 512, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "f1_score", "rouge_l", "bleu_1", "bleu_4", "toxicity", "bias"], "suite": ["helm"], "prompt_function": "med_paragraph_simplification"}
  {"name": "med_qa", "hf_repo": "bigbio/med_qa", "hf_subset": "med_qa_en_source", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["loglikelihood_acc", "exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm"], "prompt_function": "med_qa"}
  {"name": "mmlu", "hf_repo": "lighteval/mmlu", "hf_subset": "all","hf_avail_splits": ["auxiliary_train", "test", "validation", "dev"], "evaluation_splits": ["validation", "test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["loglikelihood_acc", "exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm"], "prompt_function": "mmlu"}
- {"name": "ms_marco_regular", "hf_repo": "lighteval/ms_marco", "hf_subset": "regular", "hf_avail_splits": ["train", "validation"], "evaluation_splits": ["validation"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["trec"], "suite": ["helm"], "prompt_function": "ms_marco"}
- {"name": "ms_marco_trec", "hf_repo": "lighteval/ms_marco", "hf_subset": "trec", "hf_avail_splits": ["train", "validation"], "evaluation_splits": ["validation"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["trec"], "suite": ["helm"], "prompt_function": "ms_marco"}
+ {"name": "ms_marco_regular", "hf_repo": "lighteval/ms_marco", "hf_subset": "regular", "hf_avail_splits": ["train", "validation"], "evaluation_splits": ["validation"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["ranking"], "suite": ["helm"], "prompt_function": "ms_marco"}
+ {"name": "ms_marco_trec", "hf_repo": "lighteval/ms_marco", "hf_subset": "trec", "hf_avail_splits": ["train", "validation"], "evaluation_splits": ["validation"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["ranking"], "suite": ["helm"], "prompt_function": "ms_marco"}
  {"name": "narrativeqa", "hf_repo": "narrativeqa", "hf_subset": "default","hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 100, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "f1_score", "rouge_l", "bleu_1", "bleu_4", "toxicity", "bias"], "suite": ["helm"], "prompt_function": "narrativeqa"}
  {"name": "numeracy_linear_example", "hf_repo": "lighteval/numeracy", "hf_subset": "linear_example", "hf_avail_splits": ["train", "test"], "evaluation_splits": ["test"], "generation_size": 20, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "absolute_value_difference"], "suite": ["helm"], "prompt_function": "numeracy"}
  {"name": "numeracy_linear_standard", "hf_repo": "lighteval/numeracy", "hf_subset": "linear_standard", "hf_avail_splits": ["train", "test"], "evaluation_splits": ["test"], "generation_size": 20, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "absolute_value_difference"], "suite": ["helm"], "prompt_function": "numeracy"}