Commit eedd038
Parent(s): db5caec

mmlu

train.jsonl CHANGED (+1 -1)
@@ -197,7 +197,7 @@
 {"name": "med_mcqa", "hf_repo": "lighteval/med_mcqa", "hf_subset": "default","hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["loglikelihood_acc", "exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm"], "prompt_function": "med_mcqa"}
 {"name": "med_paragraph_simplification", "hf_repo": "lighteval/med_paragraph_simplification", "hf_subset": "default","hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 512, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "f1_score", "rouge_l", "bleu_1", "bleu_4", "toxicity", "bias"], "suite": ["helm"], "prompt_function": "med_paragraph_simplification"}
 {"name": "med_qa", "hf_repo": "bigbio/med_qa", "hf_subset": "med_qa_en_source", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["loglikelihood_acc", "exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm"], "prompt_function": "med_qa"}
-{"name": "mmlu", "hf_repo": "
+{"name": "mmlu", "hf_repo": "lighteval/mmlu", "hf_subset": "all","hf_avail_splits": ["auxiliary_train", "test", "validation", "dev"], "evaluation_splits": ["validation", "test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["loglikelihood_acc", "exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm"], "prompt_function": "mmlu"}
 {"name": "ms_marco_regular", "hf_repo": "lighteval/ms_marco", "hf_subset": "regular", "hf_avail_splits": ["train", "validation"], "evaluation_splits": ["validation"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["trec"], "suite": ["helm"], "prompt_function": "ms_marco"}
 {"name": "ms_marco_trec", "hf_repo": "lighteval/ms_marco", "hf_subset": "trec", "hf_avail_splits": ["train", "validation"], "evaluation_splits": ["validation"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["trec"], "suite": ["helm"], "prompt_function": "ms_marco"}
 {"name": "narrativeqa", "hf_repo": "narrativeqa", "hf_subset": "default","hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 100, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "f1_score", "rouge_l", "bleu_1", "bleu_4", "toxicity", "bias"], "suite": ["helm"], "prompt_function": "narrativeqa"}
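For context, each row of train.jsonl is one self-contained JSON task definition with the fields visible in the diff above; the pre-commit mmlu row was not valid JSON, which this commit fixes. Below is a minimal sketch of how such a file could be parsed and sanity-checked. The required-field list and the checks are assumptions read off the entries in this diff, not lighteval's actual loading code.

```python
import json

# Fields observed in the entries above; treating all of them as required is an
# assumption for illustration, not lighteval's real schema.
REQUIRED_FIELDS = {
    "name", "hf_repo", "hf_subset", "hf_avail_splits", "evaluation_splits",
    "generation_size", "stop_sequence", "metric", "suite", "prompt_function",
}

def load_task_configs(path: str) -> list[dict]:
    """Parse a JSONL task file and check each entry for the expected fields."""
    configs = []
    with open(path, encoding="utf-8") as f:
        for line_no, line in enumerate(f, start=1):
            line = line.strip()
            if not line:
                continue
            # Raises json.JSONDecodeError on malformed rows,
            # e.g. the truncated mmlu row before this commit.
            cfg = json.loads(line)
            missing = REQUIRED_FIELDS - cfg.keys()
            if missing:
                raise ValueError(f"line {line_no}: missing fields {sorted(missing)}")
            # Evaluation splits should be a subset of the splits the repo advertises.
            if not set(cfg["evaluation_splits"]) <= set(cfg["hf_avail_splits"]):
                raise ValueError(f"line {line_no}: evaluation split not in hf_avail_splits")
            configs.append(cfg)
    return configs

if __name__ == "__main__":
    tasks = load_task_configs("train.jsonl")
    mmlu = next(c for c in tasks if c["name"] == "mmlu")
    print(mmlu["hf_repo"], mmlu["evaluation_splits"])  # lighteval/mmlu ['validation', 'test']
```

Run against the post-commit file, the check passes and reports the new mmlu entry pointing at the lighteval/mmlu repo with the "all" subset; run against the pre-commit file, it fails at line 200.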