Commit 1e389cd
Parent(s): d59eebf
Update train.jsonl
Files changed: train.jsonl (+2 -2)
train.jsonl
CHANGED
@@ -100,7 +100,7 @@
 {"name": "dyck_language_3", "hf_repo": "lighteval/DyckLanguage", "hf_subset": "3", "hf_avail_splits": ["train", "test"], "evaluation_splits": ["test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["exact_match_indicator", "toxicity", "bias"], "suite": ["helm"], "prompt_function": "dyck_language"}
 {"name": "dyck_language_4", "hf_repo": "lighteval/DyckLanguage", "hf_subset": "4", "hf_avail_splits": ["train", "test"], "evaluation_splits": ["test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["exact_match_indicator", "toxicity", "bias"], "suite": ["helm"], "prompt_function": "dyck_language"}
 {"name": "humaneval", "hf_repo": "openai_humaneval", "hf_subset": "", "hf_avail_splits": ["test"], "evaluation_splits": ["test"], "generation_size": 600, "stop_sequence": ["\nclass", "\ndef", "\nif", "\nprint"], "metric": ["code_eval_he", "bias", "toxicity"], "suite": ["helm", "code_scenario"], "prompt_function": "humaneval"}
-{"name": "apps", "hf_repo": "codeparrot/apps", "hf_subset": "", "hf_avail_splits": ["train", "test"], "evaluation_splits": ["test"], "generation_size": 600, "stop_sequence": [""
+{"name": "apps", "hf_repo": "codeparrot/apps", "hf_subset": "", "hf_avail_splits": ["train", "test"], "evaluation_splits": ["test"], "generation_size": 600, "stop_sequence": ["'''", "---", "\"\"\"", "\n\n\n"], "metric": ["code_eval_apps", "bias", "toxicity"], "suite": ["helm", "code_scenario"], "prompt_function": "apps"}
 {"name": "hellaswag", "hf_repo": "hellaswag", "hf_subset": "", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": -1, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm"], "prompt_function": "hellaswag_helm"}
 {"name": "openbookqa", "hf_repo": "openbookqa", "hf_subset": "main", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": -1, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm", "commonsense_scenario"], "prompt_function": "openbookqa"}
 {"name": "commonsenseqa", "hf_repo": "commonsense_qa", "hf_subset": "", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": -1, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm", "commonsense_scenario"], "prompt_function": "commonsense_qa"}
@@ -352,4 +352,4 @@
 {"name": "wmt_14_de-en", "hf_repo": "lighteval/wmt_14", "hf_subset": "de-en", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 100, "stop_sequence": ["\n"], "metric": ["bleu"], "suite": ["helm"], "prompt_function": "wmt_14_de_en"}
 {"name": "wmt_14_fr-en", "hf_repo": "lighteval/wmt_14", "hf_subset": "fr-en", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 100, "stop_sequence": ["\n"], "metric": ["bleu"], "suite": ["helm"], "prompt_function": "wmt_14_fr_en"}
 {"name": "wmt_14_hi-en", "hf_repo": "lighteval/wmt_14", "hf_subset": "hi-en", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 100, "stop_sequence": ["\n"], "metric": ["bleu"], "suite": ["helm"], "prompt_function": "wmt_14_hi_en"}
-{"name": "wmt_14_ru-en", "hf_repo": "lighteval/wmt_14", "hf_subset": "ru-en", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 100, "stop_sequence": ["\n"], "metric": ["bleu"], "suite": ["helm"], "prompt_function": "wmt_14_ru_en"}
+{"name": "wmt_14_ru-en", "hf_repo": "lighteval/wmt_14", "hf_subset": "ru-en", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": 100, "stop_sequence": ["\n"], "metric": ["bleu"], "suite": ["helm"], "prompt_function": "wmt_14_ru_en"}
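For context on what this commit fixes: the previous "apps" entry was truncated after its "stop_sequence" field and was not parseable JSON, while the new entry completes the record with the same field set as the other tasks in this file. A minimal validation sketch along the lines below (hypothetical, not part of lighteval; the expected field list is inferred from the records visible in this diff) would flag such a line before it lands in a commit.

import json
from pathlib import Path

# Fields that every task entry in this train.jsonl appears to carry.
# This list is inferred from the records shown in the diff above.
EXPECTED_KEYS = {
    "name", "hf_repo", "hf_subset", "hf_avail_splits", "evaluation_splits",
    "generation_size", "stop_sequence", "metric", "suite", "prompt_function",
}

def check_tasks_file(path: str = "train.jsonl") -> None:
    for lineno, line in enumerate(Path(path).read_text().splitlines(), start=1):
        if not line.strip():
            continue
        try:
            record = json.loads(line)
        except json.JSONDecodeError as err:
            # The pre-commit "apps" entry would be reported here (truncated JSON).
            print(f"line {lineno}: invalid JSON ({err})")
            continue
        missing = EXPECTED_KEYS - record.keys()
        if missing:
            print(f"line {lineno} ({record.get('name', '?')}): missing {sorted(missing)}")

if __name__ == "__main__":
    check_tasks_file()

Run from the repository root against train.jsonl; a clean pass prints nothing.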