Upload EleutherAI/pythia-70m/step_32 with huggingface_hub
{
"results": {
"arc_challenge": {
"alias": "arc_challenge",
"acc,none": 0.20392491467576793,
"acc_stderr,none": 0.011774262478702254,
"acc_norm,none": 0.2440273037542662,
"acc_norm_stderr,none": 0.012551447627856255
},
"arc_easy": {
"alias": "arc_easy",
"acc,none": 0.26725589225589225,
"acc_stderr,none": 0.00908046324601747,
"acc_norm,none": 0.26430976430976433,
"acc_norm_stderr,none": 0.009048410451863014
},
"blimp": {
"acc,none": 0.5261641791044778,
"acc_stderr,none": 0.0017648145373235827,
"alias": "blimp"
},
"blimp_adjunct_island": {
"alias": " - blimp_adjunct_island",
"acc,none": 0.526,
"acc_stderr,none": 0.015797897758042762
},
"blimp_anaphor_gender_agreement": {
"alias": " - blimp_anaphor_gender_agreement",
"acc,none": 0.279,
"acc_stderr,none": 0.014190150117612026
},
"blimp_anaphor_number_agreement": {
"alias": " - blimp_anaphor_number_agreement",
"acc,none": 0.405,
"acc_stderr,none": 0.015531136990453042
},
"blimp_animate_subject_passive": {
"alias": " - blimp_animate_subject_passive",
"acc,none": 0.601,
"acc_stderr,none": 0.015493193313162906
},
"blimp_animate_subject_trans": {
"alias": " - blimp_animate_subject_trans",
"acc,none": 0.808,
"acc_stderr,none": 0.01246159264665998
},
"blimp_causative": {
"alias": " - blimp_causative",
"acc,none": 0.347,
"acc_stderr,none": 0.01506047203170662
},
"blimp_complex_NP_island": {
"alias": " - blimp_complex_NP_island",
"acc,none": 0.461,
"acc_stderr,none": 0.01577110420128319
},
"blimp_coordinate_structure_constraint_complex_left_branch": {
"alias": " - blimp_coordinate_structure_constraint_complex_left_branch",
"acc,none": 0.519,
"acc_stderr,none": 0.015807874268505853
},
"blimp_coordinate_structure_constraint_object_extraction": {
"alias": " - blimp_coordinate_structure_constraint_object_extraction",
"acc,none": 0.548,
"acc_stderr,none": 0.015746235865880677
},
"blimp_determiner_noun_agreement_1": {
"alias": " - blimp_determiner_noun_agreement_1",
"acc,none": 0.547,
"acc_stderr,none": 0.015749255189977582
},
"blimp_determiner_noun_agreement_2": {
"alias": " - blimp_determiner_noun_agreement_2",
"acc,none": 0.523,
"acc_stderr,none": 0.0158025542467261
},
"blimp_determiner_noun_agreement_irregular_1": {
"alias": " - blimp_determiner_noun_agreement_irregular_1",
"acc,none": 0.499,
"acc_stderr,none": 0.01581926829057682
},
"blimp_determiner_noun_agreement_irregular_2": {
"alias": " - blimp_determiner_noun_agreement_irregular_2",
"acc,none": 0.498,
"acc_stderr,none": 0.015819173374302706
},
"blimp_determiner_noun_agreement_with_adj_2": {
"alias": " - blimp_determiner_noun_agreement_with_adj_2",
"acc,none": 0.492,
"acc_stderr,none": 0.01581727492920901
},
"blimp_determiner_noun_agreement_with_adj_irregular_1": {
"alias": " - blimp_determiner_noun_agreement_with_adj_irregular_1",
"acc,none": 0.543,
"acc_stderr,none": 0.01576069159013638
},
"blimp_determiner_noun_agreement_with_adj_irregular_2": {
"alias": " - blimp_determiner_noun_agreement_with_adj_irregular_2",
"acc,none": 0.523,
"acc_stderr,none": 0.015802554246726098
},
"blimp_determiner_noun_agreement_with_adjective_1": {
"alias": " - blimp_determiner_noun_agreement_with_adjective_1",
"acc,none": 0.512,
"acc_stderr,none": 0.015814743314581818
},
"blimp_distractor_agreement_relational_noun": {
"alias": " - blimp_distractor_agreement_relational_noun",
"acc,none": 0.49,
"acc_stderr,none": 0.0158161357527732
},
"blimp_distractor_agreement_relative_clause": {
"alias": " - blimp_distractor_agreement_relative_clause",
"acc,none": 0.505,
"acc_stderr,none": 0.015818508944436663
},
"blimp_drop_argument": {
"alias": " - blimp_drop_argument",
"acc,none": 0.681,
"acc_stderr,none": 0.014746404865473493
},
"blimp_ellipsis_n_bar_1": {
"alias": " - blimp_ellipsis_n_bar_1",
"acc,none": 0.524,
"acc_stderr,none": 0.015801065586651758
},
"blimp_ellipsis_n_bar_2": {
"alias": " - blimp_ellipsis_n_bar_2",
"acc,none": 0.342,
"acc_stderr,none": 0.01500870618212173
},
"blimp_existential_there_object_raising": {
"alias": " - blimp_existential_there_object_raising",
"acc,none": 0.613,
"acc_stderr,none": 0.015410011955493928
},
"blimp_existential_there_quantifiers_1": {
"alias": " - blimp_existential_there_quantifiers_1",
"acc,none": 0.567,
"acc_stderr,none": 0.015676630912181327
},
"blimp_existential_there_quantifiers_2": {
"alias": " - blimp_existential_there_quantifiers_2",
"acc,none": 0.868,
"acc_stderr,none": 0.010709373963528035
},
"blimp_existential_there_subject_raising": {
"alias": " - blimp_existential_there_subject_raising",
"acc,none": 0.511,
"acc_stderr,none": 0.015815471195292686
},
"blimp_expletive_it_object_raising": {
"alias": " - blimp_expletive_it_object_raising",
"acc,none": 0.581,
"acc_stderr,none": 0.0156103389675778
},
"blimp_inchoative": {
"alias": " - blimp_inchoative",
"acc,none": 0.437,
"acc_stderr,none": 0.015693223928730373
},
"blimp_intransitive": {
"alias": " - blimp_intransitive",
"acc,none": 0.614,
"acc_stderr,none": 0.015402637476784376
},
"blimp_irregular_past_participle_adjectives": {
"alias": " - blimp_irregular_past_participle_adjectives",
"acc,none": 0.341,
"acc_stderr,none": 0.014998131348402707
},
"blimp_irregular_past_participle_verbs": {
"alias": " - blimp_irregular_past_participle_verbs",
"acc,none": 0.438,
"acc_stderr,none": 0.01569721001969469
},
"blimp_irregular_plural_subject_verb_agreement_1": {
"alias": " - blimp_irregular_plural_subject_verb_agreement_1",
"acc,none": 0.504,
"acc_stderr,none": 0.015818793703510886
},
"blimp_irregular_plural_subject_verb_agreement_2": {
"alias": " - blimp_irregular_plural_subject_verb_agreement_2",
"acc,none": 0.53,
"acc_stderr,none": 0.015790799515836763
},
"blimp_left_branch_island_echo_question": {
"alias": " - blimp_left_branch_island_echo_question",
"acc,none": 0.605,
"acc_stderr,none": 0.015466551464829344
},
"blimp_left_branch_island_simple_question": {
"alias": " - blimp_left_branch_island_simple_question",
"acc,none": 0.521,
"acc_stderr,none": 0.015805341148131296
},
"blimp_matrix_question_npi_licensor_present": {
"alias": " - blimp_matrix_question_npi_licensor_present",
"acc,none": 0.013,
"acc_stderr,none": 0.0035838308894036285
},
"blimp_npi_present_1": {
"alias": " - blimp_npi_present_1",
"acc,none": 0.915,
"acc_stderr,none": 0.008823426366942284
},
"blimp_npi_present_2": {
"alias": " - blimp_npi_present_2",
"acc,none": 0.862,
"acc_stderr,none": 0.010912152632504411
},
"blimp_only_npi_licensor_present": {
"alias": " - blimp_only_npi_licensor_present",
"acc,none": 0.99,
"acc_stderr,none": 0.0031480009386767797
},
"blimp_only_npi_scope": {
"alias": " - blimp_only_npi_scope",
"acc,none": 0.951,
"acc_stderr,none": 0.0068297617561409105
},
"blimp_passive_1": {
"alias": " - blimp_passive_1",
"acc,none": 0.606,
"acc_stderr,none": 0.015459721957493379
},
"blimp_passive_2": {
"alias": " - blimp_passive_2",
"acc,none": 0.576,
"acc_stderr,none": 0.015635487471405186
},
"blimp_principle_A_c_command": {
"alias": " - blimp_principle_A_c_command",
"acc,none": 0.433,
"acc_stderr,none": 0.01567663091218133
},
"blimp_principle_A_case_1": {
"alias": " - blimp_principle_A_case_1",
"acc,none": 0.834,
"acc_stderr,none": 0.011772110370812196
},
"blimp_principle_A_case_2": {
"alias": " - blimp_principle_A_case_2",
"acc,none": 0.474,
"acc_stderr,none": 0.01579789775804277
},
"blimp_principle_A_domain_1": {
"alias": " - blimp_principle_A_domain_1",
"acc,none": 0.788,
"acc_stderr,none": 0.012931481864938034
},
"blimp_principle_A_domain_2": {
"alias": " - blimp_principle_A_domain_2",
"acc,none": 0.601,
"acc_stderr,none": 0.015493193313162908
},
"blimp_principle_A_domain_3": {
"alias": " - blimp_principle_A_domain_3",
"acc,none": 0.523,
"acc_stderr,none": 0.015802554246726098
},
"blimp_principle_A_reconstruction": {
"alias": " - blimp_principle_A_reconstruction",
"acc,none": 0.439,
"acc_stderr,none": 0.015701131345400774
},
"blimp_regular_plural_subject_verb_agreement_1": {
"alias": " - blimp_regular_plural_subject_verb_agreement_1",
"acc,none": 0.392,
"acc_stderr,none": 0.01544585946377129
},
"blimp_regular_plural_subject_verb_agreement_2": {
"alias": " - blimp_regular_plural_subject_verb_agreement_2",
"acc,none": 0.512,
"acc_stderr,none": 0.015814743314581818
},
"blimp_sentential_negation_npi_licensor_present": {
"alias": " - blimp_sentential_negation_npi_licensor_present",
"acc,none": 0.996,
"acc_stderr,none": 0.00199699473909873
},
"blimp_sentential_negation_npi_scope": {
"alias": " - blimp_sentential_negation_npi_scope",
"acc,none": 0.623,
"acc_stderr,none": 0.015333170125779857
},
"blimp_sentential_subject_island": {
"alias": " - blimp_sentential_subject_island",
"acc,none": 0.45,
"acc_stderr,none": 0.015740004693383845
},
"blimp_superlative_quantifiers_1": {
"alias": " - blimp_superlative_quantifiers_1",
"acc,none": 0.029,
"acc_stderr,none": 0.005309160685756978
},
"blimp_superlative_quantifiers_2": {
"alias": " - blimp_superlative_quantifiers_2",
"acc,none": 0.264,
"acc_stderr,none": 0.01394627184944047
},
"blimp_tough_vs_raising_1": {
"alias": " - blimp_tough_vs_raising_1",
"acc,none": 0.414,
"acc_stderr,none": 0.015583544104177515
},
"blimp_tough_vs_raising_2": {
"alias": " - blimp_tough_vs_raising_2",
"acc,none": 0.598,
"acc_stderr,none": 0.015512467135715084
},
"blimp_transitive": {
"alias": " - blimp_transitive",
"acc,none": 0.523,
"acc_stderr,none": 0.0158025542467261
},
"blimp_wh_island": {
"alias": " - blimp_wh_island",
"acc,none": 0.369,
"acc_stderr,none": 0.01526669813915462
},
"blimp_wh_questions_object_gap": {
"alias": " - blimp_wh_questions_object_gap",
"acc,none": 0.326,
"acc_stderr,none": 0.014830507204541035
},
"blimp_wh_questions_subject_gap": {
"alias": " - blimp_wh_questions_subject_gap",
"acc,none": 0.167,
"acc_stderr,none": 0.01180043432464459
},
"blimp_wh_questions_subject_gap_long_distance": {
"alias": " - blimp_wh_questions_subject_gap_long_distance",
"acc,none": 0.26,
"acc_stderr,none": 0.013877773329774166
},
"blimp_wh_vs_that_no_gap": {
"alias": " - blimp_wh_vs_that_no_gap",
"acc,none": 0.215,
"acc_stderr,none": 0.012997843819031817
},
"blimp_wh_vs_that_no_gap_long_distance": {
"alias": " - blimp_wh_vs_that_no_gap_long_distance",
"acc,none": 0.243,
"acc_stderr,none": 0.013569640199177445
},
"blimp_wh_vs_that_with_gap": {
"alias": " - blimp_wh_vs_that_with_gap",
"acc,none": 0.814,
"acc_stderr,none": 0.012310790208412796
},
"blimp_wh_vs_that_with_gap_long_distance": {
"alias": " - blimp_wh_vs_that_with_gap_long_distance",
"acc,none": 0.75,
"acc_stderr,none": 0.013699915608779773
},
"lambada_openai": {
"alias": "lambada_openai",
"perplexity,none": 3288700.9343122425,
"perplexity_stderr,none": 312029.6781255463,
"acc,none": 0.0,
"acc_stderr,none": 0.0
},
"logiqa": {
"alias": "logiqa",
"acc,none": 0.21812596006144394,
"acc_stderr,none": 0.016198149258419312,
"acc_norm,none": 0.23655913978494625,
"acc_norm_stderr,none": 0.016668667667174192
},
"mmlu": {
"acc,none": 0.24619000142429853,
"acc_stderr,none": 0.003629685381682428,
"alias": "mmlu"
},
"mmlu_humanities": {
"acc,none": 0.2452709883103082,
"acc_stderr,none": 0.006276292424886835,
"alias": " - humanities"
},
"mmlu_formal_logic": {
"alias": " - formal_logic",
"acc,none": 0.1984126984126984,
"acc_stderr,none": 0.03567016675276865
},
"mmlu_high_school_european_history": {
"alias": " - high_school_european_history",
"acc,none": 0.24242424242424243,
"acc_stderr,none": 0.033464098810559534
},
"mmlu_high_school_us_history": {
"alias": " - high_school_us_history",
"acc,none": 0.23529411764705882,
"acc_stderr,none": 0.02977177522814563
},
"mmlu_high_school_world_history": {
"alias": " - high_school_world_history",
"acc,none": 0.2616033755274262,
"acc_stderr,none": 0.028609516716994934
},
"mmlu_international_law": {
"alias": " - international_law",
"acc,none": 0.24793388429752067,
"acc_stderr,none": 0.03941897526516302
},
"mmlu_jurisprudence": {
"alias": " - jurisprudence",
"acc,none": 0.2962962962962963,
"acc_stderr,none": 0.04414343666854933
},
"mmlu_logical_fallacies": {
"alias": " - logical_fallacies",
"acc,none": 0.24539877300613497,
"acc_stderr,none": 0.03380939813943354
},
"mmlu_moral_disputes": {
"alias": " - moral_disputes",
"acc,none": 0.24566473988439305,
"acc_stderr,none": 0.023176298203992005
},
"mmlu_moral_scenarios": {
"alias": " - moral_scenarios",
"acc,none": 0.2424581005586592,
"acc_stderr,none": 0.014333522059217887
},
"mmlu_philosophy": {
"alias": " - philosophy",
"acc,none": 0.2765273311897106,
"acc_stderr,none": 0.02540383297817962
},
"mmlu_prehistory": {
"alias": " - prehistory",
"acc,none": 0.2654320987654321,
"acc_stderr,none": 0.024569223600460845
},
"mmlu_professional_law": {
"alias": " - professional_law",
"acc,none": 0.2392438070404172,
"acc_stderr,none": 0.010896123652676669
},
"mmlu_world_religions": {
"alias": " - world_religions",
"acc,none": 0.21052631578947367,
"acc_stderr,none": 0.031267817146631786
},
"mmlu_other": {
"acc,none": 0.26778242677824265,
"acc_stderr,none": 0.007920475861060771,
"alias": " - other"
},
"mmlu_business_ethics": {
"alias": " - business_ethics",
"acc,none": 0.26,
"acc_stderr,none": 0.0440844002276808
},
"mmlu_clinical_knowledge": {
"alias": " - clinical_knowledge",
"acc,none": 0.2641509433962264,
"acc_stderr,none": 0.027134291628741713
},
"mmlu_college_medicine": {
"alias": " - college_medicine",
"acc,none": 0.2023121387283237,
"acc_stderr,none": 0.030631145539198823
},
"mmlu_global_facts": {
"alias": " - global_facts",
"acc,none": 0.31,
"acc_stderr,none": 0.04648231987117316
},
"mmlu_human_aging": {
"alias": " - human_aging",
"acc,none": 0.37668161434977576,
"acc_stderr,none": 0.032521134899291884
},
"mmlu_management": {
"alias": " - management",
"acc,none": 0.2621359223300971,
"acc_stderr,none": 0.04354631077260595
},
"mmlu_marketing": {
"alias": " - marketing",
"acc,none": 0.2564102564102564,
"acc_stderr,none": 0.028605953702004257
},
"mmlu_medical_genetics": {
"alias": " - medical_genetics",
"acc,none": 0.25,
"acc_stderr,none": 0.04351941398892446
},
"mmlu_miscellaneous": {
"alias": " - miscellaneous",
"acc,none": 0.28735632183908044,
"acc_stderr,none": 0.0161824107306827
},
"mmlu_nutrition": {
"alias": " - nutrition",
"acc,none": 0.22549019607843138,
"acc_stderr,none": 0.0239291555173513
},
"mmlu_professional_accounting": {
"alias": " - professional_accounting",
"acc,none": 0.2553191489361702,
"acc_stderr,none": 0.026011992930902
},
"mmlu_professional_medicine": {
"alias": " - professional_medicine",
"acc,none": 0.20220588235294118,
"acc_stderr,none": 0.024398192986654924
},
"mmlu_virology": {
"alias": " - virology",
"acc,none": 0.3192771084337349,
"acc_stderr,none": 0.03629335329947859
},
"mmlu_social_sciences": {
"acc,none": 0.23301917452063697,
"acc_stderr,none": 0.0076139847492523,
"alias": " - social sciences"
},
"mmlu_econometrics": {
"alias": " - econometrics",
"acc,none": 0.2807017543859649,
"acc_stderr,none": 0.04227054451232199
},
"mmlu_high_school_geography": {
"alias": " - high_school_geography",
"acc,none": 0.21212121212121213,
"acc_stderr,none": 0.029126522834586832
},
"mmlu_high_school_government_and_politics": {
"alias": " - high_school_government_and_politics",
"acc,none": 0.20725388601036268,
"acc_stderr,none": 0.029252823291803627
},
"mmlu_high_school_macroeconomics": {
"alias": " - high_school_macroeconomics",
"acc,none": 0.2205128205128205,
"acc_stderr,none": 0.021020672680827912
},
"mmlu_high_school_microeconomics": {
"alias": " - high_school_microeconomics",
"acc,none": 0.226890756302521,
"acc_stderr,none": 0.02720537153827947
},
"mmlu_high_school_psychology": {
"alias": " - high_school_psychology",
"acc,none": 0.23669724770642203,
"acc_stderr,none": 0.01822407811729907
},
"mmlu_human_sexuality": {
"alias": " - human_sexuality",
"acc,none": 0.22900763358778625,
"acc_stderr,none": 0.036853466317118506
},
"mmlu_professional_psychology": {
"alias": " - professional_psychology",
"acc,none": 0.25326797385620914,
"acc_stderr,none": 0.01759348689536683
},
"mmlu_public_relations": {
"alias": " - public_relations",
"acc,none": 0.34545454545454546,
"acc_stderr,none": 0.04554619617541054
},
"mmlu_security_studies": {
"alias": " - security_studies",
"acc,none": 0.17142857142857143,
"acc_stderr,none": 0.02412746346265016
},
"mmlu_sociology": {
"alias": " - sociology",
"acc,none": 0.23880597014925373,
"acc_stderr,none": 0.030147775935409217
},
"mmlu_us_foreign_policy": {
"alias": " - us_foreign_policy",
"acc,none": 0.21,
"acc_stderr,none": 0.04093601807403326
},
"mmlu_stem": {
"acc,none": 0.2391373295274342,
"acc_stderr,none": 0.007580170918702068,
"alias": " - stem"
},
"mmlu_abstract_algebra": {
"alias": " - abstract_algebra",
"acc,none": 0.26,
"acc_stderr,none": 0.04408440022768079
},
"mmlu_anatomy": {
"alias": " - anatomy",
"acc,none": 0.2518518518518518,
"acc_stderr,none": 0.03749850709174021
},
"mmlu_astronomy": {
"alias": " - astronomy",
"acc,none": 0.18421052631578946,
"acc_stderr,none": 0.0315469804508223
},
"mmlu_college_biology": {
"alias": " - college_biology",
"acc,none": 0.2222222222222222,
"acc_stderr,none": 0.03476590104304134
},
"mmlu_college_chemistry": {
"alias": " - college_chemistry",
"acc,none": 0.21,
"acc_stderr,none": 0.040936018074033256
},
"mmlu_college_computer_science": {
"alias": " - college_computer_science",
"acc,none": 0.15,
"acc_stderr,none": 0.0358870281282637
},
"mmlu_college_mathematics": {
"alias": " - college_mathematics",
"acc,none": 0.23,
"acc_stderr,none": 0.04229525846816506
},
"mmlu_college_physics": {
"alias": " - college_physics",
"acc,none": 0.19607843137254902,
"acc_stderr,none": 0.03950581861179964
},
"mmlu_computer_security": {
"alias": " - computer_security",
"acc,none": 0.24,
"acc_stderr,none": 0.04292346959909282
},
"mmlu_conceptual_physics": {
"alias": " - conceptual_physics",
"acc,none": 0.3276595744680851,
"acc_stderr,none": 0.030683020843231
},
"mmlu_electrical_engineering": {
"alias": " - electrical_engineering",
"acc,none": 0.2206896551724138,
"acc_stderr,none": 0.03455930201924811
},
"mmlu_elementary_mathematics": {
"alias": " - elementary_mathematics",
"acc,none": 0.2566137566137566,
"acc_stderr,none": 0.022494510767503154
},
"mmlu_high_school_biology": {
"alias": " - high_school_biology",
"acc,none": 0.25483870967741934,
"acc_stderr,none": 0.02479011845933221
},
"mmlu_high_school_chemistry": {
"alias": " - high_school_chemistry",
"acc,none": 0.2660098522167488,
"acc_stderr,none": 0.031089826002937523
},
"mmlu_high_school_computer_science": {
"alias": " - high_school_computer_science",
"acc,none": 0.23,
"acc_stderr,none": 0.042295258468165044
},
"mmlu_high_school_mathematics": {
"alias": " - high_school_mathematics",
"acc,none": 0.26296296296296295,
"acc_stderr,none": 0.02684205787383371
},
"mmlu_high_school_physics": {
"alias": " - high_school_physics",
"acc,none": 0.1986754966887417,
"acc_stderr,none": 0.03257847384436776
},
"mmlu_high_school_statistics": {
"alias": " - high_school_statistics",
"acc,none": 0.16203703703703703,
"acc_stderr,none": 0.02513045365226846
},
"mmlu_machine_learning": {
"alias": " - machine_learning",
"acc,none": 0.29464285714285715,
"acc_stderr,none": 0.043270409325787296
},
"piqa": {
"alias": "piqa",
"acc,none": 0.5277475516866159,
"acc_stderr,none": 0.01164784665606225,
"acc_norm,none": 0.5277475516866159,
"acc_norm_stderr,none": 0.01164784665606225
},
"sciq": {
"alias": "sciq",
"acc,none": 0.227,
"acc_stderr,none": 0.013253174964763914,
"acc_norm,none": 0.21,
"acc_norm_stderr,none": 0.01288666233227455
},
"wikitext": {
"alias": "wikitext",
"word_perplexity,none": 77458.0733538719,
"word_perplexity_stderr,none": "N/A",
"byte_perplexity,none": 8.208804453988343,
"byte_perplexity_stderr,none": "N/A",
"bits_per_byte,none": 3.0371721204320363,
"bits_per_byte_stderr,none": "N/A"
},
"winogrande": {
"alias": "winogrande",
"acc,none": 0.4956590370955012,
"acc_stderr,none": 0.014051956064076884
},
"wsc": {
"alias": "wsc",
"acc,none": 0.6346153846153846,
"acc_stderr,none": 0.0474473339327792
}
},
"groups": {
"blimp": {
"acc,none": 0.5261641791044778,
"acc_stderr,none": 0.0017648145373235827,
"alias": "blimp"
},
"mmlu": {
"acc,none": 0.24619000142429853,
"acc_stderr,none": 0.003629685381682428,
"alias": "mmlu"
},
"mmlu_humanities": {
"acc,none": 0.2452709883103082,
"acc_stderr,none": 0.006276292424886835,
"alias": " - humanities"
},
"mmlu_other": {
"acc,none": 0.26778242677824265,
"acc_stderr,none": 0.007920475861060771,
"alias": " - other"
},
"mmlu_social_sciences": {
"acc,none": 0.23301917452063697,
"acc_stderr,none": 0.0076139847492523,
"alias": " - social sciences"
},
"mmlu_stem": {
"acc,none": 0.2391373295274342,
"acc_stderr,none": 0.007580170918702068,
"alias": " - stem"
}
},
"group_subtasks": {
"arc_easy": [],
"arc_challenge": [],
"blimp": [
"blimp_adjunct_island",
"blimp_anaphor_gender_agreement",
"blimp_anaphor_number_agreement",
"blimp_animate_subject_passive",
"blimp_animate_subject_trans",
"blimp_causative",
"blimp_complex_NP_island",
"blimp_coordinate_structure_constraint_complex_left_branch",
"blimp_coordinate_structure_constraint_object_extraction",
"blimp_determiner_noun_agreement_1",
"blimp_determiner_noun_agreement_2",
"blimp_determiner_noun_agreement_irregular_1",
"blimp_determiner_noun_agreement_irregular_2",
"blimp_determiner_noun_agreement_with_adj_2",
"blimp_determiner_noun_agreement_with_adj_irregular_1",
"blimp_determiner_noun_agreement_with_adj_irregular_2",
"blimp_determiner_noun_agreement_with_adjective_1",
"blimp_distractor_agreement_relational_noun",
"blimp_distractor_agreement_relative_clause",
"blimp_drop_argument",
"blimp_ellipsis_n_bar_1",
"blimp_ellipsis_n_bar_2",
"blimp_existential_there_object_raising",
"blimp_existential_there_quantifiers_1",
"blimp_existential_there_quantifiers_2",
"blimp_existential_there_subject_raising",
"blimp_expletive_it_object_raising",
"blimp_inchoative",
"blimp_intransitive",
"blimp_irregular_past_participle_adjectives",
"blimp_irregular_past_participle_verbs",
"blimp_irregular_plural_subject_verb_agreement_1",
"blimp_irregular_plural_subject_verb_agreement_2",
"blimp_left_branch_island_echo_question",
"blimp_left_branch_island_simple_question",
"blimp_matrix_question_npi_licensor_present",
"blimp_npi_present_1",
"blimp_npi_present_2",
"blimp_only_npi_licensor_present",
"blimp_only_npi_scope",
"blimp_passive_1",
"blimp_passive_2",
"blimp_principle_A_c_command",
"blimp_principle_A_case_1",
"blimp_principle_A_case_2",
"blimp_principle_A_domain_1",
"blimp_principle_A_domain_2",
"blimp_principle_A_domain_3",
"blimp_principle_A_reconstruction",
"blimp_regular_plural_subject_verb_agreement_1",
"blimp_regular_plural_subject_verb_agreement_2",
"blimp_sentential_negation_npi_licensor_present",
"blimp_sentential_negation_npi_scope",
"blimp_sentential_subject_island",
"blimp_superlative_quantifiers_1",
"blimp_superlative_quantifiers_2",
"blimp_tough_vs_raising_1",
"blimp_tough_vs_raising_2",
"blimp_transitive",
"blimp_wh_island",
"blimp_wh_questions_object_gap",
"blimp_wh_questions_subject_gap",
"blimp_wh_questions_subject_gap_long_distance",
"blimp_wh_vs_that_no_gap",
"blimp_wh_vs_that_no_gap_long_distance",
"blimp_wh_vs_that_with_gap",
"blimp_wh_vs_that_with_gap_long_distance"
],
"lambada_openai": [],
"logiqa": [],
"mmlu_humanities": [
"mmlu_moral_disputes",
"mmlu_high_school_world_history",
"mmlu_jurisprudence",
"mmlu_philosophy",
"mmlu_high_school_us_history",
"mmlu_professional_law",
"mmlu_logical_fallacies",
"mmlu_moral_scenarios",
"mmlu_formal_logic",
"mmlu_prehistory",
"mmlu_high_school_european_history",
"mmlu_world_religions",
"mmlu_international_law"
],
"mmlu_social_sciences": [
"mmlu_us_foreign_policy",
"mmlu_sociology",
"mmlu_econometrics",
"mmlu_security_studies",
"mmlu_high_school_geography",
"mmlu_public_relations",
"mmlu_high_school_microeconomics",
"mmlu_professional_psychology",
"mmlu_high_school_macroeconomics",
"mmlu_human_sexuality",
"mmlu_high_school_government_and_politics",
"mmlu_high_school_psychology"
],
"mmlu_other": [
"mmlu_college_medicine",
"mmlu_medical_genetics",
"mmlu_business_ethics",
"mmlu_miscellaneous",
"mmlu_nutrition",
"mmlu_clinical_knowledge",
"mmlu_human_aging",
"mmlu_professional_accounting",
"mmlu_marketing",
"mmlu_global_facts",
"mmlu_professional_medicine",
"mmlu_virology",
"mmlu_management"
],
"mmlu_stem": [
"mmlu_elementary_mathematics",
"mmlu_electrical_engineering",
"mmlu_high_school_computer_science",
"mmlu_high_school_physics",
"mmlu_college_mathematics",
"mmlu_college_chemistry",
"mmlu_machine_learning",
"mmlu_high_school_mathematics",
"mmlu_computer_security",
"mmlu_conceptual_physics",
"mmlu_high_school_statistics",
"mmlu_high_school_biology",
"mmlu_astronomy",
"mmlu_college_computer_science",
"mmlu_college_biology",
"mmlu_college_physics",
"mmlu_anatomy",
"mmlu_high_school_chemistry",
"mmlu_abstract_algebra"
],
"mmlu": [
"mmlu_stem",
"mmlu_other",
"mmlu_social_sciences",
"mmlu_humanities"
],
"piqa": [],
"sciq": [],
"wikitext": [],
"winogrande": [],
"wsc": []
},
"configs": {
"arc_challenge": {
"task": "arc_challenge",
"tag": [
"ai2_arc"
],
"dataset_path": "allenai/ai2_arc",
"dataset_name": "ARC-Challenge",
"training_split": "train",
"validation_split": "validation",
"test_split": "test",
"doc_to_text": "Question: {{question}}\nAnswer:",
"doc_to_target": "{{choices.label.index(answerKey)}}",
"doc_to_choice": "{{choices.text}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
},
{
"metric": "acc_norm",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "Question: {{question}}\nAnswer:",
"metadata": {
"version": 1.0
}
},
"arc_easy": {
"task": "arc_easy",
"tag": [
"ai2_arc"
],
"dataset_path": "allenai/ai2_arc",
"dataset_name": "ARC-Easy",
"training_split": "train",
"validation_split": "validation",
"test_split": "test",
"doc_to_text": "Question: {{question}}\nAnswer:",
"doc_to_target": "{{choices.label.index(answerKey)}}",
"doc_to_choice": "{{choices.text}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
},
{
"metric": "acc_norm",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "Question: {{question}}\nAnswer:",
"metadata": {
"version": 1.0
}
},
"blimp_adjunct_island": {
"task": "blimp_adjunct_island",
"dataset_path": "blimp",
"dataset_name": "adjunct_island",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_anaphor_gender_agreement": {
"task": "blimp_anaphor_gender_agreement",
"dataset_path": "blimp",
"dataset_name": "anaphor_gender_agreement",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_anaphor_number_agreement": {
"task": "blimp_anaphor_number_agreement",
"dataset_path": "blimp",
"dataset_name": "anaphor_number_agreement",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_animate_subject_passive": {
"task": "blimp_animate_subject_passive",
"dataset_path": "blimp",
"dataset_name": "animate_subject_passive",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_animate_subject_trans": {
"task": "blimp_animate_subject_trans",
"dataset_path": "blimp",
"dataset_name": "animate_subject_trans",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_causative": {
"task": "blimp_causative",
"dataset_path": "blimp",
"dataset_name": "causative",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_complex_NP_island": {
"task": "blimp_complex_NP_island",
"dataset_path": "blimp",
"dataset_name": "complex_NP_island",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_coordinate_structure_constraint_complex_left_branch": {
"task": "blimp_coordinate_structure_constraint_complex_left_branch",
"dataset_path": "blimp",
"dataset_name": "coordinate_structure_constraint_complex_left_branch",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_coordinate_structure_constraint_object_extraction": {
"task": "blimp_coordinate_structure_constraint_object_extraction",
"dataset_path": "blimp",
"dataset_name": "coordinate_structure_constraint_object_extraction",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_determiner_noun_agreement_1": {
"task": "blimp_determiner_noun_agreement_1",
"dataset_path": "blimp",
"dataset_name": "determiner_noun_agreement_1",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_determiner_noun_agreement_2": {
"task": "blimp_determiner_noun_agreement_2",
"dataset_path": "blimp",
"dataset_name": "determiner_noun_agreement_2",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_determiner_noun_agreement_irregular_1": {
"task": "blimp_determiner_noun_agreement_irregular_1",
"dataset_path": "blimp",
"dataset_name": "determiner_noun_agreement_irregular_1",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_determiner_noun_agreement_irregular_2": {
"task": "blimp_determiner_noun_agreement_irregular_2",
"dataset_path": "blimp",
"dataset_name": "determiner_noun_agreement_irregular_2",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_determiner_noun_agreement_with_adj_2": {
"task": "blimp_determiner_noun_agreement_with_adj_2",
"dataset_path": "blimp",
"dataset_name": "determiner_noun_agreement_with_adj_2",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_determiner_noun_agreement_with_adj_irregular_1": {
"task": "blimp_determiner_noun_agreement_with_adj_irregular_1",
"dataset_path": "blimp",
"dataset_name": "determiner_noun_agreement_with_adj_irregular_1",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_determiner_noun_agreement_with_adj_irregular_2": {
"task": "blimp_determiner_noun_agreement_with_adj_irregular_2",
"dataset_path": "blimp",
"dataset_name": "determiner_noun_agreement_with_adj_irregular_2",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_determiner_noun_agreement_with_adjective_1": {
"task": "blimp_determiner_noun_agreement_with_adjective_1",
"dataset_path": "blimp",
"dataset_name": "determiner_noun_agreement_with_adjective_1",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_distractor_agreement_relational_noun": {
"task": "blimp_distractor_agreement_relational_noun",
"dataset_path": "blimp",
"dataset_name": "distractor_agreement_relational_noun",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_distractor_agreement_relative_clause": {
"task": "blimp_distractor_agreement_relative_clause",
"dataset_path": "blimp",
"dataset_name": "distractor_agreement_relative_clause",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_drop_argument": {
"task": "blimp_drop_argument",
"dataset_path": "blimp",
"dataset_name": "drop_argument",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_ellipsis_n_bar_1": {
"task": "blimp_ellipsis_n_bar_1",
"dataset_path": "blimp",
"dataset_name": "ellipsis_n_bar_1",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_ellipsis_n_bar_2": {
"task": "blimp_ellipsis_n_bar_2",
"dataset_path": "blimp",
"dataset_name": "ellipsis_n_bar_2",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_existential_there_object_raising": {
"task": "blimp_existential_there_object_raising",
"dataset_path": "blimp",
"dataset_name": "existential_there_object_raising",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_existential_there_quantifiers_1": {
"task": "blimp_existential_there_quantifiers_1",
"dataset_path": "blimp",
"dataset_name": "existential_there_quantifiers_1",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_existential_there_quantifiers_2": {
"task": "blimp_existential_there_quantifiers_2",
"dataset_path": "blimp",
"dataset_name": "existential_there_quantifiers_2",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_existential_there_subject_raising": {
"task": "blimp_existential_there_subject_raising",
"dataset_path": "blimp",
"dataset_name": "existential_there_subject_raising",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_expletive_it_object_raising": {
"task": "blimp_expletive_it_object_raising",
"dataset_path": "blimp",
"dataset_name": "expletive_it_object_raising",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_inchoative": {
"task": "blimp_inchoative",
"dataset_path": "blimp",
"dataset_name": "inchoative",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_intransitive": {
"task": "blimp_intransitive",
"dataset_path": "blimp",
"dataset_name": "intransitive",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_irregular_past_participle_adjectives": {
"task": "blimp_irregular_past_participle_adjectives",
"dataset_path": "blimp",
"dataset_name": "irregular_past_participle_adjectives",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_irregular_past_participle_verbs": {
"task": "blimp_irregular_past_participle_verbs",
"dataset_path": "blimp",
"dataset_name": "irregular_past_participle_verbs",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_irregular_plural_subject_verb_agreement_1": {
"task": "blimp_irregular_plural_subject_verb_agreement_1",
"dataset_path": "blimp",
"dataset_name": "irregular_plural_subject_verb_agreement_1",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_irregular_plural_subject_verb_agreement_2": {
"task": "blimp_irregular_plural_subject_verb_agreement_2",
"dataset_path": "blimp",
"dataset_name": "irregular_plural_subject_verb_agreement_2",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_left_branch_island_echo_question": {
"task": "blimp_left_branch_island_echo_question",
"dataset_path": "blimp",
"dataset_name": "left_branch_island_echo_question",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_left_branch_island_simple_question": {
"task": "blimp_left_branch_island_simple_question",
"dataset_path": "blimp",
"dataset_name": "left_branch_island_simple_question",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_matrix_question_npi_licensor_present": {
"task": "blimp_matrix_question_npi_licensor_present",
"dataset_path": "blimp",
"dataset_name": "matrix_question_npi_licensor_present",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_npi_present_1": {
"task": "blimp_npi_present_1",
"dataset_path": "blimp",
"dataset_name": "npi_present_1",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_npi_present_2": {
"task": "blimp_npi_present_2",
"dataset_path": "blimp",
"dataset_name": "npi_present_2",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_only_npi_licensor_present": {
"task": "blimp_only_npi_licensor_present",
"dataset_path": "blimp",
"dataset_name": "only_npi_licensor_present",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_only_npi_scope": {
"task": "blimp_only_npi_scope",
"dataset_path": "blimp",
"dataset_name": "only_npi_scope",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_passive_1": {
"task": "blimp_passive_1",
"dataset_path": "blimp",
"dataset_name": "passive_1",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_passive_2": {
"task": "blimp_passive_2",
"dataset_path": "blimp",
"dataset_name": "passive_2",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_principle_A_c_command": {
"task": "blimp_principle_A_c_command",
"dataset_path": "blimp",
"dataset_name": "principle_A_c_command",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_principle_A_case_1": {
"task": "blimp_principle_A_case_1",
"dataset_path": "blimp",
"dataset_name": "principle_A_case_1",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_principle_A_case_2": {
"task": "blimp_principle_A_case_2",
"dataset_path": "blimp",
"dataset_name": "principle_A_case_2",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_principle_A_domain_1": {
"task": "blimp_principle_A_domain_1",
"dataset_path": "blimp",
"dataset_name": "principle_A_domain_1",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_principle_A_domain_2": {
"task": "blimp_principle_A_domain_2",
"dataset_path": "blimp",
"dataset_name": "principle_A_domain_2",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_principle_A_domain_3": {
"task": "blimp_principle_A_domain_3",
"dataset_path": "blimp",
"dataset_name": "principle_A_domain_3",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_principle_A_reconstruction": {
"task": "blimp_principle_A_reconstruction",
"dataset_path": "blimp",
"dataset_name": "principle_A_reconstruction",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_regular_plural_subject_verb_agreement_1": {
"task": "blimp_regular_plural_subject_verb_agreement_1",
"dataset_path": "blimp",
"dataset_name": "regular_plural_subject_verb_agreement_1",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_regular_plural_subject_verb_agreement_2": {
"task": "blimp_regular_plural_subject_verb_agreement_2",
"dataset_path": "blimp",
"dataset_name": "regular_plural_subject_verb_agreement_2",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_sentential_negation_npi_licensor_present": {
"task": "blimp_sentential_negation_npi_licensor_present",
"dataset_path": "blimp",
"dataset_name": "sentential_negation_npi_licensor_present",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_sentential_negation_npi_scope": {
"task": "blimp_sentential_negation_npi_scope",
"dataset_path": "blimp",
"dataset_name": "sentential_negation_npi_scope",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_sentential_subject_island": {
"task": "blimp_sentential_subject_island",
"dataset_path": "blimp",
"dataset_name": "sentential_subject_island",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_superlative_quantifiers_1": {
"task": "blimp_superlative_quantifiers_1",
"dataset_path": "blimp",
"dataset_name": "superlative_quantifiers_1",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_superlative_quantifiers_2": {
"task": "blimp_superlative_quantifiers_2",
"dataset_path": "blimp",
"dataset_name": "superlative_quantifiers_2",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_tough_vs_raising_1": {
"task": "blimp_tough_vs_raising_1",
"dataset_path": "blimp",
"dataset_name": "tough_vs_raising_1",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_tough_vs_raising_2": {
"task": "blimp_tough_vs_raising_2",
"dataset_path": "blimp",
"dataset_name": "tough_vs_raising_2",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_transitive": {
"task": "blimp_transitive",
"dataset_path": "blimp",
"dataset_name": "transitive",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_wh_island": {
"task": "blimp_wh_island",
"dataset_path": "blimp",
"dataset_name": "wh_island",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_wh_questions_object_gap": {
"task": "blimp_wh_questions_object_gap",
"dataset_path": "blimp",
"dataset_name": "wh_questions_object_gap",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_wh_questions_subject_gap": {
"task": "blimp_wh_questions_subject_gap",
"dataset_path": "blimp",
"dataset_name": "wh_questions_subject_gap",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_wh_questions_subject_gap_long_distance": {
"task": "blimp_wh_questions_subject_gap_long_distance",
"dataset_path": "blimp",
"dataset_name": "wh_questions_subject_gap_long_distance",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_wh_vs_that_no_gap": {
"task": "blimp_wh_vs_that_no_gap",
"dataset_path": "blimp",
"dataset_name": "wh_vs_that_no_gap",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_wh_vs_that_no_gap_long_distance": {
"task": "blimp_wh_vs_that_no_gap_long_distance",
"dataset_path": "blimp",
"dataset_name": "wh_vs_that_no_gap_long_distance",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_wh_vs_that_with_gap": {
"task": "blimp_wh_vs_that_with_gap",
"dataset_path": "blimp",
"dataset_name": "wh_vs_that_with_gap",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"blimp_wh_vs_that_with_gap_long_distance": {
"task": "blimp_wh_vs_that_with_gap_long_distance",
"dataset_path": "blimp",
"dataset_name": "wh_vs_that_with_gap_long_distance",
"validation_split": "train",
"doc_to_text": "",
"doc_to_target": 0,
"doc_to_choice": "{{[sentence_good, sentence_bad]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
"metadata": {
"version": 1.0
}
},
"lambada_openai": {
"task": "lambada_openai",
"tag": [
"lambada"
],
"dataset_path": "EleutherAI/lambada_openai",
"dataset_name": "default",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
"doc_to_target": "{{' '+text.split(' ')[-1]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "perplexity",
"aggregation": "perplexity",
"higher_is_better": false
},
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "loglikelihood",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{text}}",
"metadata": {
"version": 1.0
}
},
"logiqa": {
"task": "logiqa",
"dataset_path": "EleutherAI/logiqa",
"dataset_name": "logiqa",
"dataset_kwargs": {
"trust_remote_code": true
},
"training_split": "train",
"validation_split": "validation",
"test_split": "test",
"doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Passage: <passage>\n Question: <question>\n Choices:\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n Answer:\n \"\"\"\n choices = [\"a\", \"b\", \"c\", \"d\"]\n prompt = \"Passage: \" + doc[\"context\"] + \"\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\nChoices:\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"Answer:\"\n return prompt\n",
"doc_to_target": "def doc_to_target(doc) -> int:\n choices = [\"a\", \"b\", \"c\", \"d\"]\n return choices.index(doc[\"label\"].strip())\n",
"doc_to_choice": "{{options}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
},
{
"metric": "acc_norm",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{context}}",
"metadata": {
"version": 1.0
}
},
"mmlu_abstract_algebra": {
"task": "mmlu_abstract_algebra",
"task_alias": "abstract_algebra",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "abstract_algebra",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about abstract algebra.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_anatomy": {
"task": "mmlu_anatomy",
"task_alias": "anatomy",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "anatomy",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about anatomy.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_astronomy": {
"task": "mmlu_astronomy",
"task_alias": "astronomy",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "astronomy",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about astronomy.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_business_ethics": {
"task": "mmlu_business_ethics",
"task_alias": "business_ethics",
"tag": "mmlu_other_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "business_ethics",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about business ethics.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_clinical_knowledge": {
"task": "mmlu_clinical_knowledge",
"task_alias": "clinical_knowledge",
"tag": "mmlu_other_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "clinical_knowledge",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about clinical knowledge.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_college_biology": {
"task": "mmlu_college_biology",
"task_alias": "college_biology",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "college_biology",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about college biology.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_college_chemistry": {
"task": "mmlu_college_chemistry",
"task_alias": "college_chemistry",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "college_chemistry",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about college chemistry.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_college_computer_science": {
"task": "mmlu_college_computer_science",
"task_alias": "college_computer_science",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "college_computer_science",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about college computer science.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_college_mathematics": {
"task": "mmlu_college_mathematics",
"task_alias": "college_mathematics",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "college_mathematics",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about college mathematics.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_college_medicine": {
"task": "mmlu_college_medicine",
"task_alias": "college_medicine",
"tag": "mmlu_other_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "college_medicine",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about college medicine.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_college_physics": {
"task": "mmlu_college_physics",
"task_alias": "college_physics",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "college_physics",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about college physics.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_computer_security": {
"task": "mmlu_computer_security",
"task_alias": "computer_security",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "computer_security",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about computer security.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_conceptual_physics": {
"task": "mmlu_conceptual_physics",
"task_alias": "conceptual_physics",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "conceptual_physics",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about conceptual physics.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_econometrics": {
"task": "mmlu_econometrics",
"task_alias": "econometrics",
"tag": "mmlu_social_sciences_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "econometrics",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about econometrics.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_electrical_engineering": {
"task": "mmlu_electrical_engineering",
"task_alias": "electrical_engineering",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "electrical_engineering",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about electrical engineering.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_elementary_mathematics": {
"task": "mmlu_elementary_mathematics",
"task_alias": "elementary_mathematics",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "elementary_mathematics",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about elementary mathematics.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_formal_logic": {
"task": "mmlu_formal_logic",
"task_alias": "formal_logic",
"tag": "mmlu_humanities_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "formal_logic",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about formal logic.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_global_facts": {
"task": "mmlu_global_facts",
"task_alias": "global_facts",
"tag": "mmlu_other_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "global_facts",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about global facts.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_high_school_biology": {
"task": "mmlu_high_school_biology",
"task_alias": "high_school_biology",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "high_school_biology",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about high school biology.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_high_school_chemistry": {
"task": "mmlu_high_school_chemistry",
"task_alias": "high_school_chemistry",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "high_school_chemistry",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about high school chemistry.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_high_school_computer_science": {
"task": "mmlu_high_school_computer_science",
"task_alias": "high_school_computer_science",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "high_school_computer_science",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about high school computer science.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_high_school_european_history": {
"task": "mmlu_high_school_european_history",
"task_alias": "high_school_european_history",
"tag": "mmlu_humanities_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "high_school_european_history",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about high school european history.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_high_school_geography": {
"task": "mmlu_high_school_geography",
"task_alias": "high_school_geography",
"tag": "mmlu_social_sciences_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "high_school_geography",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about high school geography.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_high_school_government_and_politics": {
"task": "mmlu_high_school_government_and_politics",
"task_alias": "high_school_government_and_politics",
"tag": "mmlu_social_sciences_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "high_school_government_and_politics",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about high school government and politics.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_high_school_macroeconomics": {
"task": "mmlu_high_school_macroeconomics",
"task_alias": "high_school_macroeconomics",
"tag": "mmlu_social_sciences_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "high_school_macroeconomics",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_high_school_mathematics": {
"task": "mmlu_high_school_mathematics",
"task_alias": "high_school_mathematics",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "high_school_mathematics",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about high school mathematics.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_high_school_microeconomics": {
"task": "mmlu_high_school_microeconomics",
"task_alias": "high_school_microeconomics",
"tag": "mmlu_social_sciences_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "high_school_microeconomics",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about high school microeconomics.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_high_school_physics": {
"task": "mmlu_high_school_physics",
"task_alias": "high_school_physics",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "high_school_physics",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about high school physics.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_high_school_psychology": {
"task": "mmlu_high_school_psychology",
"task_alias": "high_school_psychology",
"tag": "mmlu_social_sciences_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "high_school_psychology",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about high school psychology.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_high_school_statistics": {
"task": "mmlu_high_school_statistics",
"task_alias": "high_school_statistics",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "high_school_statistics",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about high school statistics.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_high_school_us_history": {
"task": "mmlu_high_school_us_history",
"task_alias": "high_school_us_history",
"tag": "mmlu_humanities_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "high_school_us_history",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about high school us history.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_high_school_world_history": {
"task": "mmlu_high_school_world_history",
"task_alias": "high_school_world_history",
"tag": "mmlu_humanities_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "high_school_world_history",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about high school world history.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_human_aging": {
"task": "mmlu_human_aging",
"task_alias": "human_aging",
"tag": "mmlu_other_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "human_aging",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about human aging.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_human_sexuality": {
"task": "mmlu_human_sexuality",
"task_alias": "human_sexuality",
"tag": "mmlu_social_sciences_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "human_sexuality",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about human sexuality.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_international_law": {
"task": "mmlu_international_law",
"task_alias": "international_law",
"tag": "mmlu_humanities_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "international_law",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about international law.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_jurisprudence": {
"task": "mmlu_jurisprudence",
"task_alias": "jurisprudence",
"tag": "mmlu_humanities_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "jurisprudence",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about jurisprudence.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_logical_fallacies": {
"task": "mmlu_logical_fallacies",
"task_alias": "logical_fallacies",
"tag": "mmlu_humanities_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "logical_fallacies",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about logical fallacies.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_machine_learning": {
"task": "mmlu_machine_learning",
"task_alias": "machine_learning",
"tag": "mmlu_stem_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "machine_learning",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about machine learning.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_management": {
"task": "mmlu_management",
"task_alias": "management",
"tag": "mmlu_other_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "management",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about management.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_marketing": {
"task": "mmlu_marketing",
"task_alias": "marketing",
"tag": "mmlu_other_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "marketing",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about marketing.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_medical_genetics": {
"task": "mmlu_medical_genetics",
"task_alias": "medical_genetics",
"tag": "mmlu_other_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "medical_genetics",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about medical genetics.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_miscellaneous": {
"task": "mmlu_miscellaneous",
"task_alias": "miscellaneous",
"tag": "mmlu_other_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "miscellaneous",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about miscellaneous.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_moral_disputes": {
"task": "mmlu_moral_disputes",
"task_alias": "moral_disputes",
"tag": "mmlu_humanities_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "moral_disputes",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about moral disputes.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_moral_scenarios": {
"task": "mmlu_moral_scenarios",
"task_alias": "moral_scenarios",
"tag": "mmlu_humanities_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "moral_scenarios",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about moral scenarios.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_nutrition": {
"task": "mmlu_nutrition",
"task_alias": "nutrition",
"tag": "mmlu_other_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "nutrition",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about nutrition.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_philosophy": {
"task": "mmlu_philosophy",
"task_alias": "philosophy",
"tag": "mmlu_humanities_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "philosophy",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about philosophy.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_prehistory": {
"task": "mmlu_prehistory",
"task_alias": "prehistory",
"tag": "mmlu_humanities_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "prehistory",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about prehistory.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_professional_accounting": {
"task": "mmlu_professional_accounting",
"task_alias": "professional_accounting",
"tag": "mmlu_other_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "professional_accounting",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about professional accounting.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_professional_law": {
"task": "mmlu_professional_law",
"task_alias": "professional_law",
"tag": "mmlu_humanities_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "professional_law",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about professional law.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_professional_medicine": {
"task": "mmlu_professional_medicine",
"task_alias": "professional_medicine",
"tag": "mmlu_other_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "professional_medicine",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about professional medicine.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_professional_psychology": {
"task": "mmlu_professional_psychology",
"task_alias": "professional_psychology",
"tag": "mmlu_social_sciences_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "professional_psychology",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about professional psychology.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_public_relations": {
"task": "mmlu_public_relations",
"task_alias": "public_relations",
"tag": "mmlu_social_sciences_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "public_relations",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about public relations.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_security_studies": {
"task": "mmlu_security_studies",
"task_alias": "security_studies",
"tag": "mmlu_social_sciences_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "security_studies",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about security studies.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_sociology": {
"task": "mmlu_sociology",
"task_alias": "sociology",
"tag": "mmlu_social_sciences_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "sociology",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about sociology.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_us_foreign_policy": {
"task": "mmlu_us_foreign_policy",
"task_alias": "us_foreign_policy",
"tag": "mmlu_social_sciences_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "us_foreign_policy",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about us foreign policy.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_virology": {
"task": "mmlu_virology",
"task_alias": "virology",
"tag": "mmlu_other_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "virology",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about virology.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"mmlu_world_religions": {
"task": "mmlu_world_religions",
"task_alias": "world_religions",
"tag": "mmlu_humanities_tasks",
"dataset_path": "hails/mmlu_no_train",
"dataset_name": "world_religions",
"dataset_kwargs": {
"trust_remote_code": true
},
"test_split": "test",
"fewshot_split": "dev",
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
"doc_to_target": "answer",
"doc_to_choice": [
"A",
"B",
"C",
"D"
],
"description": "The following are multiple choice questions (with answers) about world religions.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"piqa": {
"task": "piqa",
"dataset_path": "piqa",
"dataset_kwargs": {
"trust_remote_code": true
},
"training_split": "train",
"validation_split": "validation",
"doc_to_text": "Question: {{goal}}\nAnswer:",
"doc_to_target": "label",
"doc_to_choice": "{{[sol1, sol2]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
},
{
"metric": "acc_norm",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "goal",
"metadata": {
"version": 1.0
}
},
"sciq": {
"task": "sciq",
"dataset_path": "sciq",
"training_split": "train",
"validation_split": "validation",
"test_split": "test",
"doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:",
"doc_to_target": 3,
"doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
},
{
"metric": "acc_norm",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{support}} {{question}}",
"metadata": {
"version": 1.0
}
},
"wikitext": {
"task": "wikitext",
"dataset_path": "EleutherAI/wikitext_document_level",
"dataset_name": "wikitext-2-raw-v1",
"dataset_kwargs": {
"trust_remote_code": true
},
"training_split": "train",
"validation_split": "validation",
"test_split": "test",
"doc_to_text": "",
"doc_to_target": "def wikitext_detokenizer(doc):\n string = doc[\"page\"]\n # contractions\n string = string.replace(\"s '\", \"s'\")\n string = re.sub(r\"/' [0-9]/\", r\"/'[0-9]/\", string)\n # number separators\n string = string.replace(\" @-@ \", \"-\")\n string = string.replace(\" @,@ \", \",\")\n string = string.replace(\" @.@ \", \".\")\n # punctuation\n string = string.replace(\" : \", \": \")\n string = string.replace(\" ; \", \"; \")\n string = string.replace(\" . \", \". \")\n string = string.replace(\" ! \", \"! \")\n string = string.replace(\" ? \", \"? \")\n string = string.replace(\" , \", \", \")\n # double brackets\n string = re.sub(r\"\\(\\s*([^\\)]*?)\\s*\\)\", r\"(\\1)\", string)\n string = re.sub(r\"\\[\\s*([^\\]]*?)\\s*\\]\", r\"[\\1]\", string)\n string = re.sub(r\"{\\s*([^}]*?)\\s*}\", r\"{\\1}\", string)\n string = re.sub(r\"\\\"\\s*([^\\\"]*?)\\s*\\\"\", r'\"\\1\"', string)\n string = re.sub(r\"'\\s*([^']*?)\\s*'\", r\"'\\1'\", string)\n # miscellaneous\n string = string.replace(\"= = = =\", \"====\")\n string = string.replace(\"= = =\", \"===\")\n string = string.replace(\"= =\", \"==\")\n string = string.replace(\" \" + chr(176) + \" \", chr(176))\n string = string.replace(\" \\n\", \"\\n\")\n string = string.replace(\"\\n \", \"\\n\")\n string = string.replace(\" N \", \" 1 \")\n string = string.replace(\" 's\", \"'s\")\n\n return string\n",
"process_results": "def process_results(doc, results):\n (loglikelihood,) = results\n # IMPORTANT: wikitext counts number of words in *original doc before detokenization*\n _words = len(re.split(r\"\\s+\", doc[\"page\"]))\n _bytes = len(doc[\"page\"].encode(\"utf-8\"))\n return {\n \"word_perplexity\": (loglikelihood, _words),\n \"byte_perplexity\": (loglikelihood, _bytes),\n \"bits_per_byte\": (loglikelihood, _bytes),\n }\n",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "word_perplexity"
},
{
"metric": "byte_perplexity"
},
{
"metric": "bits_per_byte"
}
],
"output_type": "loglikelihood_rolling",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "{{page}}",
"metadata": {
"version": 2.0
}
},
"winogrande": {
"task": "winogrande",
"dataset_path": "winogrande",
"dataset_name": "winogrande_xl",
"dataset_kwargs": {
"trust_remote_code": true
},
"training_split": "train",
"validation_split": "validation",
"doc_to_text": "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n return answer_to_num[doc[\"answer\"]]\n",
"doc_to_target": "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"_\") + 1\n return doc[\"sentence\"][idx:].strip()\n",
"doc_to_choice": "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"_\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"sentence\"][:idx] + opt for opt in options]\n",
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc",
"aggregation": "mean",
"higher_is_better": true
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": true,
"doc_to_decontamination_query": "sentence",
"metadata": {
"version": 1.0
}
},
"wsc": {
"task": "wsc",
"tag": [
"super-glue-lm-eval-v1"
],
"dataset_path": "super_glue",
"dataset_name": "wsc.fixed",
"training_split": "train",
"validation_split": "validation",
"doc_to_text": "def default_doc_to_text(x):\n raw_passage = x[\"text\"]\n # NOTE: HuggingFace span indices are word-based not character-based.\n pre = \" \".join(raw_passage.split()[: x[\"span2_index\"]])\n post = raw_passage[len(pre) + len(x[\"span2_text\"]) + 1 :]\n passage = general_detokenize(pre + \" *{}*\".format(x[\"span2_text\"]) + post)\n noun = x[\"span1_text\"]\n pronoun = x[\"span2_text\"]\n text = (\n f\"Passage: {passage}\\n\"\n + f'Question: In the passage above, does the pronoun \"*{pronoun}*\" refer to \"*{noun}*\"?\\n'\n + \"Answer:\"\n )\n return text\n",
"doc_to_target": "label",
"doc_to_choice": [
"no",
"yes"
],
"description": "",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"num_fewshot": 0,
"metric_list": [
{
"metric": "acc"
}
],
"output_type": "multiple_choice",
"repeats": 1,
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
}
},
"versions": {
"arc_challenge": 1.0,
"arc_easy": 1.0,
"blimp": 2.0,
"blimp_adjunct_island": 1.0,
"blimp_anaphor_gender_agreement": 1.0,
"blimp_anaphor_number_agreement": 1.0,
"blimp_animate_subject_passive": 1.0,
"blimp_animate_subject_trans": 1.0,
"blimp_causative": 1.0,
"blimp_complex_NP_island": 1.0,
"blimp_coordinate_structure_constraint_complex_left_branch": 1.0,
"blimp_coordinate_structure_constraint_object_extraction": 1.0,
"blimp_determiner_noun_agreement_1": 1.0,
"blimp_determiner_noun_agreement_2": 1.0,
"blimp_determiner_noun_agreement_irregular_1": 1.0,
"blimp_determiner_noun_agreement_irregular_2": 1.0,
"blimp_determiner_noun_agreement_with_adj_2": 1.0,
"blimp_determiner_noun_agreement_with_adj_irregular_1": 1.0,
"blimp_determiner_noun_agreement_with_adj_irregular_2": 1.0,
"blimp_determiner_noun_agreement_with_adjective_1": 1.0,
"blimp_distractor_agreement_relational_noun": 1.0,
"blimp_distractor_agreement_relative_clause": 1.0,
"blimp_drop_argument": 1.0,
"blimp_ellipsis_n_bar_1": 1.0,
"blimp_ellipsis_n_bar_2": 1.0,
"blimp_existential_there_object_raising": 1.0,
"blimp_existential_there_quantifiers_1": 1.0,
"blimp_existential_there_quantifiers_2": 1.0,
"blimp_existential_there_subject_raising": 1.0,
"blimp_expletive_it_object_raising": 1.0,
"blimp_inchoative": 1.0,
"blimp_intransitive": 1.0,
"blimp_irregular_past_participle_adjectives": 1.0,
"blimp_irregular_past_participle_verbs": 1.0,
"blimp_irregular_plural_subject_verb_agreement_1": 1.0,
"blimp_irregular_plural_subject_verb_agreement_2": 1.0,
"blimp_left_branch_island_echo_question": 1.0,
"blimp_left_branch_island_simple_question": 1.0,
"blimp_matrix_question_npi_licensor_present": 1.0,
"blimp_npi_present_1": 1.0,
"blimp_npi_present_2": 1.0,
"blimp_only_npi_licensor_present": 1.0,
"blimp_only_npi_scope": 1.0,
"blimp_passive_1": 1.0,
"blimp_passive_2": 1.0,
"blimp_principle_A_c_command": 1.0,
"blimp_principle_A_case_1": 1.0,
"blimp_principle_A_case_2": 1.0,
"blimp_principle_A_domain_1": 1.0,
"blimp_principle_A_domain_2": 1.0,
"blimp_principle_A_domain_3": 1.0,
"blimp_principle_A_reconstruction": 1.0,
"blimp_regular_plural_subject_verb_agreement_1": 1.0,
"blimp_regular_plural_subject_verb_agreement_2": 1.0,
"blimp_sentential_negation_npi_licensor_present": 1.0,
"blimp_sentential_negation_npi_scope": 1.0,
"blimp_sentential_subject_island": 1.0,
"blimp_superlative_quantifiers_1": 1.0,
"blimp_superlative_quantifiers_2": 1.0,
"blimp_tough_vs_raising_1": 1.0,
"blimp_tough_vs_raising_2": 1.0,
"blimp_transitive": 1.0,
"blimp_wh_island": 1.0,
"blimp_wh_questions_object_gap": 1.0,
"blimp_wh_questions_subject_gap": 1.0,
"blimp_wh_questions_subject_gap_long_distance": 1.0,
"blimp_wh_vs_that_no_gap": 1.0,
"blimp_wh_vs_that_no_gap_long_distance": 1.0,
"blimp_wh_vs_that_with_gap": 1.0,
"blimp_wh_vs_that_with_gap_long_distance": 1.0,
"lambada_openai": 1.0,
"logiqa": 1.0,
"mmlu": 2,
"mmlu_abstract_algebra": 1.0,
"mmlu_anatomy": 1.0,
"mmlu_astronomy": 1.0,
"mmlu_business_ethics": 1.0,
"mmlu_clinical_knowledge": 1.0,
"mmlu_college_biology": 1.0,
"mmlu_college_chemistry": 1.0,
"mmlu_college_computer_science": 1.0,
"mmlu_college_mathematics": 1.0,
"mmlu_college_medicine": 1.0,
"mmlu_college_physics": 1.0,
"mmlu_computer_security": 1.0,
"mmlu_conceptual_physics": 1.0,
"mmlu_econometrics": 1.0,
"mmlu_electrical_engineering": 1.0,
"mmlu_elementary_mathematics": 1.0,
"mmlu_formal_logic": 1.0,
"mmlu_global_facts": 1.0,
"mmlu_high_school_biology": 1.0,
"mmlu_high_school_chemistry": 1.0,
"mmlu_high_school_computer_science": 1.0,
"mmlu_high_school_european_history": 1.0,
"mmlu_high_school_geography": 1.0,
"mmlu_high_school_government_and_politics": 1.0,
"mmlu_high_school_macroeconomics": 1.0,
"mmlu_high_school_mathematics": 1.0,
"mmlu_high_school_microeconomics": 1.0,
"mmlu_high_school_physics": 1.0,
"mmlu_high_school_psychology": 1.0,
"mmlu_high_school_statistics": 1.0,
"mmlu_high_school_us_history": 1.0,
"mmlu_high_school_world_history": 1.0,
"mmlu_human_aging": 1.0,
"mmlu_human_sexuality": 1.0,
"mmlu_humanities": 2,
"mmlu_international_law": 1.0,
"mmlu_jurisprudence": 1.0,
"mmlu_logical_fallacies": 1.0,
"mmlu_machine_learning": 1.0,
"mmlu_management": 1.0,
"mmlu_marketing": 1.0,
"mmlu_medical_genetics": 1.0,
"mmlu_miscellaneous": 1.0,
"mmlu_moral_disputes": 1.0,
"mmlu_moral_scenarios": 1.0,
"mmlu_nutrition": 1.0,
"mmlu_other": 2,
"mmlu_philosophy": 1.0,
"mmlu_prehistory": 1.0,
"mmlu_professional_accounting": 1.0,
"mmlu_professional_law": 1.0,
"mmlu_professional_medicine": 1.0,
"mmlu_professional_psychology": 1.0,
"mmlu_public_relations": 1.0,
"mmlu_security_studies": 1.0,
"mmlu_social_sciences": 2,
"mmlu_sociology": 1.0,
"mmlu_stem": 2,
"mmlu_us_foreign_policy": 1.0,
"mmlu_virology": 1.0,
"mmlu_world_religions": 1.0,
"piqa": 1.0,
"sciq": 1.0,
"wikitext": 2.0,
"winogrande": 1.0,
"wsc": 1.0
},
"n-shot": {
"arc_challenge": 0,
"arc_easy": 0,
"blimp_adjunct_island": 0,
"blimp_anaphor_gender_agreement": 0,
"blimp_anaphor_number_agreement": 0,
"blimp_animate_subject_passive": 0,
"blimp_animate_subject_trans": 0,
"blimp_causative": 0,
"blimp_complex_NP_island": 0,
"blimp_coordinate_structure_constraint_complex_left_branch": 0,
"blimp_coordinate_structure_constraint_object_extraction": 0,
"blimp_determiner_noun_agreement_1": 0,
"blimp_determiner_noun_agreement_2": 0,
"blimp_determiner_noun_agreement_irregular_1": 0,
"blimp_determiner_noun_agreement_irregular_2": 0,
"blimp_determiner_noun_agreement_with_adj_2": 0,
"blimp_determiner_noun_agreement_with_adj_irregular_1": 0,
"blimp_determiner_noun_agreement_with_adj_irregular_2": 0,
"blimp_determiner_noun_agreement_with_adjective_1": 0,
"blimp_distractor_agreement_relational_noun": 0,
"blimp_distractor_agreement_relative_clause": 0,
"blimp_drop_argument": 0,
"blimp_ellipsis_n_bar_1": 0,
"blimp_ellipsis_n_bar_2": 0,
"blimp_existential_there_object_raising": 0,
"blimp_existential_there_quantifiers_1": 0,
"blimp_existential_there_quantifiers_2": 0,
"blimp_existential_there_subject_raising": 0,
"blimp_expletive_it_object_raising": 0,
"blimp_inchoative": 0,
"blimp_intransitive": 0,
"blimp_irregular_past_participle_adjectives": 0,
"blimp_irregular_past_participle_verbs": 0,
"blimp_irregular_plural_subject_verb_agreement_1": 0,
"blimp_irregular_plural_subject_verb_agreement_2": 0,
"blimp_left_branch_island_echo_question": 0,
"blimp_left_branch_island_simple_question": 0,
"blimp_matrix_question_npi_licensor_present": 0,
"blimp_npi_present_1": 0,
"blimp_npi_present_2": 0,
"blimp_only_npi_licensor_present": 0,
"blimp_only_npi_scope": 0,
"blimp_passive_1": 0,
"blimp_passive_2": 0,
"blimp_principle_A_c_command": 0,
"blimp_principle_A_case_1": 0,
"blimp_principle_A_case_2": 0,
"blimp_principle_A_domain_1": 0,
"blimp_principle_A_domain_2": 0,
"blimp_principle_A_domain_3": 0,
"blimp_principle_A_reconstruction": 0,
"blimp_regular_plural_subject_verb_agreement_1": 0,
"blimp_regular_plural_subject_verb_agreement_2": 0,
"blimp_sentential_negation_npi_licensor_present": 0,
"blimp_sentential_negation_npi_scope": 0,
"blimp_sentential_subject_island": 0,
"blimp_superlative_quantifiers_1": 0,
"blimp_superlative_quantifiers_2": 0,
"blimp_tough_vs_raising_1": 0,
"blimp_tough_vs_raising_2": 0,
"blimp_transitive": 0,
"blimp_wh_island": 0,
"blimp_wh_questions_object_gap": 0,
"blimp_wh_questions_subject_gap": 0,
"blimp_wh_questions_subject_gap_long_distance": 0,
"blimp_wh_vs_that_no_gap": 0,
"blimp_wh_vs_that_no_gap_long_distance": 0,
"blimp_wh_vs_that_with_gap": 0,
"blimp_wh_vs_that_with_gap_long_distance": 0,
"lambada_openai": 0,
"logiqa": 0,
"mmlu_abstract_algebra": 0,
"mmlu_anatomy": 0,
"mmlu_astronomy": 0,
"mmlu_business_ethics": 0,
"mmlu_clinical_knowledge": 0,
"mmlu_college_biology": 0,
"mmlu_college_chemistry": 0,
"mmlu_college_computer_science": 0,
"mmlu_college_mathematics": 0,
"mmlu_college_medicine": 0,
"mmlu_college_physics": 0,
"mmlu_computer_security": 0,
"mmlu_conceptual_physics": 0,
"mmlu_econometrics": 0,
"mmlu_electrical_engineering": 0,
"mmlu_elementary_mathematics": 0,
"mmlu_formal_logic": 0,
"mmlu_global_facts": 0,
"mmlu_high_school_biology": 0,
"mmlu_high_school_chemistry": 0,
"mmlu_high_school_computer_science": 0,
"mmlu_high_school_european_history": 0,
"mmlu_high_school_geography": 0,
"mmlu_high_school_government_and_politics": 0,
"mmlu_high_school_macroeconomics": 0,
"mmlu_high_school_mathematics": 0,
"mmlu_high_school_microeconomics": 0,
"mmlu_high_school_physics": 0,
"mmlu_high_school_psychology": 0,
"mmlu_high_school_statistics": 0,
"mmlu_high_school_us_history": 0,
"mmlu_high_school_world_history": 0,
"mmlu_human_aging": 0,
"mmlu_human_sexuality": 0,
"mmlu_international_law": 0,
"mmlu_jurisprudence": 0,
"mmlu_logical_fallacies": 0,
"mmlu_machine_learning": 0,
"mmlu_management": 0,
"mmlu_marketing": 0,
"mmlu_medical_genetics": 0,
"mmlu_miscellaneous": 0,
"mmlu_moral_disputes": 0,
"mmlu_moral_scenarios": 0,
"mmlu_nutrition": 0,
"mmlu_philosophy": 0,
"mmlu_prehistory": 0,
"mmlu_professional_accounting": 0,
"mmlu_professional_law": 0,
"mmlu_professional_medicine": 0,
"mmlu_professional_psychology": 0,
"mmlu_public_relations": 0,
"mmlu_security_studies": 0,
"mmlu_sociology": 0,
"mmlu_us_foreign_policy": 0,
"mmlu_virology": 0,
"mmlu_world_religions": 0,
"piqa": 0,
"sciq": 0,
"wikitext": 0,
"winogrande": 0,
"wsc": 0
},
"higher_is_better": {
"arc_challenge": {
"acc": true,
"acc_norm": true
},
"arc_easy": {
"acc": true,
"acc_norm": true
},
"blimp": {
"acc": true
},
"blimp_adjunct_island": {
"acc": true
},
"blimp_anaphor_gender_agreement": {
"acc": true
},
"blimp_anaphor_number_agreement": {
"acc": true
},
"blimp_animate_subject_passive": {
"acc": true
},
"blimp_animate_subject_trans": {
"acc": true
},
"blimp_causative": {
"acc": true
},
"blimp_complex_NP_island": {
"acc": true
},
"blimp_coordinate_structure_constraint_complex_left_branch": {
"acc": true
},
"blimp_coordinate_structure_constraint_object_extraction": {
"acc": true
},
"blimp_determiner_noun_agreement_1": {
"acc": true
},
"blimp_determiner_noun_agreement_2": {
"acc": true
},
"blimp_determiner_noun_agreement_irregular_1": {
"acc": true
},
"blimp_determiner_noun_agreement_irregular_2": {
"acc": true
},
"blimp_determiner_noun_agreement_with_adj_2": {
"acc": true
},
"blimp_determiner_noun_agreement_with_adj_irregular_1": {
"acc": true
},
"blimp_determiner_noun_agreement_with_adj_irregular_2": {
"acc": true
},
"blimp_determiner_noun_agreement_with_adjective_1": {
"acc": true
},
"blimp_distractor_agreement_relational_noun": {
"acc": true
},
"blimp_distractor_agreement_relative_clause": {
"acc": true
},
"blimp_drop_argument": {
"acc": true
},
"blimp_ellipsis_n_bar_1": {
"acc": true
},
"blimp_ellipsis_n_bar_2": {
"acc": true
},
"blimp_existential_there_object_raising": {
"acc": true
},
"blimp_existential_there_quantifiers_1": {
"acc": true
},
"blimp_existential_there_quantifiers_2": {
"acc": true
},
"blimp_existential_there_subject_raising": {
"acc": true
},
"blimp_expletive_it_object_raising": {
"acc": true
},
"blimp_inchoative": {
"acc": true
},
"blimp_intransitive": {
"acc": true
},
"blimp_irregular_past_participle_adjectives": {
"acc": true
},
"blimp_irregular_past_participle_verbs": {
"acc": true
},
"blimp_irregular_plural_subject_verb_agreement_1": {
"acc": true
},
"blimp_irregular_plural_subject_verb_agreement_2": {
"acc": true
},
"blimp_left_branch_island_echo_question": {
"acc": true
},
"blimp_left_branch_island_simple_question": {
"acc": true
},
"blimp_matrix_question_npi_licensor_present": {
"acc": true
},
"blimp_npi_present_1": {
"acc": true
},
"blimp_npi_present_2": {
"acc": true
},
"blimp_only_npi_licensor_present": {
"acc": true
},
"blimp_only_npi_scope": {
"acc": true
},
"blimp_passive_1": {
"acc": true
},
"blimp_passive_2": {
"acc": true
},
"blimp_principle_A_c_command": {
"acc": true
},
"blimp_principle_A_case_1": {
"acc": true
},
"blimp_principle_A_case_2": {
"acc": true
},
"blimp_principle_A_domain_1": {
"acc": true
},
"blimp_principle_A_domain_2": {
"acc": true
},
"blimp_principle_A_domain_3": {
"acc": true
},
"blimp_principle_A_reconstruction": {
"acc": true
},
"blimp_regular_plural_subject_verb_agreement_1": {
"acc": true
},
"blimp_regular_plural_subject_verb_agreement_2": {
"acc": true
},
"blimp_sentential_negation_npi_licensor_present": {
"acc": true
},
"blimp_sentential_negation_npi_scope": {
"acc": true
},
"blimp_sentential_subject_island": {
"acc": true
},
"blimp_superlative_quantifiers_1": {
"acc": true
},
"blimp_superlative_quantifiers_2": {
"acc": true
},
"blimp_tough_vs_raising_1": {
"acc": true
},
"blimp_tough_vs_raising_2": {
"acc": true
},
"blimp_transitive": {
"acc": true
},
"blimp_wh_island": {
"acc": true
},
"blimp_wh_questions_object_gap": {
"acc": true
},
"blimp_wh_questions_subject_gap": {
"acc": true
},
"blimp_wh_questions_subject_gap_long_distance": {
"acc": true
},
"blimp_wh_vs_that_no_gap": {
"acc": true
},
"blimp_wh_vs_that_no_gap_long_distance": {
"acc": true
},
"blimp_wh_vs_that_with_gap": {
"acc": true
},
"blimp_wh_vs_that_with_gap_long_distance": {
"acc": true
},
"lambada_openai": {
"perplexity": false,
"acc": true
},
"logiqa": {
"acc": true,
"acc_norm": true
},
"mmlu": {
"acc": true
},
"mmlu_abstract_algebra": {
"acc": true
},
"mmlu_anatomy": {
"acc": true
},
"mmlu_astronomy": {
"acc": true
},
"mmlu_business_ethics": {
"acc": true
},
"mmlu_clinical_knowledge": {
"acc": true
},
"mmlu_college_biology": {
"acc": true
},
"mmlu_college_chemistry": {
"acc": true
},
"mmlu_college_computer_science": {
"acc": true
},
"mmlu_college_mathematics": {
"acc": true
},
"mmlu_college_medicine": {
"acc": true
},
"mmlu_college_physics": {
"acc": true
},
"mmlu_computer_security": {
"acc": true
},
"mmlu_conceptual_physics": {
"acc": true
},
"mmlu_econometrics": {
"acc": true
},
"mmlu_electrical_engineering": {
"acc": true
},
"mmlu_elementary_mathematics": {
"acc": true
},
"mmlu_formal_logic": {
"acc": true
},
"mmlu_global_facts": {
"acc": true
},
"mmlu_high_school_biology": {
"acc": true
},
"mmlu_high_school_chemistry": {
"acc": true
},
"mmlu_high_school_computer_science": {
"acc": true
},
"mmlu_high_school_european_history": {
"acc": true
},
"mmlu_high_school_geography": {
"acc": true
},
"mmlu_high_school_government_and_politics": {
"acc": true
},
"mmlu_high_school_macroeconomics": {
"acc": true
},
"mmlu_high_school_mathematics": {
"acc": true
},
"mmlu_high_school_microeconomics": {
"acc": true
},
"mmlu_high_school_physics": {
"acc": true
},
"mmlu_high_school_psychology": {
"acc": true
},
"mmlu_high_school_statistics": {
"acc": true
},
"mmlu_high_school_us_history": {
"acc": true
},
"mmlu_high_school_world_history": {
"acc": true
},
"mmlu_human_aging": {
"acc": true
},
"mmlu_human_sexuality": {
"acc": true
},
"mmlu_humanities": {
"acc": true
},
"mmlu_international_law": {
"acc": true
},
"mmlu_jurisprudence": {
"acc": true
},
"mmlu_logical_fallacies": {
"acc": true
},
"mmlu_machine_learning": {
"acc": true
},
"mmlu_management": {
"acc": true
},
"mmlu_marketing": {
"acc": true
},
"mmlu_medical_genetics": {
"acc": true
},
"mmlu_miscellaneous": {
"acc": true
},
"mmlu_moral_disputes": {
"acc": true
},
"mmlu_moral_scenarios": {
"acc": true
},
"mmlu_nutrition": {
"acc": true
},
"mmlu_other": {
"acc": true
},
"mmlu_philosophy": {
"acc": true
},
"mmlu_prehistory": {
"acc": true
},
"mmlu_professional_accounting": {
"acc": true
},
"mmlu_professional_law": {
"acc": true
},
"mmlu_professional_medicine": {
"acc": true
},
"mmlu_professional_psychology": {
"acc": true
},
"mmlu_public_relations": {
"acc": true
},
"mmlu_security_studies": {
"acc": true
},
"mmlu_social_sciences": {
"acc": true
},
"mmlu_sociology": {
"acc": true
},
"mmlu_stem": {
"acc": true
},
"mmlu_us_foreign_policy": {
"acc": true
},
"mmlu_virology": {
"acc": true
},
"mmlu_world_religions": {
"acc": true
},
"piqa": {
"acc": true,
"acc_norm": true
},
"sciq": {
"acc": true,
"acc_norm": true
},
"wikitext": {
"word_perplexity": false,
"byte_perplexity": false,
"bits_per_byte": false
},
"winogrande": {
"acc": true
},
"wsc": {
"acc": true
}
},
"n-samples": {
"wsc": {
"original": 104,
"effective": 104
},
"winogrande": {
"original": 1267,
"effective": 1267
},
"wikitext": {
"original": 62,
"effective": 62
},
"sciq": {
"original": 1000,
"effective": 1000
},
"piqa": {
"original": 1838,
"effective": 1838
},
"mmlu_elementary_mathematics": {
"original": 378,
"effective": 378
},
"mmlu_electrical_engineering": {
"original": 145,
"effective": 145
},
"mmlu_high_school_computer_science": {
"original": 100,
"effective": 100
},
"mmlu_high_school_physics": {
"original": 151,
"effective": 151
},
"mmlu_college_mathematics": {
"original": 100,
"effective": 100
},
"mmlu_college_chemistry": {
"original": 100,
"effective": 100
},
"mmlu_machine_learning": {
"original": 112,
"effective": 112
},
"mmlu_high_school_mathematics": {
"original": 270,
"effective": 270
},
"mmlu_computer_security": {
"original": 100,
"effective": 100
},
"mmlu_conceptual_physics": {
"original": 235,
"effective": 235
},
"mmlu_high_school_statistics": {
"original": 216,
"effective": 216
},
"mmlu_high_school_biology": {
"original": 310,
"effective": 310
},
"mmlu_astronomy": {
"original": 152,
"effective": 152
},
"mmlu_college_computer_science": {
"original": 100,
"effective": 100
},
"mmlu_college_biology": {
"original": 144,
"effective": 144
},
"mmlu_college_physics": {
"original": 102,
"effective": 102
},
"mmlu_anatomy": {
"original": 135,
"effective": 135
},
"mmlu_high_school_chemistry": {
"original": 203,
"effective": 203
},
"mmlu_abstract_algebra": {
"original": 100,
"effective": 100
},
"mmlu_college_medicine": {
"original": 173,
"effective": 173
},
"mmlu_medical_genetics": {
"original": 100,
"effective": 100
},
"mmlu_business_ethics": {
"original": 100,
"effective": 100
},
"mmlu_miscellaneous": {
"original": 783,
"effective": 783
},
"mmlu_nutrition": {
"original": 306,
"effective": 306
},
"mmlu_clinical_knowledge": {
"original": 265,
"effective": 265
},
"mmlu_human_aging": {
"original": 223,
"effective": 223
},
"mmlu_professional_accounting": {
"original": 282,
"effective": 282
},
"mmlu_marketing": {
"original": 234,
"effective": 234
},
"mmlu_global_facts": {
"original": 100,
"effective": 100
},
"mmlu_professional_medicine": {
"original": 272,
"effective": 272
},
"mmlu_virology": {
"original": 166,
"effective": 166
},
"mmlu_management": {
"original": 103,
"effective": 103
},
"mmlu_us_foreign_policy": {
"original": 100,
"effective": 100
},
"mmlu_sociology": {
"original": 201,
"effective": 201
},
"mmlu_econometrics": {
"original": 114,
"effective": 114
},
"mmlu_security_studies": {
"original": 245,
"effective": 245
},
"mmlu_high_school_geography": {
"original": 198,
"effective": 198
},
"mmlu_public_relations": {
"original": 110,
"effective": 110
},
"mmlu_high_school_microeconomics": {
"original": 238,
"effective": 238
},
"mmlu_professional_psychology": {
"original": 612,
"effective": 612
},
"mmlu_high_school_macroeconomics": {
"original": 390,
"effective": 390
},
"mmlu_human_sexuality": {
"original": 131,
"effective": 131
},
"mmlu_high_school_government_and_politics": {
"original": 193,
"effective": 193
},
"mmlu_high_school_psychology": {
"original": 545,
"effective": 545
},
"mmlu_moral_disputes": {
"original": 346,
"effective": 346
},
"mmlu_high_school_world_history": {
"original": 237,
"effective": 237
},
"mmlu_jurisprudence": {
"original": 108,
"effective": 108
},
"mmlu_philosophy": {
"original": 311,
"effective": 311
},
"mmlu_high_school_us_history": {
"original": 204,
"effective": 204
},
"mmlu_professional_law": {
"original": 1534,
"effective": 1534
},
"mmlu_logical_fallacies": {
"original": 163,
"effective": 163
},
"mmlu_moral_scenarios": {
"original": 895,
"effective": 895
},
"mmlu_formal_logic": {
"original": 126,
"effective": 126
},
"mmlu_prehistory": {
"original": 324,
"effective": 324
},
"mmlu_high_school_european_history": {
"original": 165,
"effective": 165
},
"mmlu_world_religions": {
"original": 171,
"effective": 171
},
"mmlu_international_law": {
"original": 121,
"effective": 121
},
"logiqa": {
"original": 651,
"effective": 651
},
"lambada_openai": {
"original": 5153,
"effective": 5153
},
"blimp_adjunct_island": {
"original": 1000,
"effective": 1000
},
"blimp_anaphor_gender_agreement": {
"original": 1000,
"effective": 1000
},
"blimp_anaphor_number_agreement": {
"original": 1000,
"effective": 1000
},
"blimp_animate_subject_passive": {
"original": 1000,
"effective": 1000
},
"blimp_animate_subject_trans": {
"original": 1000,
"effective": 1000
},
"blimp_causative": {
"original": 1000,
"effective": 1000
},
"blimp_complex_NP_island": {
"original": 1000,
"effective": 1000
},
"blimp_coordinate_structure_constraint_complex_left_branch": {
"original": 1000,
"effective": 1000
},
"blimp_coordinate_structure_constraint_object_extraction": {
"original": 1000,
"effective": 1000
},
"blimp_determiner_noun_agreement_1": {
"original": 1000,
"effective": 1000
},
"blimp_determiner_noun_agreement_2": {
"original": 1000,
"effective": 1000
},
"blimp_determiner_noun_agreement_irregular_1": {
"original": 1000,
"effective": 1000
},
"blimp_determiner_noun_agreement_irregular_2": {
"original": 1000,
"effective": 1000
},
"blimp_determiner_noun_agreement_with_adj_2": {
"original": 1000,
"effective": 1000
},
"blimp_determiner_noun_agreement_with_adj_irregular_1": {
"original": 1000,
"effective": 1000
},
"blimp_determiner_noun_agreement_with_adj_irregular_2": {
"original": 1000,
"effective": 1000
},
"blimp_determiner_noun_agreement_with_adjective_1": {
"original": 1000,
"effective": 1000
},
"blimp_distractor_agreement_relational_noun": {
"original": 1000,
"effective": 1000
},
"blimp_distractor_agreement_relative_clause": {
"original": 1000,
"effective": 1000
},
"blimp_drop_argument": {
"original": 1000,
"effective": 1000
},
"blimp_ellipsis_n_bar_1": {
"original": 1000,
"effective": 1000
},
"blimp_ellipsis_n_bar_2": {
"original": 1000,
"effective": 1000
},
"blimp_existential_there_object_raising": {
"original": 1000,
"effective": 1000
},
"blimp_existential_there_quantifiers_1": {
"original": 1000,
"effective": 1000
},
"blimp_existential_there_quantifiers_2": {
"original": 1000,
"effective": 1000
},
"blimp_existential_there_subject_raising": {
"original": 1000,
"effective": 1000
},
"blimp_expletive_it_object_raising": {
"original": 1000,
"effective": 1000
},
"blimp_inchoative": {
"original": 1000,
"effective": 1000
},
"blimp_intransitive": {
"original": 1000,
"effective": 1000
},
"blimp_irregular_past_participle_adjectives": {
"original": 1000,
"effective": 1000
},
"blimp_irregular_past_participle_verbs": {
"original": 1000,
"effective": 1000
},
"blimp_irregular_plural_subject_verb_agreement_1": {
"original": 1000,
"effective": 1000
},
"blimp_irregular_plural_subject_verb_agreement_2": {
"original": 1000,
"effective": 1000
},
"blimp_left_branch_island_echo_question": {
"original": 1000,
"effective": 1000
},
"blimp_left_branch_island_simple_question": {
"original": 1000,
"effective": 1000
},
"blimp_matrix_question_npi_licensor_present": {
"original": 1000,
"effective": 1000
},
"blimp_npi_present_1": {
"original": 1000,
"effective": 1000
},
"blimp_npi_present_2": {
"original": 1000,
"effective": 1000
},
"blimp_only_npi_licensor_present": {
"original": 1000,
"effective": 1000
},
"blimp_only_npi_scope": {
"original": 1000,
"effective": 1000
},
"blimp_passive_1": {
"original": 1000,
"effective": 1000
},
"blimp_passive_2": {
"original": 1000,
"effective": 1000
},
"blimp_principle_A_c_command": {
"original": 1000,
"effective": 1000
},
"blimp_principle_A_case_1": {
"original": 1000,
"effective": 1000
},
"blimp_principle_A_case_2": {
"original": 1000,
"effective": 1000
},
"blimp_principle_A_domain_1": {
"original": 1000,
"effective": 1000
},
"blimp_principle_A_domain_2": {
"original": 1000,
"effective": 1000
},
"blimp_principle_A_domain_3": {
"original": 1000,
"effective": 1000
},
"blimp_principle_A_reconstruction": {
"original": 1000,
"effective": 1000
},
"blimp_regular_plural_subject_verb_agreement_1": {
"original": 1000,
"effective": 1000
},
"blimp_regular_plural_subject_verb_agreement_2": {
"original": 1000,
"effective": 1000
},
"blimp_sentential_negation_npi_licensor_present": {
"original": 1000,
"effective": 1000
},
"blimp_sentential_negation_npi_scope": {
"original": 1000,
"effective": 1000
},
"blimp_sentential_subject_island": {
"original": 1000,
"effective": 1000
},
"blimp_superlative_quantifiers_1": {
"original": 1000,
"effective": 1000
},
"blimp_superlative_quantifiers_2": {
"original": 1000,
"effective": 1000
},
"blimp_tough_vs_raising_1": {
"original": 1000,
"effective": 1000
},
"blimp_tough_vs_raising_2": {
"original": 1000,
"effective": 1000
},
"blimp_transitive": {
"original": 1000,
"effective": 1000
},
"blimp_wh_island": {
"original": 1000,
"effective": 1000
},
"blimp_wh_questions_object_gap": {
"original": 1000,
"effective": 1000
},
"blimp_wh_questions_subject_gap": {
"original": 1000,
"effective": 1000
},
"blimp_wh_questions_subject_gap_long_distance": {
"original": 1000,
"effective": 1000
},
"blimp_wh_vs_that_no_gap": {
"original": 1000,
"effective": 1000
},
"blimp_wh_vs_that_no_gap_long_distance": {
"original": 1000,
"effective": 1000
},
"blimp_wh_vs_that_with_gap": {
"original": 1000,
"effective": 1000
},
"blimp_wh_vs_that_with_gap_long_distance": {
"original": 1000,
"effective": 1000
},
"arc_challenge": {
"original": 1172,
"effective": 1172
},
"arc_easy": {
"original": 2376,
"effective": 2376
}
},
"config": {
"model": "hf",
"model_args": "pretrained=EleutherAI/pythia-70m,revision=step32,dtype=float,trust_remote_code=True",
"model_num_parameters": 70426624,
"model_dtype": "torch.float32",
"model_revision": "step32",
"model_sha": "9e0688dcb3a4847a2915e42877894d2474ec5fa7",
"batch_size": "8",
"batch_sizes": [],
"device": "cuda:0",
"use_cache": null,
"limit": null,
"bootstrap_iters": 100000,
"gen_kwargs": null,
"random_seed": 0,
"numpy_seed": 1234,
"torch_seed": 1234,
"fewshot_seed": 1234
},
"git_hash": "a5b7c41",
"date": 1729868979.6040666,
"pretty_env_info": "PyTorch version: 2.5.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.3 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: 14.0.0-1ubuntu1.1\nCMake version: version 3.30.5\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.1.85+-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.2.140\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: NVIDIA A100-SXM4-40GB\nNvidia driver version: 535.104.05\ncuDNN version: Probably one of the following:\n/usr/lib/x86_64-linux-gnu/libcudnn.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.9.6\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 12\nOn-line CPU(s) list: 0-11\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 6\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.30\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 192 KiB (6 instances)\nL1i cache: 192 KiB (6 instances)\nL2 cache: 6 MiB (6 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-11\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Not affected\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable\nVulnerability Reg file data sampling: Not affected\nVulnerability Retbleed: Vulnerable\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Vulnerable\nVulnerability Spectre v1: Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers\nVulnerability Spectre v2: Vulnerable; IBPB: disabled; STIBP: disabled; PBRSB-eIBRS: Vulnerable; BHI: Vulnerable (Syscall hardening enabled)\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Vulnerable\n\nVersions of relevant libraries:\n[pip3] mypy==1.13.0\n[pip3] mypy-extensions==1.0.0\n[pip3] numpy==1.26.4\n[pip3] optree==0.13.0\n[pip3] torch==2.5.0+cu121\n[pip3] torchaudio==2.5.0+cu121\n[pip3] torchsummary==1.5.1\n[pip3] torchvision==0.20.0+cu121\n[conda] Could not collect",
"transformers_version": "4.44.2",
"upper_git_hash": null,
"tokenizer_pad_token": [
"<|endoftext|>",
"0"
],
"tokenizer_eos_token": [
"<|endoftext|>",
"0"
],
"tokenizer_bos_token": [
"<|endoftext|>",
"0"
],
"eot_token_id": 0,
"max_length": 2048,
"task_hashes": {},
"model_source": "hf",
"model_name": "EleutherAI/pythia-70m",
"model_name_sanitized": "EleutherAI__pythia-70m",
"system_instruction": null,
"system_instruction_sha": null,
"fewshot_as_multiturn": false,
"chat_template": null,
"chat_template_sha": null,
"start_time": 3861.23442999,
"end_time": 4504.757314303,
"total_evaluation_time_seconds": "643.5228843130003"
}