Last commit not found
{ | |
"results": { | |
"arc_challenge": { | |
"alias": "arc_challenge", | |
"acc,none": 0.21245733788395904, | |
"acc_stderr,none": 0.011953482906582952, | |
"acc_norm,none": 0.24488054607508533, | |
"acc_norm_stderr,none": 0.012566273985131358 | |
}, | |
"arc_easy": { | |
"alias": "arc_easy", | |
"acc,none": 0.26851851851851855, | |
"acc_stderr,none": 0.00909404255499485, | |
"acc_norm,none": 0.25462962962962965, | |
"acc_norm_stderr,none": 0.008939407288589395 | |
}, | |
"blimp": { | |
"acc,none": 0.5234925373134328, | |
"acc_stderr,none": 0.001869288174484705, | |
"alias": "blimp" | |
}, | |
"blimp_adjunct_island": { | |
"alias": " - blimp_adjunct_island", | |
"acc,none": 0.528, | |
"acc_stderr,none": 0.01579447578951148 | |
}, | |
"blimp_anaphor_gender_agreement": { | |
"alias": " - blimp_anaphor_gender_agreement", | |
"acc,none": 0.605, | |
"acc_stderr,none": 0.015466551464829344 | |
}, | |
"blimp_anaphor_number_agreement": { | |
"alias": " - blimp_anaphor_number_agreement", | |
"acc,none": 0.561, | |
"acc_stderr,none": 0.015701131345400774 | |
}, | |
"blimp_animate_subject_passive": { | |
"alias": " - blimp_animate_subject_passive", | |
"acc,none": 0.607, | |
"acc_stderr,none": 0.015452824654081496 | |
}, | |
"blimp_animate_subject_trans": { | |
"alias": " - blimp_animate_subject_trans", | |
"acc,none": 0.804, | |
"acc_stderr,none": 0.012559527926707371 | |
}, | |
"blimp_causative": { | |
"alias": " - blimp_causative", | |
"acc,none": 0.393, | |
"acc_stderr,none": 0.015452824654081496 | |
}, | |
"blimp_complex_NP_island": { | |
"alias": " - blimp_complex_NP_island", | |
"acc,none": 0.467, | |
"acc_stderr,none": 0.01578480789113878 | |
}, | |
"blimp_coordinate_structure_constraint_complex_left_branch": { | |
"alias": " - blimp_coordinate_structure_constraint_complex_left_branch", | |
"acc,none": 0.523, | |
"acc_stderr,none": 0.015802554246726098 | |
}, | |
"blimp_coordinate_structure_constraint_object_extraction": { | |
"alias": " - blimp_coordinate_structure_constraint_object_extraction", | |
"acc,none": 0.614, | |
"acc_stderr,none": 0.015402637476784373 | |
}, | |
"blimp_determiner_noun_agreement_1": { | |
"alias": " - blimp_determiner_noun_agreement_1", | |
"acc,none": 0.515, | |
"acc_stderr,none": 0.015812179641814892 | |
}, | |
"blimp_determiner_noun_agreement_2": { | |
"alias": " - blimp_determiner_noun_agreement_2", | |
"acc,none": 0.514, | |
"acc_stderr,none": 0.01581309754773099 | |
}, | |
"blimp_determiner_noun_agreement_irregular_1": { | |
"alias": " - blimp_determiner_noun_agreement_irregular_1", | |
"acc,none": 0.489, | |
"acc_stderr,none": 0.015815471195292686 | |
}, | |
"blimp_determiner_noun_agreement_irregular_2": { | |
"alias": " - blimp_determiner_noun_agreement_irregular_2", | |
"acc,none": 0.49, | |
"acc_stderr,none": 0.015816135752773203 | |
}, | |
"blimp_determiner_noun_agreement_with_adj_2": { | |
"alias": " - blimp_determiner_noun_agreement_with_adj_2", | |
"acc,none": 0.507, | |
"acc_stderr,none": 0.015817749561843574 | |
}, | |
"blimp_determiner_noun_agreement_with_adj_irregular_1": { | |
"alias": " - blimp_determiner_noun_agreement_with_adj_irregular_1", | |
"acc,none": 0.488, | |
"acc_stderr,none": 0.015814743314581818 | |
}, | |
"blimp_determiner_noun_agreement_with_adj_irregular_2": { | |
"alias": " - blimp_determiner_noun_agreement_with_adj_irregular_2", | |
"acc,none": 0.507, | |
"acc_stderr,none": 0.015817749561843567 | |
}, | |
"blimp_determiner_noun_agreement_with_adjective_1": { | |
"alias": " - blimp_determiner_noun_agreement_with_adjective_1", | |
"acc,none": 0.509, | |
"acc_stderr,none": 0.015816736995005392 | |
}, | |
"blimp_distractor_agreement_relational_noun": { | |
"alias": " - blimp_distractor_agreement_relational_noun", | |
"acc,none": 0.514, | |
"acc_stderr,none": 0.015813097547730987 | |
}, | |
"blimp_distractor_agreement_relative_clause": { | |
"alias": " - blimp_distractor_agreement_relative_clause", | |
"acc,none": 0.499, | |
"acc_stderr,none": 0.01581926829057682 | |
}, | |
"blimp_drop_argument": { | |
"alias": " - blimp_drop_argument", | |
"acc,none": 0.663, | |
"acc_stderr,none": 0.014955087918653609 | |
}, | |
"blimp_ellipsis_n_bar_1": { | |
"alias": " - blimp_ellipsis_n_bar_1", | |
"acc,none": 0.496, | |
"acc_stderr,none": 0.01581879370351089 | |
}, | |
"blimp_ellipsis_n_bar_2": { | |
"alias": " - blimp_ellipsis_n_bar_2", | |
"acc,none": 0.333, | |
"acc_stderr,none": 0.014910846164229859 | |
}, | |
"blimp_existential_there_object_raising": { | |
"alias": " - blimp_existential_there_object_raising", | |
"acc,none": 0.612, | |
"acc_stderr,none": 0.015417317979911077 | |
}, | |
"blimp_existential_there_quantifiers_1": { | |
"alias": " - blimp_existential_there_quantifiers_1", | |
"acc,none": 0.911, | |
"acc_stderr,none": 0.009008893392651504 | |
}, | |
"blimp_existential_there_quantifiers_2": { | |
"alias": " - blimp_existential_there_quantifiers_2", | |
"acc,none": 0.64, | |
"acc_stderr,none": 0.015186527932040126 | |
}, | |
"blimp_existential_there_subject_raising": { | |
"alias": " - blimp_existential_there_subject_raising", | |
"acc,none": 0.53, | |
"acc_stderr,none": 0.015790799515836763 | |
}, | |
"blimp_expletive_it_object_raising": { | |
"alias": " - blimp_expletive_it_object_raising", | |
"acc,none": 0.569, | |
"acc_stderr,none": 0.015667944488173498 | |
}, | |
"blimp_inchoative": { | |
"alias": " - blimp_inchoative", | |
"acc,none": 0.391, | |
"acc_stderr,none": 0.015438826294681792 | |
}, | |
"blimp_intransitive": { | |
"alias": " - blimp_intransitive", | |
"acc,none": 0.561, | |
"acc_stderr,none": 0.015701131345400767 | |
}, | |
"blimp_irregular_past_participle_adjectives": { | |
"alias": " - blimp_irregular_past_participle_adjectives", | |
"acc,none": 0.269, | |
"acc_stderr,none": 0.014029819522568196 | |
}, | |
"blimp_irregular_past_participle_verbs": { | |
"alias": " - blimp_irregular_past_participle_verbs", | |
"acc,none": 0.449, | |
"acc_stderr,none": 0.015736792768752027 | |
}, | |
"blimp_irregular_plural_subject_verb_agreement_1": { | |
"alias": " - blimp_irregular_plural_subject_verb_agreement_1", | |
"acc,none": 0.494, | |
"acc_stderr,none": 0.015818160898606715 | |
}, | |
"blimp_irregular_plural_subject_verb_agreement_2": { | |
"alias": " - blimp_irregular_plural_subject_verb_agreement_2", | |
"acc,none": 0.534, | |
"acc_stderr,none": 0.015782683329937614 | |
}, | |
"blimp_left_branch_island_echo_question": { | |
"alias": " - blimp_left_branch_island_echo_question", | |
"acc,none": 0.598, | |
"acc_stderr,none": 0.015512467135715078 | |
}, | |
"blimp_left_branch_island_simple_question": { | |
"alias": " - blimp_left_branch_island_simple_question", | |
"acc,none": 0.514, | |
"acc_stderr,none": 0.01581309754773099 | |
}, | |
"blimp_matrix_question_npi_licensor_present": { | |
"alias": " - blimp_matrix_question_npi_licensor_present", | |
"acc,none": 0.218, | |
"acc_stderr,none": 0.013063179040595289 | |
}, | |
"blimp_npi_present_1": { | |
"alias": " - blimp_npi_present_1", | |
"acc,none": 0.451, | |
"acc_stderr,none": 0.01574315237958554 | |
}, | |
"blimp_npi_present_2": { | |
"alias": " - blimp_npi_present_2", | |
"acc,none": 0.376, | |
"acc_stderr,none": 0.015325105508898127 | |
}, | |
"blimp_only_npi_licensor_present": { | |
"alias": " - blimp_only_npi_licensor_present", | |
"acc,none": 0.368, | |
"acc_stderr,none": 0.015258073561521802 | |
}, | |
"blimp_only_npi_scope": { | |
"alias": " - blimp_only_npi_scope", | |
"acc,none": 0.623, | |
"acc_stderr,none": 0.015333170125779859 | |
}, | |
"blimp_passive_1": { | |
"alias": " - blimp_passive_1", | |
"acc,none": 0.663, | |
"acc_stderr,none": 0.014955087918653602 | |
}, | |
"blimp_passive_2": { | |
"alias": " - blimp_passive_2", | |
"acc,none": 0.597, | |
"acc_stderr,none": 0.015518757419066533 | |
}, | |
"blimp_principle_A_c_command": { | |
"alias": " - blimp_principle_A_c_command", | |
"acc,none": 0.322, | |
"acc_stderr,none": 0.014782913600996664 | |
}, | |
"blimp_principle_A_case_1": { | |
"alias": " - blimp_principle_A_case_1", | |
"acc,none": 0.833, | |
"acc_stderr,none": 0.011800434324644586 | |
}, | |
"blimp_principle_A_case_2": { | |
"alias": " - blimp_principle_A_case_2", | |
"acc,none": 0.488, | |
"acc_stderr,none": 0.015814743314581818 | |
}, | |
"blimp_principle_A_domain_1": { | |
"alias": " - blimp_principle_A_domain_1", | |
"acc,none": 0.564, | |
"acc_stderr,none": 0.015689173023144078 | |
}, | |
"blimp_principle_A_domain_2": { | |
"alias": " - blimp_principle_A_domain_2", | |
"acc,none": 0.513, | |
"acc_stderr,none": 0.01581395210189663 | |
}, | |
"blimp_principle_A_domain_3": { | |
"alias": " - blimp_principle_A_domain_3", | |
"acc,none": 0.51, | |
"acc_stderr,none": 0.015816135752773196 | |
}, | |
"blimp_principle_A_reconstruction": { | |
"alias": " - blimp_principle_A_reconstruction", | |
"acc,none": 0.451, | |
"acc_stderr,none": 0.015743152379585533 | |
}, | |
"blimp_regular_plural_subject_verb_agreement_1": { | |
"alias": " - blimp_regular_plural_subject_verb_agreement_1", | |
"acc,none": 0.388, | |
"acc_stderr,none": 0.015417317979911076 | |
}, | |
"blimp_regular_plural_subject_verb_agreement_2": { | |
"alias": " - blimp_regular_plural_subject_verb_agreement_2", | |
"acc,none": 0.511, | |
"acc_stderr,none": 0.01581547119529269 | |
}, | |
"blimp_sentential_negation_npi_licensor_present": { | |
"alias": " - blimp_sentential_negation_npi_licensor_present", | |
"acc,none": 0.672, | |
"acc_stderr,none": 0.014853842487270333 | |
}, | |
"blimp_sentential_negation_npi_scope": { | |
"alias": " - blimp_sentential_negation_npi_scope", | |
"acc,none": 0.725, | |
"acc_stderr,none": 0.014127086556490526 | |
}, | |
"blimp_sentential_subject_island": { | |
"alias": " - blimp_sentential_subject_island", | |
"acc,none": 0.464, | |
"acc_stderr,none": 0.01577824302490459 | |
}, | |
"blimp_superlative_quantifiers_1": { | |
"alias": " - blimp_superlative_quantifiers_1", | |
"acc,none": 0.69, | |
"acc_stderr,none": 0.014632638658632905 | |
}, | |
"blimp_superlative_quantifiers_2": { | |
"alias": " - blimp_superlative_quantifiers_2", | |
"acc,none": 0.609, | |
"acc_stderr,none": 0.015438826294681783 | |
}, | |
"blimp_tough_vs_raising_1": { | |
"alias": " - blimp_tough_vs_raising_1", | |
"acc,none": 0.421, | |
"acc_stderr,none": 0.015620595475301318 | |
}, | |
"blimp_tough_vs_raising_2": { | |
"alias": " - blimp_tough_vs_raising_2", | |
"acc,none": 0.61, | |
"acc_stderr,none": 0.015431725053866608 | |
}, | |
"blimp_transitive": { | |
"alias": " - blimp_transitive", | |
"acc,none": 0.519, | |
"acc_stderr,none": 0.01580787426850585 | |
}, | |
"blimp_wh_island": { | |
"alias": " - blimp_wh_island", | |
"acc,none": 0.598, | |
"acc_stderr,none": 0.015512467135715078 | |
}, | |
"blimp_wh_questions_object_gap": { | |
"alias": " - blimp_wh_questions_object_gap", | |
"acc,none": 0.448, | |
"acc_stderr,none": 0.015733516566347836 | |
}, | |
"blimp_wh_questions_subject_gap": { | |
"alias": " - blimp_wh_questions_subject_gap", | |
"acc,none": 0.396, | |
"acc_stderr,none": 0.015473313265859405 | |
}, | |
"blimp_wh_questions_subject_gap_long_distance": { | |
"alias": " - blimp_wh_questions_subject_gap_long_distance", | |
"acc,none": 0.376, | |
"acc_stderr,none": 0.01532510550889813 | |
}, | |
"blimp_wh_vs_that_no_gap": { | |
"alias": " - blimp_wh_vs_that_no_gap", | |
"acc,none": 0.339, | |
"acc_stderr,none": 0.014976758771620344 | |
}, | |
"blimp_wh_vs_that_no_gap_long_distance": { | |
"alias": " - blimp_wh_vs_that_no_gap_long_distance", | |
"acc,none": 0.376, | |
"acc_stderr,none": 0.015325105508898127 | |
}, | |
"blimp_wh_vs_that_with_gap": { | |
"alias": " - blimp_wh_vs_that_with_gap", | |
"acc,none": 0.647, | |
"acc_stderr,none": 0.01512017260548369 | |
}, | |
"blimp_wh_vs_that_with_gap_long_distance": { | |
"alias": " - blimp_wh_vs_that_with_gap_long_distance", | |
"acc,none": 0.61, | |
"acc_stderr,none": 0.01543172505386661 | |
}, | |
"lambada_openai": { | |
"alias": "lambada_openai", | |
"perplexity,none": 3646175.9538808516, | |
"perplexity_stderr,none": 355913.85648543993, | |
"acc,none": 0.0, | |
"acc_stderr,none": 0.0 | |
}, | |
"logiqa": { | |
"alias": "logiqa", | |
"acc,none": 0.22887864823348694, | |
"acc_stderr,none": 0.016478107276313284, | |
"acc_norm,none": 0.2457757296466974, | |
"acc_norm_stderr,none": 0.016887410894296927 | |
}, | |
"mmlu": { | |
"acc,none": 0.24597635664435266, | |
"acc_stderr,none": 0.0036286107848610178, | |
"alias": "mmlu" | |
}, | |
"mmlu_humanities": { | |
"acc,none": 0.24654622741764082, | |
"acc_stderr,none": 0.006286606909050448, | |
"alias": " - humanities" | |
}, | |
"mmlu_formal_logic": { | |
"alias": " - formal_logic", | |
"acc,none": 0.24603174603174602, | |
"acc_stderr,none": 0.03852273364924315 | |
}, | |
"mmlu_high_school_european_history": { | |
"alias": " - high_school_european_history", | |
"acc,none": 0.24242424242424243, | |
"acc_stderr,none": 0.033464098810559534 | |
}, | |
"mmlu_high_school_us_history": { | |
"alias": " - high_school_us_history", | |
"acc,none": 0.23529411764705882, | |
"acc_stderr,none": 0.02977177522814563 | |
}, | |
"mmlu_high_school_world_history": { | |
"alias": " - high_school_world_history", | |
"acc,none": 0.2616033755274262, | |
"acc_stderr,none": 0.028609516716994934 | |
}, | |
"mmlu_international_law": { | |
"alias": " - international_law", | |
"acc,none": 0.256198347107438, | |
"acc_stderr,none": 0.03984979653302872 | |
}, | |
"mmlu_jurisprudence": { | |
"alias": " - jurisprudence", | |
"acc,none": 0.3148148148148148, | |
"acc_stderr,none": 0.04489931073591311 | |
}, | |
"mmlu_logical_fallacies": { | |
"alias": " - logical_fallacies", | |
"acc,none": 0.24539877300613497, | |
"acc_stderr,none": 0.03380939813943354 | |
}, | |
"mmlu_moral_disputes": { | |
"alias": " - moral_disputes", | |
"acc,none": 0.24566473988439305, | |
"acc_stderr,none": 0.02317629820399201 | |
}, | |
"mmlu_moral_scenarios": { | |
"alias": " - moral_scenarios", | |
"acc,none": 0.2424581005586592, | |
"acc_stderr,none": 0.014333522059217887 | |
}, | |
"mmlu_philosophy": { | |
"alias": " - philosophy", | |
"acc,none": 0.2829581993569132, | |
"acc_stderr,none": 0.025583062489984827 | |
}, | |
"mmlu_prehistory": { | |
"alias": " - prehistory", | |
"acc,none": 0.2623456790123457, | |
"acc_stderr,none": 0.024477222856135107 | |
}, | |
"mmlu_professional_law": { | |
"alias": " - professional_law", | |
"acc,none": 0.23728813559322035, | |
"acc_stderr,none": 0.010865436690780259 | |
}, | |
"mmlu_world_religions": { | |
"alias": " - world_religions", | |
"acc,none": 0.2046783625730994, | |
"acc_stderr,none": 0.030944459778533204 | |
}, | |
"mmlu_other": { | |
"acc,none": 0.26295461860315417, | |
"acc_stderr,none": 0.0078721211337469, | |
"alias": " - other" | |
}, | |
"mmlu_business_ethics": { | |
"alias": " - business_ethics", | |
"acc,none": 0.26, | |
"acc_stderr,none": 0.0440844002276808 | |
}, | |
"mmlu_clinical_knowledge": { | |
"alias": " - clinical_knowledge", | |
"acc,none": 0.2188679245283019, | |
"acc_stderr,none": 0.0254478638251086 | |
}, | |
"mmlu_college_medicine": { | |
"alias": " - college_medicine", | |
"acc,none": 0.1907514450867052, | |
"acc_stderr,none": 0.029957851329869337 | |
}, | |
"mmlu_global_facts": { | |
"alias": " - global_facts", | |
"acc,none": 0.31, | |
"acc_stderr,none": 0.04648231987117316 | |
}, | |
"mmlu_human_aging": { | |
"alias": " - human_aging", | |
"acc,none": 0.3721973094170404, | |
"acc_stderr,none": 0.032443052830087304 | |
}, | |
"mmlu_management": { | |
"alias": " - management", | |
"acc,none": 0.2621359223300971, | |
"acc_stderr,none": 0.04354631077260595 | |
}, | |
"mmlu_marketing": { | |
"alias": " - marketing", | |
"acc,none": 0.2606837606837607, | |
"acc_stderr,none": 0.028760348956523414 | |
}, | |
"mmlu_medical_genetics": { | |
"alias": " - medical_genetics", | |
"acc,none": 0.28, | |
"acc_stderr,none": 0.04512608598542128 | |
}, | |
"mmlu_miscellaneous": { | |
"alias": " - miscellaneous", | |
"acc,none": 0.28607918263090676, | |
"acc_stderr,none": 0.01616087140512753 | |
}, | |
"mmlu_nutrition": { | |
"alias": " - nutrition", | |
"acc,none": 0.22549019607843138, | |
"acc_stderr,none": 0.0239291555173513 | |
}, | |
"mmlu_professional_accounting": { | |
"alias": " - professional_accounting", | |
"acc,none": 0.2553191489361702, | |
"acc_stderr,none": 0.026011992930902 | |
}, | |
"mmlu_professional_medicine": { | |
"alias": " - professional_medicine", | |
"acc,none": 0.20220588235294118, | |
"acc_stderr,none": 0.024398192986654924 | |
}, | |
"mmlu_virology": { | |
"alias": " - virology", | |
"acc,none": 0.30120481927710846, | |
"acc_stderr,none": 0.0357160923005348 | |
}, | |
"mmlu_social_sciences": { | |
"acc,none": 0.2349691257718557, | |
"acc_stderr,none": 0.007631497201189245, | |
"alias": " - social sciences" | |
}, | |
"mmlu_econometrics": { | |
"alias": " - econometrics", | |
"acc,none": 0.2982456140350877, | |
"acc_stderr,none": 0.043036840335373146 | |
}, | |
"mmlu_high_school_geography": { | |
"alias": " - high_school_geography", | |
"acc,none": 0.19696969696969696, | |
"acc_stderr,none": 0.02833560973246335 | |
}, | |
"mmlu_high_school_government_and_politics": { | |
"alias": " - high_school_government_and_politics", | |
"acc,none": 0.20725388601036268, | |
"acc_stderr,none": 0.029252823291803627 | |
}, | |
"mmlu_high_school_macroeconomics": { | |
"alias": " - high_school_macroeconomics", | |
"acc,none": 0.2230769230769231, | |
"acc_stderr,none": 0.021107730127244 | |
}, | |
"mmlu_high_school_microeconomics": { | |
"alias": " - high_school_microeconomics", | |
"acc,none": 0.2184873949579832, | |
"acc_stderr,none": 0.026841514322958927 | |
}, | |
"mmlu_high_school_psychology": { | |
"alias": " - high_school_psychology", | |
"acc,none": 0.23486238532110093, | |
"acc_stderr,none": 0.018175110510343588 | |
}, | |
"mmlu_human_sexuality": { | |
"alias": " - human_sexuality", | |
"acc,none": 0.25190839694656486, | |
"acc_stderr,none": 0.03807387116306086 | |
}, | |
"mmlu_professional_psychology": { | |
"alias": " - professional_psychology", | |
"acc,none": 0.25980392156862747, | |
"acc_stderr,none": 0.017740899509177784 | |
}, | |
"mmlu_public_relations": { | |
"alias": " - public_relations", | |
"acc,none": 0.34545454545454546, | |
"acc_stderr,none": 0.04554619617541054 | |
}, | |
"mmlu_security_studies": { | |
"alias": " - security_studies", | |
"acc,none": 0.17142857142857143, | |
"acc_stderr,none": 0.02412746346265016 | |
}, | |
"mmlu_sociology": { | |
"alias": " - sociology", | |
"acc,none": 0.24875621890547264, | |
"acc_stderr,none": 0.030567675938916707 | |
}, | |
"mmlu_us_foreign_policy": { | |
"alias": " - us_foreign_policy", | |
"acc,none": 0.21, | |
"acc_stderr,none": 0.04093601807403326 | |
}, | |
"mmlu_stem": { | |
"acc,none": 0.2391373295274342, | |
"acc_stderr,none": 0.007583045788703864, | |
"alias": " - stem" | |
}, | |
"mmlu_abstract_algebra": { | |
"alias": " - abstract_algebra", | |
"acc,none": 0.26, | |
"acc_stderr,none": 0.04408440022768079 | |
}, | |
"mmlu_anatomy": { | |
"alias": " - anatomy", | |
"acc,none": 0.26666666666666666, | |
"acc_stderr,none": 0.038201699145179055 | |
}, | |
"mmlu_astronomy": { | |
"alias": " - astronomy", | |
"acc,none": 0.19078947368421054, | |
"acc_stderr,none": 0.031975658210325 | |
}, | |
"mmlu_college_biology": { | |
"alias": " - college_biology", | |
"acc,none": 0.2222222222222222, | |
"acc_stderr,none": 0.03476590104304134 | |
}, | |
"mmlu_college_chemistry": { | |
"alias": " - college_chemistry", | |
"acc,none": 0.21, | |
"acc_stderr,none": 0.040936018074033256 | |
}, | |
"mmlu_college_computer_science": { | |
"alias": " - college_computer_science", | |
"acc,none": 0.15, | |
"acc_stderr,none": 0.0358870281282637 | |
}, | |
"mmlu_college_mathematics": { | |
"alias": " - college_mathematics", | |
"acc,none": 0.23, | |
"acc_stderr,none": 0.04229525846816506 | |
}, | |
"mmlu_college_physics": { | |
"alias": " - college_physics", | |
"acc,none": 0.19607843137254902, | |
"acc_stderr,none": 0.03950581861179964 | |
}, | |
"mmlu_computer_security": { | |
"alias": " - computer_security", | |
"acc,none": 0.24, | |
"acc_stderr,none": 0.04292346959909282 | |
}, | |
"mmlu_conceptual_physics": { | |
"alias": " - conceptual_physics", | |
"acc,none": 0.31063829787234043, | |
"acc_stderr,none": 0.03025123757921317 | |
}, | |
"mmlu_electrical_engineering": { | |
"alias": " - electrical_engineering", | |
"acc,none": 0.2206896551724138, | |
"acc_stderr,none": 0.03455930201924811 | |
}, | |
"mmlu_elementary_mathematics": { | |
"alias": " - elementary_mathematics", | |
"acc,none": 0.26455026455026454, | |
"acc_stderr,none": 0.022717467897708617 | |
}, | |
"mmlu_high_school_biology": { | |
"alias": " - high_school_biology", | |
"acc,none": 0.25483870967741934, | |
"acc_stderr,none": 0.02479011845933221 | |
}, | |
"mmlu_high_school_chemistry": { | |
"alias": " - high_school_chemistry", | |
"acc,none": 0.27586206896551724, | |
"acc_stderr,none": 0.031447125816782426 | |
}, | |
"mmlu_high_school_computer_science": { | |
"alias": " - high_school_computer_science", | |
"acc,none": 0.23, | |
"acc_stderr,none": 0.042295258468165044 | |
}, | |
"mmlu_high_school_mathematics": { | |
"alias": " - high_school_mathematics", | |
"acc,none": 0.25555555555555554, | |
"acc_stderr,none": 0.02659393910184407 | |
}, | |
"mmlu_high_school_physics": { | |
"alias": " - high_school_physics", | |
"acc,none": 0.1986754966887417, | |
"acc_stderr,none": 0.03257847384436775 | |
}, | |
"mmlu_high_school_statistics": { | |
"alias": " - high_school_statistics", | |
"acc,none": 0.1574074074074074, | |
"acc_stderr,none": 0.024837173518242394 | |
}, | |
"mmlu_machine_learning": { | |
"alias": " - machine_learning", | |
"acc,none": 0.2857142857142857, | |
"acc_stderr,none": 0.04287858751340455 | |
}, | |
"piqa": { | |
"alias": "piqa", | |
"acc,none": 0.528835690968444, | |
"acc_stderr,none": 0.011646407809944727, | |
"acc_norm,none": 0.5206746463547334, | |
"acc_norm_stderr,none": 0.011655846995729705 | |
}, | |
"sciq": { | |
"alias": "sciq", | |
"acc,none": 0.203, | |
"acc_stderr,none": 0.012726073744598282, | |
"acc_norm,none": 0.212, | |
"acc_norm_stderr,none": 0.012931481864938041 | |
}, | |
"wikitext": { | |
"alias": "wikitext", | |
"word_perplexity,none": 277720.2624424431, | |
"word_perplexity_stderr,none": "N/A", | |
"byte_perplexity,none": 10.422738126623752, | |
"byte_perplexity_stderr,none": "N/A", | |
"bits_per_byte,none": 3.381662428432054, | |
"bits_per_byte_stderr,none": "N/A" | |
}, | |
"winogrande": { | |
"alias": "winogrande", | |
"acc,none": 0.49171270718232046, | |
"acc_stderr,none": 0.014050555322824189 | |
}, | |
"wsc": { | |
"alias": "wsc", | |
"acc,none": 0.6153846153846154, | |
"acc_stderr,none": 0.0479366886807504 | |
} | |
}, | |
"groups": { | |
"blimp": { | |
"acc,none": 0.5234925373134328, | |
"acc_stderr,none": 0.001869288174484705, | |
"alias": "blimp" | |
}, | |
"mmlu": { | |
"acc,none": 0.24597635664435266, | |
"acc_stderr,none": 0.0036286107848610178, | |
"alias": "mmlu" | |
}, | |
"mmlu_humanities": { | |
"acc,none": 0.24654622741764082, | |
"acc_stderr,none": 0.006286606909050448, | |
"alias": " - humanities" | |
}, | |
"mmlu_other": { | |
"acc,none": 0.26295461860315417, | |
"acc_stderr,none": 0.0078721211337469, | |
"alias": " - other" | |
}, | |
"mmlu_social_sciences": { | |
"acc,none": 0.2349691257718557, | |
"acc_stderr,none": 0.007631497201189245, | |
"alias": " - social sciences" | |
}, | |
"mmlu_stem": { | |
"acc,none": 0.2391373295274342, | |
"acc_stderr,none": 0.007583045788703864, | |
"alias": " - stem" | |
} | |
}, | |
"group_subtasks": { | |
"arc_easy": [], | |
"arc_challenge": [], | |
"blimp": [ | |
"blimp_adjunct_island", | |
"blimp_anaphor_gender_agreement", | |
"blimp_anaphor_number_agreement", | |
"blimp_animate_subject_passive", | |
"blimp_animate_subject_trans", | |
"blimp_causative", | |
"blimp_complex_NP_island", | |
"blimp_coordinate_structure_constraint_complex_left_branch", | |
"blimp_coordinate_structure_constraint_object_extraction", | |
"blimp_determiner_noun_agreement_1", | |
"blimp_determiner_noun_agreement_2", | |
"blimp_determiner_noun_agreement_irregular_1", | |
"blimp_determiner_noun_agreement_irregular_2", | |
"blimp_determiner_noun_agreement_with_adj_2", | |
"blimp_determiner_noun_agreement_with_adj_irregular_1", | |
"blimp_determiner_noun_agreement_with_adj_irregular_2", | |
"blimp_determiner_noun_agreement_with_adjective_1", | |
"blimp_distractor_agreement_relational_noun", | |
"blimp_distractor_agreement_relative_clause", | |
"blimp_drop_argument", | |
"blimp_ellipsis_n_bar_1", | |
"blimp_ellipsis_n_bar_2", | |
"blimp_existential_there_object_raising", | |
"blimp_existential_there_quantifiers_1", | |
"blimp_existential_there_quantifiers_2", | |
"blimp_existential_there_subject_raising", | |
"blimp_expletive_it_object_raising", | |
"blimp_inchoative", | |
"blimp_intransitive", | |
"blimp_irregular_past_participle_adjectives", | |
"blimp_irregular_past_participle_verbs", | |
"blimp_irregular_plural_subject_verb_agreement_1", | |
"blimp_irregular_plural_subject_verb_agreement_2", | |
"blimp_left_branch_island_echo_question", | |
"blimp_left_branch_island_simple_question", | |
"blimp_matrix_question_npi_licensor_present", | |
"blimp_npi_present_1", | |
"blimp_npi_present_2", | |
"blimp_only_npi_licensor_present", | |
"blimp_only_npi_scope", | |
"blimp_passive_1", | |
"blimp_passive_2", | |
"blimp_principle_A_c_command", | |
"blimp_principle_A_case_1", | |
"blimp_principle_A_case_2", | |
"blimp_principle_A_domain_1", | |
"blimp_principle_A_domain_2", | |
"blimp_principle_A_domain_3", | |
"blimp_principle_A_reconstruction", | |
"blimp_regular_plural_subject_verb_agreement_1", | |
"blimp_regular_plural_subject_verb_agreement_2", | |
"blimp_sentential_negation_npi_licensor_present", | |
"blimp_sentential_negation_npi_scope", | |
"blimp_sentential_subject_island", | |
"blimp_superlative_quantifiers_1", | |
"blimp_superlative_quantifiers_2", | |
"blimp_tough_vs_raising_1", | |
"blimp_tough_vs_raising_2", | |
"blimp_transitive", | |
"blimp_wh_island", | |
"blimp_wh_questions_object_gap", | |
"blimp_wh_questions_subject_gap", | |
"blimp_wh_questions_subject_gap_long_distance", | |
"blimp_wh_vs_that_no_gap", | |
"blimp_wh_vs_that_no_gap_long_distance", | |
"blimp_wh_vs_that_with_gap", | |
"blimp_wh_vs_that_with_gap_long_distance" | |
], | |
"lambada_openai": [], | |
"logiqa": [], | |
"mmlu_humanities": [ | |
"mmlu_moral_disputes", | |
"mmlu_high_school_world_history", | |
"mmlu_jurisprudence", | |
"mmlu_philosophy", | |
"mmlu_high_school_us_history", | |
"mmlu_professional_law", | |
"mmlu_logical_fallacies", | |
"mmlu_moral_scenarios", | |
"mmlu_formal_logic", | |
"mmlu_prehistory", | |
"mmlu_high_school_european_history", | |
"mmlu_world_religions", | |
"mmlu_international_law" | |
], | |
"mmlu_social_sciences": [ | |
"mmlu_us_foreign_policy", | |
"mmlu_sociology", | |
"mmlu_econometrics", | |
"mmlu_security_studies", | |
"mmlu_high_school_geography", | |
"mmlu_public_relations", | |
"mmlu_high_school_microeconomics", | |
"mmlu_professional_psychology", | |
"mmlu_high_school_macroeconomics", | |
"mmlu_human_sexuality", | |
"mmlu_high_school_government_and_politics", | |
"mmlu_high_school_psychology" | |
], | |
"mmlu_other": [ | |
"mmlu_college_medicine", | |
"mmlu_medical_genetics", | |
"mmlu_business_ethics", | |
"mmlu_miscellaneous", | |
"mmlu_nutrition", | |
"mmlu_clinical_knowledge", | |
"mmlu_human_aging", | |
"mmlu_professional_accounting", | |
"mmlu_marketing", | |
"mmlu_global_facts", | |
"mmlu_professional_medicine", | |
"mmlu_virology", | |
"mmlu_management" | |
], | |
"mmlu_stem": [ | |
"mmlu_elementary_mathematics", | |
"mmlu_electrical_engineering", | |
"mmlu_high_school_computer_science", | |
"mmlu_high_school_physics", | |
"mmlu_college_mathematics", | |
"mmlu_college_chemistry", | |
"mmlu_machine_learning", | |
"mmlu_high_school_mathematics", | |
"mmlu_computer_security", | |
"mmlu_conceptual_physics", | |
"mmlu_high_school_statistics", | |
"mmlu_high_school_biology", | |
"mmlu_astronomy", | |
"mmlu_college_computer_science", | |
"mmlu_college_biology", | |
"mmlu_college_physics", | |
"mmlu_anatomy", | |
"mmlu_high_school_chemistry", | |
"mmlu_abstract_algebra" | |
], | |
"mmlu": [ | |
"mmlu_stem", | |
"mmlu_other", | |
"mmlu_social_sciences", | |
"mmlu_humanities" | |
], | |
"piqa": [], | |
"sciq": [], | |
"wikitext": [], | |
"winogrande": [], | |
"wsc": [] | |
}, | |
"configs": { | |
"arc_challenge": { | |
"task": "arc_challenge", | |
"tag": [ | |
"ai2_arc" | |
], | |
"dataset_path": "allenai/ai2_arc", | |
"dataset_name": "ARC-Challenge", | |
"training_split": "train", | |
"validation_split": "validation", | |
"test_split": "test", | |
"doc_to_text": "Question: {{question}}\nAnswer:", | |
"doc_to_target": "{{choices.label.index(answerKey)}}", | |
"doc_to_choice": "{{choices.text}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
}, | |
{ | |
"metric": "acc_norm", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "Question: {{question}}\nAnswer:", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"arc_easy": { | |
"task": "arc_easy", | |
"tag": [ | |
"ai2_arc" | |
], | |
"dataset_path": "allenai/ai2_arc", | |
"dataset_name": "ARC-Easy", | |
"training_split": "train", | |
"validation_split": "validation", | |
"test_split": "test", | |
"doc_to_text": "Question: {{question}}\nAnswer:", | |
"doc_to_target": "{{choices.label.index(answerKey)}}", | |
"doc_to_choice": "{{choices.text}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
}, | |
{ | |
"metric": "acc_norm", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "Question: {{question}}\nAnswer:", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_adjunct_island": { | |
"task": "blimp_adjunct_island", | |
"dataset_path": "blimp", | |
"dataset_name": "adjunct_island", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_anaphor_gender_agreement": { | |
"task": "blimp_anaphor_gender_agreement", | |
"dataset_path": "blimp", | |
"dataset_name": "anaphor_gender_agreement", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_anaphor_number_agreement": { | |
"task": "blimp_anaphor_number_agreement", | |
"dataset_path": "blimp", | |
"dataset_name": "anaphor_number_agreement", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_animate_subject_passive": { | |
"task": "blimp_animate_subject_passive", | |
"dataset_path": "blimp", | |
"dataset_name": "animate_subject_passive", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_animate_subject_trans": { | |
"task": "blimp_animate_subject_trans", | |
"dataset_path": "blimp", | |
"dataset_name": "animate_subject_trans", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_causative": { | |
"task": "blimp_causative", | |
"dataset_path": "blimp", | |
"dataset_name": "causative", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_complex_NP_island": { | |
"task": "blimp_complex_NP_island", | |
"dataset_path": "blimp", | |
"dataset_name": "complex_NP_island", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_coordinate_structure_constraint_complex_left_branch": { | |
"task": "blimp_coordinate_structure_constraint_complex_left_branch", | |
"dataset_path": "blimp", | |
"dataset_name": "coordinate_structure_constraint_complex_left_branch", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_coordinate_structure_constraint_object_extraction": { | |
"task": "blimp_coordinate_structure_constraint_object_extraction", | |
"dataset_path": "blimp", | |
"dataset_name": "coordinate_structure_constraint_object_extraction", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_determiner_noun_agreement_1": { | |
"task": "blimp_determiner_noun_agreement_1", | |
"dataset_path": "blimp", | |
"dataset_name": "determiner_noun_agreement_1", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_determiner_noun_agreement_2": { | |
"task": "blimp_determiner_noun_agreement_2", | |
"dataset_path": "blimp", | |
"dataset_name": "determiner_noun_agreement_2", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_determiner_noun_agreement_irregular_1": { | |
"task": "blimp_determiner_noun_agreement_irregular_1", | |
"dataset_path": "blimp", | |
"dataset_name": "determiner_noun_agreement_irregular_1", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_determiner_noun_agreement_irregular_2": { | |
"task": "blimp_determiner_noun_agreement_irregular_2", | |
"dataset_path": "blimp", | |
"dataset_name": "determiner_noun_agreement_irregular_2", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_determiner_noun_agreement_with_adj_2": { | |
"task": "blimp_determiner_noun_agreement_with_adj_2", | |
"dataset_path": "blimp", | |
"dataset_name": "determiner_noun_agreement_with_adj_2", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_determiner_noun_agreement_with_adj_irregular_1": { | |
"task": "blimp_determiner_noun_agreement_with_adj_irregular_1", | |
"dataset_path": "blimp", | |
"dataset_name": "determiner_noun_agreement_with_adj_irregular_1", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_determiner_noun_agreement_with_adj_irregular_2": { | |
"task": "blimp_determiner_noun_agreement_with_adj_irregular_2", | |
"dataset_path": "blimp", | |
"dataset_name": "determiner_noun_agreement_with_adj_irregular_2", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_determiner_noun_agreement_with_adjective_1": { | |
"task": "blimp_determiner_noun_agreement_with_adjective_1", | |
"dataset_path": "blimp", | |
"dataset_name": "determiner_noun_agreement_with_adjective_1", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_distractor_agreement_relational_noun": { | |
"task": "blimp_distractor_agreement_relational_noun", | |
"dataset_path": "blimp", | |
"dataset_name": "distractor_agreement_relational_noun", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_distractor_agreement_relative_clause": { | |
"task": "blimp_distractor_agreement_relative_clause", | |
"dataset_path": "blimp", | |
"dataset_name": "distractor_agreement_relative_clause", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_drop_argument": { | |
"task": "blimp_drop_argument", | |
"dataset_path": "blimp", | |
"dataset_name": "drop_argument", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_ellipsis_n_bar_1": { | |
"task": "blimp_ellipsis_n_bar_1", | |
"dataset_path": "blimp", | |
"dataset_name": "ellipsis_n_bar_1", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_ellipsis_n_bar_2": { | |
"task": "blimp_ellipsis_n_bar_2", | |
"dataset_path": "blimp", | |
"dataset_name": "ellipsis_n_bar_2", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_existential_there_object_raising": { | |
"task": "blimp_existential_there_object_raising", | |
"dataset_path": "blimp", | |
"dataset_name": "existential_there_object_raising", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_existential_there_quantifiers_1": { | |
"task": "blimp_existential_there_quantifiers_1", | |
"dataset_path": "blimp", | |
"dataset_name": "existential_there_quantifiers_1", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_existential_there_quantifiers_2": { | |
"task": "blimp_existential_there_quantifiers_2", | |
"dataset_path": "blimp", | |
"dataset_name": "existential_there_quantifiers_2", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_existential_there_subject_raising": { | |
"task": "blimp_existential_there_subject_raising", | |
"dataset_path": "blimp", | |
"dataset_name": "existential_there_subject_raising", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_expletive_it_object_raising": { | |
"task": "blimp_expletive_it_object_raising", | |
"dataset_path": "blimp", | |
"dataset_name": "expletive_it_object_raising", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_inchoative": { | |
"task": "blimp_inchoative", | |
"dataset_path": "blimp", | |
"dataset_name": "inchoative", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_intransitive": { | |
"task": "blimp_intransitive", | |
"dataset_path": "blimp", | |
"dataset_name": "intransitive", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_irregular_past_participle_adjectives": { | |
"task": "blimp_irregular_past_participle_adjectives", | |
"dataset_path": "blimp", | |
"dataset_name": "irregular_past_participle_adjectives", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_irregular_past_participle_verbs": { | |
"task": "blimp_irregular_past_participle_verbs", | |
"dataset_path": "blimp", | |
"dataset_name": "irregular_past_participle_verbs", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_irregular_plural_subject_verb_agreement_1": { | |
"task": "blimp_irregular_plural_subject_verb_agreement_1", | |
"dataset_path": "blimp", | |
"dataset_name": "irregular_plural_subject_verb_agreement_1", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_irregular_plural_subject_verb_agreement_2": { | |
"task": "blimp_irregular_plural_subject_verb_agreement_2", | |
"dataset_path": "blimp", | |
"dataset_name": "irregular_plural_subject_verb_agreement_2", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_left_branch_island_echo_question": { | |
"task": "blimp_left_branch_island_echo_question", | |
"dataset_path": "blimp", | |
"dataset_name": "left_branch_island_echo_question", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_left_branch_island_simple_question": { | |
"task": "blimp_left_branch_island_simple_question", | |
"dataset_path": "blimp", | |
"dataset_name": "left_branch_island_simple_question", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_matrix_question_npi_licensor_present": { | |
"task": "blimp_matrix_question_npi_licensor_present", | |
"dataset_path": "blimp", | |
"dataset_name": "matrix_question_npi_licensor_present", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_npi_present_1": { | |
"task": "blimp_npi_present_1", | |
"dataset_path": "blimp", | |
"dataset_name": "npi_present_1", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_npi_present_2": { | |
"task": "blimp_npi_present_2", | |
"dataset_path": "blimp", | |
"dataset_name": "npi_present_2", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_only_npi_licensor_present": { | |
"task": "blimp_only_npi_licensor_present", | |
"dataset_path": "blimp", | |
"dataset_name": "only_npi_licensor_present", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_only_npi_scope": { | |
"task": "blimp_only_npi_scope", | |
"dataset_path": "blimp", | |
"dataset_name": "only_npi_scope", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_passive_1": { | |
"task": "blimp_passive_1", | |
"dataset_path": "blimp", | |
"dataset_name": "passive_1", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_passive_2": { | |
"task": "blimp_passive_2", | |
"dataset_path": "blimp", | |
"dataset_name": "passive_2", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_principle_A_c_command": { | |
"task": "blimp_principle_A_c_command", | |
"dataset_path": "blimp", | |
"dataset_name": "principle_A_c_command", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_principle_A_case_1": { | |
"task": "blimp_principle_A_case_1", | |
"dataset_path": "blimp", | |
"dataset_name": "principle_A_case_1", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_principle_A_case_2": { | |
"task": "blimp_principle_A_case_2", | |
"dataset_path": "blimp", | |
"dataset_name": "principle_A_case_2", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_principle_A_domain_1": { | |
"task": "blimp_principle_A_domain_1", | |
"dataset_path": "blimp", | |
"dataset_name": "principle_A_domain_1", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_principle_A_domain_2": { | |
"task": "blimp_principle_A_domain_2", | |
"dataset_path": "blimp", | |
"dataset_name": "principle_A_domain_2", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_principle_A_domain_3": { | |
"task": "blimp_principle_A_domain_3", | |
"dataset_path": "blimp", | |
"dataset_name": "principle_A_domain_3", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_principle_A_reconstruction": { | |
"task": "blimp_principle_A_reconstruction", | |
"dataset_path": "blimp", | |
"dataset_name": "principle_A_reconstruction", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_regular_plural_subject_verb_agreement_1": { | |
"task": "blimp_regular_plural_subject_verb_agreement_1", | |
"dataset_path": "blimp", | |
"dataset_name": "regular_plural_subject_verb_agreement_1", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_regular_plural_subject_verb_agreement_2": { | |
"task": "blimp_regular_plural_subject_verb_agreement_2", | |
"dataset_path": "blimp", | |
"dataset_name": "regular_plural_subject_verb_agreement_2", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_sentential_negation_npi_licensor_present": { | |
"task": "blimp_sentential_negation_npi_licensor_present", | |
"dataset_path": "blimp", | |
"dataset_name": "sentential_negation_npi_licensor_present", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_sentential_negation_npi_scope": { | |
"task": "blimp_sentential_negation_npi_scope", | |
"dataset_path": "blimp", | |
"dataset_name": "sentential_negation_npi_scope", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_sentential_subject_island": { | |
"task": "blimp_sentential_subject_island", | |
"dataset_path": "blimp", | |
"dataset_name": "sentential_subject_island", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_superlative_quantifiers_1": { | |
"task": "blimp_superlative_quantifiers_1", | |
"dataset_path": "blimp", | |
"dataset_name": "superlative_quantifiers_1", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_superlative_quantifiers_2": { | |
"task": "blimp_superlative_quantifiers_2", | |
"dataset_path": "blimp", | |
"dataset_name": "superlative_quantifiers_2", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_tough_vs_raising_1": { | |
"task": "blimp_tough_vs_raising_1", | |
"dataset_path": "blimp", | |
"dataset_name": "tough_vs_raising_1", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_tough_vs_raising_2": { | |
"task": "blimp_tough_vs_raising_2", | |
"dataset_path": "blimp", | |
"dataset_name": "tough_vs_raising_2", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_transitive": { | |
"task": "blimp_transitive", | |
"dataset_path": "blimp", | |
"dataset_name": "transitive", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_wh_island": { | |
"task": "blimp_wh_island", | |
"dataset_path": "blimp", | |
"dataset_name": "wh_island", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_wh_questions_object_gap": { | |
"task": "blimp_wh_questions_object_gap", | |
"dataset_path": "blimp", | |
"dataset_name": "wh_questions_object_gap", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_wh_questions_subject_gap": { | |
"task": "blimp_wh_questions_subject_gap", | |
"dataset_path": "blimp", | |
"dataset_name": "wh_questions_subject_gap", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_wh_questions_subject_gap_long_distance": { | |
"task": "blimp_wh_questions_subject_gap_long_distance", | |
"dataset_path": "blimp", | |
"dataset_name": "wh_questions_subject_gap_long_distance", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_wh_vs_that_no_gap": { | |
"task": "blimp_wh_vs_that_no_gap", | |
"dataset_path": "blimp", | |
"dataset_name": "wh_vs_that_no_gap", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_wh_vs_that_no_gap_long_distance": { | |
"task": "blimp_wh_vs_that_no_gap_long_distance", | |
"dataset_path": "blimp", | |
"dataset_name": "wh_vs_that_no_gap_long_distance", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_wh_vs_that_with_gap": { | |
"task": "blimp_wh_vs_that_with_gap", | |
"dataset_path": "blimp", | |
"dataset_name": "wh_vs_that_with_gap", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"blimp_wh_vs_that_with_gap_long_distance": { | |
"task": "blimp_wh_vs_that_with_gap_long_distance", | |
"dataset_path": "blimp", | |
"dataset_name": "wh_vs_that_with_gap_long_distance", | |
"validation_split": "train", | |
"doc_to_text": "", | |
"doc_to_target": 0, | |
"doc_to_choice": "{{[sentence_good, sentence_bad]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"lambada_openai": { | |
"task": "lambada_openai", | |
"tag": [ | |
"lambada" | |
], | |
"dataset_path": "EleutherAI/lambada_openai", | |
"dataset_name": "default", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}", | |
"doc_to_target": "{{' '+text.split(' ')[-1]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "perplexity", | |
"aggregation": "perplexity", | |
"higher_is_better": false | |
}, | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "loglikelihood", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{text}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"logiqa": { | |
"task": "logiqa", | |
"dataset_path": "EleutherAI/logiqa", | |
"dataset_name": "logiqa", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"training_split": "train", | |
"validation_split": "validation", | |
"test_split": "test", | |
"doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Passage: <passage>\n Question: <question>\n Choices:\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n Answer:\n \"\"\"\n choices = [\"a\", \"b\", \"c\", \"d\"]\n prompt = \"Passage: \" + doc[\"context\"] + \"\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\nChoices:\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"Answer:\"\n return prompt\n", | |
"doc_to_target": "def doc_to_target(doc) -> int:\n choices = [\"a\", \"b\", \"c\", \"d\"]\n return choices.index(doc[\"label\"].strip())\n", | |
"doc_to_choice": "{{options}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
}, | |
{ | |
"metric": "acc_norm", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{context}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_abstract_algebra": { | |
"task": "mmlu_abstract_algebra", | |
"task_alias": "abstract_algebra", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "abstract_algebra", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about abstract algebra.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_anatomy": { | |
"task": "mmlu_anatomy", | |
"task_alias": "anatomy", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "anatomy", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about anatomy.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_astronomy": { | |
"task": "mmlu_astronomy", | |
"task_alias": "astronomy", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "astronomy", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about astronomy.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_business_ethics": { | |
"task": "mmlu_business_ethics", | |
"task_alias": "business_ethics", | |
"tag": "mmlu_other_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "business_ethics", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about business ethics.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_clinical_knowledge": { | |
"task": "mmlu_clinical_knowledge", | |
"task_alias": "clinical_knowledge", | |
"tag": "mmlu_other_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "clinical_knowledge", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about clinical knowledge.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_college_biology": { | |
"task": "mmlu_college_biology", | |
"task_alias": "college_biology", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "college_biology", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about college biology.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_college_chemistry": { | |
"task": "mmlu_college_chemistry", | |
"task_alias": "college_chemistry", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "college_chemistry", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about college chemistry.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_college_computer_science": { | |
"task": "mmlu_college_computer_science", | |
"task_alias": "college_computer_science", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "college_computer_science", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about college computer science.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_college_mathematics": { | |
"task": "mmlu_college_mathematics", | |
"task_alias": "college_mathematics", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "college_mathematics", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about college mathematics.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_college_medicine": { | |
"task": "mmlu_college_medicine", | |
"task_alias": "college_medicine", | |
"tag": "mmlu_other_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "college_medicine", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about college medicine.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_college_physics": { | |
"task": "mmlu_college_physics", | |
"task_alias": "college_physics", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "college_physics", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about college physics.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_computer_security": { | |
"task": "mmlu_computer_security", | |
"task_alias": "computer_security", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "computer_security", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about computer security.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_conceptual_physics": { | |
"task": "mmlu_conceptual_physics", | |
"task_alias": "conceptual_physics", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "conceptual_physics", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about conceptual physics.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_econometrics": { | |
"task": "mmlu_econometrics", | |
"task_alias": "econometrics", | |
"tag": "mmlu_social_sciences_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "econometrics", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about econometrics.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_electrical_engineering": { | |
"task": "mmlu_electrical_engineering", | |
"task_alias": "electrical_engineering", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "electrical_engineering", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about electrical engineering.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_elementary_mathematics": { | |
"task": "mmlu_elementary_mathematics", | |
"task_alias": "elementary_mathematics", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "elementary_mathematics", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about elementary mathematics.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_formal_logic": { | |
"task": "mmlu_formal_logic", | |
"task_alias": "formal_logic", | |
"tag": "mmlu_humanities_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "formal_logic", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about formal logic.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_global_facts": { | |
"task": "mmlu_global_facts", | |
"task_alias": "global_facts", | |
"tag": "mmlu_other_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "global_facts", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about global facts.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_high_school_biology": { | |
"task": "mmlu_high_school_biology", | |
"task_alias": "high_school_biology", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "high_school_biology", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about high school biology.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_high_school_chemistry": { | |
"task": "mmlu_high_school_chemistry", | |
"task_alias": "high_school_chemistry", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "high_school_chemistry", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about high school chemistry.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_high_school_computer_science": { | |
"task": "mmlu_high_school_computer_science", | |
"task_alias": "high_school_computer_science", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "high_school_computer_science", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about high school computer science.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_high_school_european_history": { | |
"task": "mmlu_high_school_european_history", | |
"task_alias": "high_school_european_history", | |
"tag": "mmlu_humanities_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "high_school_european_history", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about high school european history.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_high_school_geography": { | |
"task": "mmlu_high_school_geography", | |
"task_alias": "high_school_geography", | |
"tag": "mmlu_social_sciences_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "high_school_geography", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about high school geography.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_high_school_government_and_politics": { | |
"task": "mmlu_high_school_government_and_politics", | |
"task_alias": "high_school_government_and_politics", | |
"tag": "mmlu_social_sciences_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "high_school_government_and_politics", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about high school government and politics.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_high_school_macroeconomics": { | |
"task": "mmlu_high_school_macroeconomics", | |
"task_alias": "high_school_macroeconomics", | |
"tag": "mmlu_social_sciences_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "high_school_macroeconomics", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_high_school_mathematics": { | |
"task": "mmlu_high_school_mathematics", | |
"task_alias": "high_school_mathematics", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "high_school_mathematics", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about high school mathematics.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_high_school_microeconomics": { | |
"task": "mmlu_high_school_microeconomics", | |
"task_alias": "high_school_microeconomics", | |
"tag": "mmlu_social_sciences_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "high_school_microeconomics", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about high school microeconomics.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_high_school_physics": { | |
"task": "mmlu_high_school_physics", | |
"task_alias": "high_school_physics", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "high_school_physics", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about high school physics.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_high_school_psychology": { | |
"task": "mmlu_high_school_psychology", | |
"task_alias": "high_school_psychology", | |
"tag": "mmlu_social_sciences_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "high_school_psychology", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about high school psychology.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_high_school_statistics": { | |
"task": "mmlu_high_school_statistics", | |
"task_alias": "high_school_statistics", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "high_school_statistics", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about high school statistics.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_high_school_us_history": { | |
"task": "mmlu_high_school_us_history", | |
"task_alias": "high_school_us_history", | |
"tag": "mmlu_humanities_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "high_school_us_history", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about high school us history.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_high_school_world_history": { | |
"task": "mmlu_high_school_world_history", | |
"task_alias": "high_school_world_history", | |
"tag": "mmlu_humanities_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "high_school_world_history", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about high school world history.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_human_aging": { | |
"task": "mmlu_human_aging", | |
"task_alias": "human_aging", | |
"tag": "mmlu_other_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "human_aging", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about human aging.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_human_sexuality": { | |
"task": "mmlu_human_sexuality", | |
"task_alias": "human_sexuality", | |
"tag": "mmlu_social_sciences_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "human_sexuality", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about human sexuality.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_international_law": { | |
"task": "mmlu_international_law", | |
"task_alias": "international_law", | |
"tag": "mmlu_humanities_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "international_law", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about international law.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_jurisprudence": { | |
"task": "mmlu_jurisprudence", | |
"task_alias": "jurisprudence", | |
"tag": "mmlu_humanities_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "jurisprudence", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about jurisprudence.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_logical_fallacies": { | |
"task": "mmlu_logical_fallacies", | |
"task_alias": "logical_fallacies", | |
"tag": "mmlu_humanities_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "logical_fallacies", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about logical fallacies.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_machine_learning": { | |
"task": "mmlu_machine_learning", | |
"task_alias": "machine_learning", | |
"tag": "mmlu_stem_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "machine_learning", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about machine learning.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_management": { | |
"task": "mmlu_management", | |
"task_alias": "management", | |
"tag": "mmlu_other_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "management", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about management.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_marketing": { | |
"task": "mmlu_marketing", | |
"task_alias": "marketing", | |
"tag": "mmlu_other_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "marketing", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about marketing.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_medical_genetics": { | |
"task": "mmlu_medical_genetics", | |
"task_alias": "medical_genetics", | |
"tag": "mmlu_other_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "medical_genetics", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about medical genetics.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_miscellaneous": { | |
"task": "mmlu_miscellaneous", | |
"task_alias": "miscellaneous", | |
"tag": "mmlu_other_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "miscellaneous", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about miscellaneous.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_moral_disputes": { | |
"task": "mmlu_moral_disputes", | |
"task_alias": "moral_disputes", | |
"tag": "mmlu_humanities_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "moral_disputes", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about moral disputes.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_moral_scenarios": { | |
"task": "mmlu_moral_scenarios", | |
"task_alias": "moral_scenarios", | |
"tag": "mmlu_humanities_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "moral_scenarios", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about moral scenarios.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_nutrition": { | |
"task": "mmlu_nutrition", | |
"task_alias": "nutrition", | |
"tag": "mmlu_other_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "nutrition", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about nutrition.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_philosophy": { | |
"task": "mmlu_philosophy", | |
"task_alias": "philosophy", | |
"tag": "mmlu_humanities_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "philosophy", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about philosophy.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_prehistory": { | |
"task": "mmlu_prehistory", | |
"task_alias": "prehistory", | |
"tag": "mmlu_humanities_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "prehistory", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about prehistory.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_professional_accounting": { | |
"task": "mmlu_professional_accounting", | |
"task_alias": "professional_accounting", | |
"tag": "mmlu_other_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "professional_accounting", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about professional accounting.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_professional_law": { | |
"task": "mmlu_professional_law", | |
"task_alias": "professional_law", | |
"tag": "mmlu_humanities_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "professional_law", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about professional law.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_professional_medicine": { | |
"task": "mmlu_professional_medicine", | |
"task_alias": "professional_medicine", | |
"tag": "mmlu_other_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "professional_medicine", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about professional medicine.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_professional_psychology": { | |
"task": "mmlu_professional_psychology", | |
"task_alias": "professional_psychology", | |
"tag": "mmlu_social_sciences_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "professional_psychology", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about professional psychology.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_public_relations": { | |
"task": "mmlu_public_relations", | |
"task_alias": "public_relations", | |
"tag": "mmlu_social_sciences_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "public_relations", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about public relations.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_security_studies": { | |
"task": "mmlu_security_studies", | |
"task_alias": "security_studies", | |
"tag": "mmlu_social_sciences_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "security_studies", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about security studies.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_sociology": { | |
"task": "mmlu_sociology", | |
"task_alias": "sociology", | |
"tag": "mmlu_social_sciences_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "sociology", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about sociology.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_us_foreign_policy": { | |
"task": "mmlu_us_foreign_policy", | |
"task_alias": "us_foreign_policy", | |
"tag": "mmlu_social_sciences_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "us_foreign_policy", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about us foreign policy.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_virology": { | |
"task": "mmlu_virology", | |
"task_alias": "virology", | |
"tag": "mmlu_other_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "virology", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about virology.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"mmlu_world_religions": { | |
"task": "mmlu_world_religions", | |
"task_alias": "world_religions", | |
"tag": "mmlu_humanities_tasks", | |
"dataset_path": "hails/mmlu_no_train", | |
"dataset_name": "world_religions", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"test_split": "test", | |
"fewshot_split": "dev", | |
"doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
"doc_to_target": "answer", | |
"doc_to_choice": [ | |
"A", | |
"B", | |
"C", | |
"D" | |
], | |
"description": "The following are multiple choice questions (with answers) about world religions.\n\n", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"fewshot_config": { | |
"sampler": "first_n" | |
}, | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"piqa": { | |
"task": "piqa", | |
"dataset_path": "piqa", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"training_split": "train", | |
"validation_split": "validation", | |
"doc_to_text": "Question: {{goal}}\nAnswer:", | |
"doc_to_target": "label", | |
"doc_to_choice": "{{[sol1, sol2]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
}, | |
{ | |
"metric": "acc_norm", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "goal", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"sciq": { | |
"task": "sciq", | |
"dataset_path": "sciq", | |
"training_split": "train", | |
"validation_split": "validation", | |
"test_split": "test", | |
"doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:", | |
"doc_to_target": 3, | |
"doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
}, | |
{ | |
"metric": "acc_norm", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{support}} {{question}}", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"wikitext": { | |
"task": "wikitext", | |
"dataset_path": "EleutherAI/wikitext_document_level", | |
"dataset_name": "wikitext-2-raw-v1", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"training_split": "train", | |
"validation_split": "validation", | |
"test_split": "test", | |
"doc_to_text": "", | |
"doc_to_target": "def wikitext_detokenizer(doc):\n string = doc[\"page\"]\n # contractions\n string = string.replace(\"s '\", \"s'\")\n string = re.sub(r\"/' [0-9]/\", r\"/'[0-9]/\", string)\n # number separators\n string = string.replace(\" @-@ \", \"-\")\n string = string.replace(\" @,@ \", \",\")\n string = string.replace(\" @.@ \", \".\")\n # punctuation\n string = string.replace(\" : \", \": \")\n string = string.replace(\" ; \", \"; \")\n string = string.replace(\" . \", \". \")\n string = string.replace(\" ! \", \"! \")\n string = string.replace(\" ? \", \"? \")\n string = string.replace(\" , \", \", \")\n # double brackets\n string = re.sub(r\"\\(\\s*([^\\)]*?)\\s*\\)\", r\"(\\1)\", string)\n string = re.sub(r\"\\[\\s*([^\\]]*?)\\s*\\]\", r\"[\\1]\", string)\n string = re.sub(r\"{\\s*([^}]*?)\\s*}\", r\"{\\1}\", string)\n string = re.sub(r\"\\\"\\s*([^\\\"]*?)\\s*\\\"\", r'\"\\1\"', string)\n string = re.sub(r\"'\\s*([^']*?)\\s*'\", r\"'\\1'\", string)\n | |
"process_results": "def process_results(doc, results):\n (loglikelihood,) = results\n # IMPORTANT: wikitext counts number of words in *original doc before detokenization*\n _words = len(re.split(r\"\\s+\", doc[\"page\"]))\n _bytes = len(doc[\"page\"].encode(\"utf-8\"))\n return {\n \"word_perplexity\": (loglikelihood, _words),\n \"byte_perplexity\": (loglikelihood, _bytes),\n \"bits_per_byte\": (loglikelihood, _bytes),\n }\n", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "word_perplexity" | |
}, | |
{ | |
"metric": "byte_perplexity" | |
}, | |
{ | |
"metric": "bits_per_byte" | |
} | |
], | |
"output_type": "loglikelihood_rolling", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "{{page}}", | |
"metadata": { | |
"version": 2.0 | |
} | |
}, | |
"winogrande": { | |
"task": "winogrande", | |
"dataset_path": "winogrande", | |
"dataset_name": "winogrande_xl", | |
"dataset_kwargs": { | |
"trust_remote_code": true | |
}, | |
"training_split": "train", | |
"validation_split": "validation", | |
"doc_to_text": "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n return answer_to_num[doc[\"answer\"]]\n", | |
"doc_to_target": "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"_\") + 1\n return doc[\"sentence\"][idx:].strip()\n", | |
"doc_to_choice": "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"_\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"sentence\"][:idx] + opt for opt in options]\n", | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc", | |
"aggregation": "mean", | |
"higher_is_better": true | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": true, | |
"doc_to_decontamination_query": "sentence", | |
"metadata": { | |
"version": 1.0 | |
} | |
}, | |
"wsc": { | |
"task": "wsc", | |
"tag": [ | |
"super-glue-lm-eval-v1" | |
], | |
"dataset_path": "super_glue", | |
"dataset_name": "wsc.fixed", | |
"training_split": "train", | |
"validation_split": "validation", | |
"doc_to_text": "def default_doc_to_text(x):\n raw_passage = x[\"text\"]\n # NOTE: HuggingFace span indices are word-based not character-based.\n pre = \" \".join(raw_passage.split()[: x[\"span2_index\"]])\n post = raw_passage[len(pre) + len(x[\"span2_text\"]) + 1 :]\n passage = general_detokenize(pre + \" *{}*\".format(x[\"span2_text\"]) + post)\n noun = x[\"span1_text\"]\n pronoun = x[\"span2_text\"]\n text = (\n f\"Passage: {passage}\\n\"\n + f'Question: In the passage above, does the pronoun \"*{pronoun}*\" refer to \"*{noun}*\"?\\n'\n + \"Answer:\"\n )\n return text\n", | |
"doc_to_target": "label", | |
"doc_to_choice": [ | |
"no", | |
"yes" | |
], | |
"description": "", | |
"target_delimiter": " ", | |
"fewshot_delimiter": "\n\n", | |
"num_fewshot": 0, | |
"metric_list": [ | |
{ | |
"metric": "acc" | |
} | |
], | |
"output_type": "multiple_choice", | |
"repeats": 1, | |
"should_decontaminate": false, | |
"metadata": { | |
"version": 1.0 | |
} | |
} | |
}, | |
"versions": { | |
"arc_challenge": 1.0, | |
"arc_easy": 1.0, | |
"blimp": 2.0, | |
"blimp_adjunct_island": 1.0, | |
"blimp_anaphor_gender_agreement": 1.0, | |
"blimp_anaphor_number_agreement": 1.0, | |
"blimp_animate_subject_passive": 1.0, | |
"blimp_animate_subject_trans": 1.0, | |
"blimp_causative": 1.0, | |
"blimp_complex_NP_island": 1.0, | |
"blimp_coordinate_structure_constraint_complex_left_branch": 1.0, | |
"blimp_coordinate_structure_constraint_object_extraction": 1.0, | |
"blimp_determiner_noun_agreement_1": 1.0, | |
"blimp_determiner_noun_agreement_2": 1.0, | |
"blimp_determiner_noun_agreement_irregular_1": 1.0, | |
"blimp_determiner_noun_agreement_irregular_2": 1.0, | |
"blimp_determiner_noun_agreement_with_adj_2": 1.0, | |
"blimp_determiner_noun_agreement_with_adj_irregular_1": 1.0, | |
"blimp_determiner_noun_agreement_with_adj_irregular_2": 1.0, | |
"blimp_determiner_noun_agreement_with_adjective_1": 1.0, | |
"blimp_distractor_agreement_relational_noun": 1.0, | |
"blimp_distractor_agreement_relative_clause": 1.0, | |
"blimp_drop_argument": 1.0, | |
"blimp_ellipsis_n_bar_1": 1.0, | |
"blimp_ellipsis_n_bar_2": 1.0, | |
"blimp_existential_there_object_raising": 1.0, | |
"blimp_existential_there_quantifiers_1": 1.0, | |
"blimp_existential_there_quantifiers_2": 1.0, | |
"blimp_existential_there_subject_raising": 1.0, | |
"blimp_expletive_it_object_raising": 1.0, | |
"blimp_inchoative": 1.0, | |
"blimp_intransitive": 1.0, | |
"blimp_irregular_past_participle_adjectives": 1.0, | |
"blimp_irregular_past_participle_verbs": 1.0, | |
"blimp_irregular_plural_subject_verb_agreement_1": 1.0, | |
"blimp_irregular_plural_subject_verb_agreement_2": 1.0, | |
"blimp_left_branch_island_echo_question": 1.0, | |
"blimp_left_branch_island_simple_question": 1.0, | |
"blimp_matrix_question_npi_licensor_present": 1.0, | |
"blimp_npi_present_1": 1.0, | |
"blimp_npi_present_2": 1.0, | |
"blimp_only_npi_licensor_present": 1.0, | |
"blimp_only_npi_scope": 1.0, | |
"blimp_passive_1": 1.0, | |
"blimp_passive_2": 1.0, | |
"blimp_principle_A_c_command": 1.0, | |
"blimp_principle_A_case_1": 1.0, | |
"blimp_principle_A_case_2": 1.0, | |
"blimp_principle_A_domain_1": 1.0, | |
"blimp_principle_A_domain_2": 1.0, | |
"blimp_principle_A_domain_3": 1.0, | |
"blimp_principle_A_reconstruction": 1.0, | |
"blimp_regular_plural_subject_verb_agreement_1": 1.0, | |
"blimp_regular_plural_subject_verb_agreement_2": 1.0, | |
"blimp_sentential_negation_npi_licensor_present": 1.0, | |
"blimp_sentential_negation_npi_scope": 1.0, | |
"blimp_sentential_subject_island": 1.0, | |
"blimp_superlative_quantifiers_1": 1.0, | |
"blimp_superlative_quantifiers_2": 1.0, | |
"blimp_tough_vs_raising_1": 1.0, | |
"blimp_tough_vs_raising_2": 1.0, | |
"blimp_transitive": 1.0, | |
"blimp_wh_island": 1.0, | |
"blimp_wh_questions_object_gap": 1.0, | |
"blimp_wh_questions_subject_gap": 1.0, | |
"blimp_wh_questions_subject_gap_long_distance": 1.0, | |
"blimp_wh_vs_that_no_gap": 1.0, | |
"blimp_wh_vs_that_no_gap_long_distance": 1.0, | |
"blimp_wh_vs_that_with_gap": 1.0, | |
"blimp_wh_vs_that_with_gap_long_distance": 1.0, | |
"lambada_openai": 1.0, | |
"logiqa": 1.0, | |
"mmlu": 2, | |
"mmlu_abstract_algebra": 1.0, | |
"mmlu_anatomy": 1.0, | |
"mmlu_astronomy": 1.0, | |
"mmlu_business_ethics": 1.0, | |
"mmlu_clinical_knowledge": 1.0, | |
"mmlu_college_biology": 1.0, | |
"mmlu_college_chemistry": 1.0, | |
"mmlu_college_computer_science": 1.0, | |
"mmlu_college_mathematics": 1.0, | |
"mmlu_college_medicine": 1.0, | |
"mmlu_college_physics": 1.0, | |
"mmlu_computer_security": 1.0, | |
"mmlu_conceptual_physics": 1.0, | |
"mmlu_econometrics": 1.0, | |
"mmlu_electrical_engineering": 1.0, | |
"mmlu_elementary_mathematics": 1.0, | |
"mmlu_formal_logic": 1.0, | |
"mmlu_global_facts": 1.0, | |
"mmlu_high_school_biology": 1.0, | |
"mmlu_high_school_chemistry": 1.0, | |
"mmlu_high_school_computer_science": 1.0, | |
"mmlu_high_school_european_history": 1.0, | |
"mmlu_high_school_geography": 1.0, | |
"mmlu_high_school_government_and_politics": 1.0, | |
"mmlu_high_school_macroeconomics": 1.0, | |
"mmlu_high_school_mathematics": 1.0, | |
"mmlu_high_school_microeconomics": 1.0, | |
"mmlu_high_school_physics": 1.0, | |
"mmlu_high_school_psychology": 1.0, | |
"mmlu_high_school_statistics": 1.0, | |
"mmlu_high_school_us_history": 1.0, | |
"mmlu_high_school_world_history": 1.0, | |
"mmlu_human_aging": 1.0, | |
"mmlu_human_sexuality": 1.0, | |
"mmlu_humanities": 2, | |
"mmlu_international_law": 1.0, | |
"mmlu_jurisprudence": 1.0, | |
"mmlu_logical_fallacies": 1.0, | |
"mmlu_machine_learning": 1.0, | |
"mmlu_management": 1.0, | |
"mmlu_marketing": 1.0, | |
"mmlu_medical_genetics": 1.0, | |
"mmlu_miscellaneous": 1.0, | |
"mmlu_moral_disputes": 1.0, | |
"mmlu_moral_scenarios": 1.0, | |
"mmlu_nutrition": 1.0, | |
"mmlu_other": 2, | |
"mmlu_philosophy": 1.0, | |
"mmlu_prehistory": 1.0, | |
"mmlu_professional_accounting": 1.0, | |
"mmlu_professional_law": 1.0, | |
"mmlu_professional_medicine": 1.0, | |
"mmlu_professional_psychology": 1.0, | |
"mmlu_public_relations": 1.0, | |
"mmlu_security_studies": 1.0, | |
"mmlu_social_sciences": 2, | |
"mmlu_sociology": 1.0, | |
"mmlu_stem": 2, | |
"mmlu_us_foreign_policy": 1.0, | |
"mmlu_virology": 1.0, | |
"mmlu_world_religions": 1.0, | |
"piqa": 1.0, | |
"sciq": 1.0, | |
"wikitext": 2.0, | |
"winogrande": 1.0, | |
"wsc": 1.0 | |
}, | |
"n-shot": { | |
"arc_challenge": 0, | |
"arc_easy": 0, | |
"blimp_adjunct_island": 0, | |
"blimp_anaphor_gender_agreement": 0, | |
"blimp_anaphor_number_agreement": 0, | |
"blimp_animate_subject_passive": 0, | |
"blimp_animate_subject_trans": 0, | |
"blimp_causative": 0, | |
"blimp_complex_NP_island": 0, | |
"blimp_coordinate_structure_constraint_complex_left_branch": 0, | |
"blimp_coordinate_structure_constraint_object_extraction": 0, | |
"blimp_determiner_noun_agreement_1": 0, | |
"blimp_determiner_noun_agreement_2": 0, | |
"blimp_determiner_noun_agreement_irregular_1": 0, | |
"blimp_determiner_noun_agreement_irregular_2": 0, | |
"blimp_determiner_noun_agreement_with_adj_2": 0, | |
"blimp_determiner_noun_agreement_with_adj_irregular_1": 0, | |
"blimp_determiner_noun_agreement_with_adj_irregular_2": 0, | |
"blimp_determiner_noun_agreement_with_adjective_1": 0, | |
"blimp_distractor_agreement_relational_noun": 0, | |
"blimp_distractor_agreement_relative_clause": 0, | |
"blimp_drop_argument": 0, | |
"blimp_ellipsis_n_bar_1": 0, | |
"blimp_ellipsis_n_bar_2": 0, | |
"blimp_existential_there_object_raising": 0, | |
"blimp_existential_there_quantifiers_1": 0, | |
"blimp_existential_there_quantifiers_2": 0, | |
"blimp_existential_there_subject_raising": 0, | |
"blimp_expletive_it_object_raising": 0, | |
"blimp_inchoative": 0, | |
"blimp_intransitive": 0, | |
"blimp_irregular_past_participle_adjectives": 0, | |
"blimp_irregular_past_participle_verbs": 0, | |
"blimp_irregular_plural_subject_verb_agreement_1": 0, | |
"blimp_irregular_plural_subject_verb_agreement_2": 0, | |
"blimp_left_branch_island_echo_question": 0, | |
"blimp_left_branch_island_simple_question": 0, | |
"blimp_matrix_question_npi_licensor_present": 0, | |
"blimp_npi_present_1": 0, | |
"blimp_npi_present_2": 0, | |
"blimp_only_npi_licensor_present": 0, | |
"blimp_only_npi_scope": 0, | |
"blimp_passive_1": 0, | |
"blimp_passive_2": 0, | |
"blimp_principle_A_c_command": 0, | |
"blimp_principle_A_case_1": 0, | |
"blimp_principle_A_case_2": 0, | |
"blimp_principle_A_domain_1": 0, | |
"blimp_principle_A_domain_2": 0, | |
"blimp_principle_A_domain_3": 0, | |
"blimp_principle_A_reconstruction": 0, | |
"blimp_regular_plural_subject_verb_agreement_1": 0, | |
"blimp_regular_plural_subject_verb_agreement_2": 0, | |
"blimp_sentential_negation_npi_licensor_present": 0, | |
"blimp_sentential_negation_npi_scope": 0, | |
"blimp_sentential_subject_island": 0, | |
"blimp_superlative_quantifiers_1": 0, | |
"blimp_superlative_quantifiers_2": 0, | |
"blimp_tough_vs_raising_1": 0, | |
"blimp_tough_vs_raising_2": 0, | |
"blimp_transitive": 0, | |
"blimp_wh_island": 0, | |
"blimp_wh_questions_object_gap": 0, | |
"blimp_wh_questions_subject_gap": 0, | |
"blimp_wh_questions_subject_gap_long_distance": 0, | |
"blimp_wh_vs_that_no_gap": 0, | |
"blimp_wh_vs_that_no_gap_long_distance": 0, | |
"blimp_wh_vs_that_with_gap": 0, | |
"blimp_wh_vs_that_with_gap_long_distance": 0, | |
"lambada_openai": 0, | |
"logiqa": 0, | |
"mmlu_abstract_algebra": 0, | |
"mmlu_anatomy": 0, | |
"mmlu_astronomy": 0, | |
"mmlu_business_ethics": 0, | |
"mmlu_clinical_knowledge": 0, | |
"mmlu_college_biology": 0, | |
"mmlu_college_chemistry": 0, | |
"mmlu_college_computer_science": 0, | |
"mmlu_college_mathematics": 0, | |
"mmlu_college_medicine": 0, | |
"mmlu_college_physics": 0, | |
"mmlu_computer_security": 0, | |
"mmlu_conceptual_physics": 0, | |
"mmlu_econometrics": 0, | |
"mmlu_electrical_engineering": 0, | |
"mmlu_elementary_mathematics": 0, | |
"mmlu_formal_logic": 0, | |
"mmlu_global_facts": 0, | |
"mmlu_high_school_biology": 0, | |
"mmlu_high_school_chemistry": 0, | |
"mmlu_high_school_computer_science": 0, | |
"mmlu_high_school_european_history": 0, | |
"mmlu_high_school_geography": 0, | |
"mmlu_high_school_government_and_politics": 0, | |
"mmlu_high_school_macroeconomics": 0, | |
"mmlu_high_school_mathematics": 0, | |
"mmlu_high_school_microeconomics": 0, | |
"mmlu_high_school_physics": 0, | |
"mmlu_high_school_psychology": 0, | |
"mmlu_high_school_statistics": 0, | |
"mmlu_high_school_us_history": 0, | |
"mmlu_high_school_world_history": 0, | |
"mmlu_human_aging": 0, | |
"mmlu_human_sexuality": 0, | |
"mmlu_international_law": 0, | |
"mmlu_jurisprudence": 0, | |
"mmlu_logical_fallacies": 0, | |
"mmlu_machine_learning": 0, | |
"mmlu_management": 0, | |
"mmlu_marketing": 0, | |
"mmlu_medical_genetics": 0, | |
"mmlu_miscellaneous": 0, | |
"mmlu_moral_disputes": 0, | |
"mmlu_moral_scenarios": 0, | |
"mmlu_nutrition": 0, | |
"mmlu_philosophy": 0, | |
"mmlu_prehistory": 0, | |
"mmlu_professional_accounting": 0, | |
"mmlu_professional_law": 0, | |
"mmlu_professional_medicine": 0, | |
"mmlu_professional_psychology": 0, | |
"mmlu_public_relations": 0, | |
"mmlu_security_studies": 0, | |
"mmlu_sociology": 0, | |
"mmlu_us_foreign_policy": 0, | |
"mmlu_virology": 0, | |
"mmlu_world_religions": 0, | |
"piqa": 0, | |
"sciq": 0, | |
"wikitext": 0, | |
"winogrande": 0, | |
"wsc": 0 | |
}, | |
"higher_is_better": { | |
"arc_challenge": { | |
"acc": true, | |
"acc_norm": true | |
}, | |
"arc_easy": { | |
"acc": true, | |
"acc_norm": true | |
}, | |
"blimp": { | |
"acc": true | |
}, | |
"blimp_adjunct_island": { | |
"acc": true | |
}, | |
"blimp_anaphor_gender_agreement": { | |
"acc": true | |
}, | |
"blimp_anaphor_number_agreement": { | |
"acc": true | |
}, | |
"blimp_animate_subject_passive": { | |
"acc": true | |
}, | |
"blimp_animate_subject_trans": { | |
"acc": true | |
}, | |
"blimp_causative": { | |
"acc": true | |
}, | |
"blimp_complex_NP_island": { | |
"acc": true | |
}, | |
"blimp_coordinate_structure_constraint_complex_left_branch": { | |
"acc": true | |
}, | |
"blimp_coordinate_structure_constraint_object_extraction": { | |
"acc": true | |
}, | |
"blimp_determiner_noun_agreement_1": { | |
"acc": true | |
}, | |
"blimp_determiner_noun_agreement_2": { | |
"acc": true | |
}, | |
"blimp_determiner_noun_agreement_irregular_1": { | |
"acc": true | |
}, | |
"blimp_determiner_noun_agreement_irregular_2": { | |
"acc": true | |
}, | |
"blimp_determiner_noun_agreement_with_adj_2": { | |
"acc": true | |
}, | |
"blimp_determiner_noun_agreement_with_adj_irregular_1": { | |
"acc": true | |
}, | |
"blimp_determiner_noun_agreement_with_adj_irregular_2": { | |
"acc": true | |
}, | |
"blimp_determiner_noun_agreement_with_adjective_1": { | |
"acc": true | |
}, | |
"blimp_distractor_agreement_relational_noun": { | |
"acc": true | |
}, | |
"blimp_distractor_agreement_relative_clause": { | |
"acc": true | |
}, | |
"blimp_drop_argument": { | |
"acc": true | |
}, | |
"blimp_ellipsis_n_bar_1": { | |
"acc": true | |
}, | |
"blimp_ellipsis_n_bar_2": { | |
"acc": true | |
}, | |
"blimp_existential_there_object_raising": { | |
"acc": true | |
}, | |
"blimp_existential_there_quantifiers_1": { | |
"acc": true | |
}, | |
"blimp_existential_there_quantifiers_2": { | |
"acc": true | |
}, | |
"blimp_existential_there_subject_raising": { | |
"acc": true | |
}, | |
"blimp_expletive_it_object_raising": { | |
"acc": true | |
}, | |
"blimp_inchoative": { | |
"acc": true | |
}, | |
"blimp_intransitive": { | |
"acc": true | |
}, | |
"blimp_irregular_past_participle_adjectives": { | |
"acc": true | |
}, | |
"blimp_irregular_past_participle_verbs": { | |
"acc": true | |
}, | |
"blimp_irregular_plural_subject_verb_agreement_1": { | |
"acc": true | |
}, | |
"blimp_irregular_plural_subject_verb_agreement_2": { | |
"acc": true | |
}, | |
"blimp_left_branch_island_echo_question": { | |
"acc": true | |
}, | |
"blimp_left_branch_island_simple_question": { | |
"acc": true | |
}, | |
"blimp_matrix_question_npi_licensor_present": { | |
"acc": true | |
}, | |
"blimp_npi_present_1": { | |
"acc": true | |
}, | |
"blimp_npi_present_2": { | |
"acc": true | |
}, | |
"blimp_only_npi_licensor_present": { | |
"acc": true | |
}, | |
"blimp_only_npi_scope": { | |
"acc": true | |
}, | |
"blimp_passive_1": { | |
"acc": true | |
}, | |
"blimp_passive_2": { | |
"acc": true | |
}, | |
"blimp_principle_A_c_command": { | |
"acc": true | |
}, | |
"blimp_principle_A_case_1": { | |
"acc": true | |
}, | |
"blimp_principle_A_case_2": { | |
"acc": true | |
}, | |
"blimp_principle_A_domain_1": { | |
"acc": true | |
}, | |
"blimp_principle_A_domain_2": { | |
"acc": true | |
}, | |
"blimp_principle_A_domain_3": { | |
"acc": true | |
}, | |
"blimp_principle_A_reconstruction": { | |
"acc": true | |
}, | |
"blimp_regular_plural_subject_verb_agreement_1": { | |
"acc": true | |
}, | |
"blimp_regular_plural_subject_verb_agreement_2": { | |
"acc": true | |
}, | |
"blimp_sentential_negation_npi_licensor_present": { | |
"acc": true | |
}, | |
"blimp_sentential_negation_npi_scope": { | |
"acc": true | |
}, | |
"blimp_sentential_subject_island": { | |
"acc": true | |
}, | |
"blimp_superlative_quantifiers_1": { | |
"acc": true | |
}, | |
"blimp_superlative_quantifiers_2": { | |
"acc": true | |
}, | |
"blimp_tough_vs_raising_1": { | |
"acc": true | |
}, | |
"blimp_tough_vs_raising_2": { | |
"acc": true | |
}, | |
"blimp_transitive": { | |
"acc": true | |
}, | |
"blimp_wh_island": { | |
"acc": true | |
}, | |
"blimp_wh_questions_object_gap": { | |
"acc": true | |
}, | |
"blimp_wh_questions_subject_gap": { | |
"acc": true | |
}, | |
"blimp_wh_questions_subject_gap_long_distance": { | |
"acc": true | |
}, | |
"blimp_wh_vs_that_no_gap": { | |
"acc": true | |
}, | |
"blimp_wh_vs_that_no_gap_long_distance": { | |
"acc": true | |
}, | |
"blimp_wh_vs_that_with_gap": { | |
"acc": true | |
}, | |
"blimp_wh_vs_that_with_gap_long_distance": { | |
"acc": true | |
}, | |
"lambada_openai": { | |
"perplexity": false, | |
"acc": true | |
}, | |
"logiqa": { | |
"acc": true, | |
"acc_norm": true | |
}, | |
"mmlu": { | |
"acc": true | |
}, | |
"mmlu_abstract_algebra": { | |
"acc": true | |
}, | |
"mmlu_anatomy": { | |
"acc": true | |
}, | |
"mmlu_astronomy": { | |
"acc": true | |
}, | |
"mmlu_business_ethics": { | |
"acc": true | |
}, | |
"mmlu_clinical_knowledge": { | |
"acc": true | |
}, | |
"mmlu_college_biology": { | |
"acc": true | |
}, | |
"mmlu_college_chemistry": { | |
"acc": true | |
}, | |
"mmlu_college_computer_science": { | |
"acc": true | |
}, | |
"mmlu_college_mathematics": { | |
"acc": true | |
}, | |
"mmlu_college_medicine": { | |
"acc": true | |
}, | |
"mmlu_college_physics": { | |
"acc": true | |
}, | |
"mmlu_computer_security": { | |
"acc": true | |
}, | |
"mmlu_conceptual_physics": { | |
"acc": true | |
}, | |
"mmlu_econometrics": { | |
"acc": true | |
}, | |
"mmlu_electrical_engineering": { | |
"acc": true | |
}, | |
"mmlu_elementary_mathematics": { | |
"acc": true | |
}, | |
"mmlu_formal_logic": { | |
"acc": true | |
}, | |
"mmlu_global_facts": { | |
"acc": true | |
}, | |
"mmlu_high_school_biology": { | |
"acc": true | |
}, | |
"mmlu_high_school_chemistry": { | |
"acc": true | |
}, | |
"mmlu_high_school_computer_science": { | |
"acc": true | |
}, | |
"mmlu_high_school_european_history": { | |
"acc": true | |
}, | |
"mmlu_high_school_geography": { | |
"acc": true | |
}, | |
"mmlu_high_school_government_and_politics": { | |
"acc": true | |
}, | |
"mmlu_high_school_macroeconomics": { | |
"acc": true | |
}, | |
"mmlu_high_school_mathematics": { | |
"acc": true | |
}, | |
"mmlu_high_school_microeconomics": { | |
"acc": true | |
}, | |
"mmlu_high_school_physics": { | |
"acc": true | |
}, | |
"mmlu_high_school_psychology": { | |
"acc": true | |
}, | |
"mmlu_high_school_statistics": { | |
"acc": true | |
}, | |
"mmlu_high_school_us_history": { | |
"acc": true | |
}, | |
"mmlu_high_school_world_history": { | |
"acc": true | |
}, | |
"mmlu_human_aging": { | |
"acc": true | |
}, | |
"mmlu_human_sexuality": { | |
"acc": true | |
}, | |
"mmlu_humanities": { | |
"acc": true | |
}, | |
"mmlu_international_law": { | |
"acc": true | |
}, | |
"mmlu_jurisprudence": { | |
"acc": true | |
}, | |
"mmlu_logical_fallacies": { | |
"acc": true | |
}, | |
"mmlu_machine_learning": { | |
"acc": true | |
}, | |
"mmlu_management": { | |
"acc": true | |
}, | |
"mmlu_marketing": { | |
"acc": true | |
}, | |
"mmlu_medical_genetics": { | |
"acc": true | |
}, | |
"mmlu_miscellaneous": { | |
"acc": true | |
}, | |
"mmlu_moral_disputes": { | |
"acc": true | |
}, | |
"mmlu_moral_scenarios": { | |
"acc": true | |
}, | |
"mmlu_nutrition": { | |
"acc": true | |
}, | |
"mmlu_other": { | |
"acc": true | |
}, | |
"mmlu_philosophy": { | |
"acc": true | |
}, | |
"mmlu_prehistory": { | |
"acc": true | |
}, | |
"mmlu_professional_accounting": { | |
"acc": true | |
}, | |
"mmlu_professional_law": { | |
"acc": true | |
}, | |
"mmlu_professional_medicine": { | |
"acc": true | |
}, | |
"mmlu_professional_psychology": { | |
"acc": true | |
}, | |
"mmlu_public_relations": { | |
"acc": true | |
}, | |
"mmlu_security_studies": { | |
"acc": true | |
}, | |
"mmlu_social_sciences": { | |
"acc": true | |
}, | |
"mmlu_sociology": { | |
"acc": true | |
}, | |
"mmlu_stem": { | |
"acc": true | |
}, | |
"mmlu_us_foreign_policy": { | |
"acc": true | |
}, | |
"mmlu_virology": { | |
"acc": true | |
}, | |
"mmlu_world_religions": { | |
"acc": true | |
}, | |
"piqa": { | |
"acc": true, | |
"acc_norm": true | |
}, | |
"sciq": { | |
"acc": true, | |
"acc_norm": true | |
}, | |
"wikitext": { | |
"word_perplexity": false, | |
"byte_perplexity": false, | |
"bits_per_byte": false | |
}, | |
"winogrande": { | |
"acc": true | |
}, | |
"wsc": { | |
"acc": true | |
} | |
}, | |
"n-samples": { | |
"wsc": { | |
"original": 104, | |
"effective": 104 | |
}, | |
"winogrande": { | |
"original": 1267, | |
"effective": 1267 | |
}, | |
"wikitext": { | |
"original": 62, | |
"effective": 62 | |
}, | |
"sciq": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"piqa": { | |
"original": 1838, | |
"effective": 1838 | |
}, | |
"mmlu_elementary_mathematics": { | |
"original": 378, | |
"effective": 378 | |
}, | |
"mmlu_electrical_engineering": { | |
"original": 145, | |
"effective": 145 | |
}, | |
"mmlu_high_school_computer_science": { | |
"original": 100, | |
"effective": 100 | |
}, | |
"mmlu_high_school_physics": { | |
"original": 151, | |
"effective": 151 | |
}, | |
"mmlu_college_mathematics": { | |
"original": 100, | |
"effective": 100 | |
}, | |
"mmlu_college_chemistry": { | |
"original": 100, | |
"effective": 100 | |
}, | |
"mmlu_machine_learning": { | |
"original": 112, | |
"effective": 112 | |
}, | |
"mmlu_high_school_mathematics": { | |
"original": 270, | |
"effective": 270 | |
}, | |
"mmlu_computer_security": { | |
"original": 100, | |
"effective": 100 | |
}, | |
"mmlu_conceptual_physics": { | |
"original": 235, | |
"effective": 235 | |
}, | |
"mmlu_high_school_statistics": { | |
"original": 216, | |
"effective": 216 | |
}, | |
"mmlu_high_school_biology": { | |
"original": 310, | |
"effective": 310 | |
}, | |
"mmlu_astronomy": { | |
"original": 152, | |
"effective": 152 | |
}, | |
"mmlu_college_computer_science": { | |
"original": 100, | |
"effective": 100 | |
}, | |
"mmlu_college_biology": { | |
"original": 144, | |
"effective": 144 | |
}, | |
"mmlu_college_physics": { | |
"original": 102, | |
"effective": 102 | |
}, | |
"mmlu_anatomy": { | |
"original": 135, | |
"effective": 135 | |
}, | |
"mmlu_high_school_chemistry": { | |
"original": 203, | |
"effective": 203 | |
}, | |
"mmlu_abstract_algebra": { | |
"original": 100, | |
"effective": 100 | |
}, | |
"mmlu_college_medicine": { | |
"original": 173, | |
"effective": 173 | |
}, | |
"mmlu_medical_genetics": { | |
"original": 100, | |
"effective": 100 | |
}, | |
"mmlu_business_ethics": { | |
"original": 100, | |
"effective": 100 | |
}, | |
"mmlu_miscellaneous": { | |
"original": 783, | |
"effective": 783 | |
}, | |
"mmlu_nutrition": { | |
"original": 306, | |
"effective": 306 | |
}, | |
"mmlu_clinical_knowledge": { | |
"original": 265, | |
"effective": 265 | |
}, | |
"mmlu_human_aging": { | |
"original": 223, | |
"effective": 223 | |
}, | |
"mmlu_professional_accounting": { | |
"original": 282, | |
"effective": 282 | |
}, | |
"mmlu_marketing": { | |
"original": 234, | |
"effective": 234 | |
}, | |
"mmlu_global_facts": { | |
"original": 100, | |
"effective": 100 | |
}, | |
"mmlu_professional_medicine": { | |
"original": 272, | |
"effective": 272 | |
}, | |
"mmlu_virology": { | |
"original": 166, | |
"effective": 166 | |
}, | |
"mmlu_management": { | |
"original": 103, | |
"effective": 103 | |
}, | |
"mmlu_us_foreign_policy": { | |
"original": 100, | |
"effective": 100 | |
}, | |
"mmlu_sociology": { | |
"original": 201, | |
"effective": 201 | |
}, | |
"mmlu_econometrics": { | |
"original": 114, | |
"effective": 114 | |
}, | |
"mmlu_security_studies": { | |
"original": 245, | |
"effective": 245 | |
}, | |
"mmlu_high_school_geography": { | |
"original": 198, | |
"effective": 198 | |
}, | |
"mmlu_public_relations": { | |
"original": 110, | |
"effective": 110 | |
}, | |
"mmlu_high_school_microeconomics": { | |
"original": 238, | |
"effective": 238 | |
}, | |
"mmlu_professional_psychology": { | |
"original": 612, | |
"effective": 612 | |
}, | |
"mmlu_high_school_macroeconomics": { | |
"original": 390, | |
"effective": 390 | |
}, | |
"mmlu_human_sexuality": { | |
"original": 131, | |
"effective": 131 | |
}, | |
"mmlu_high_school_government_and_politics": { | |
"original": 193, | |
"effective": 193 | |
}, | |
"mmlu_high_school_psychology": { | |
"original": 545, | |
"effective": 545 | |
}, | |
"mmlu_moral_disputes": { | |
"original": 346, | |
"effective": 346 | |
}, | |
"mmlu_high_school_world_history": { | |
"original": 237, | |
"effective": 237 | |
}, | |
"mmlu_jurisprudence": { | |
"original": 108, | |
"effective": 108 | |
}, | |
"mmlu_philosophy": { | |
"original": 311, | |
"effective": 311 | |
}, | |
"mmlu_high_school_us_history": { | |
"original": 204, | |
"effective": 204 | |
}, | |
"mmlu_professional_law": { | |
"original": 1534, | |
"effective": 1534 | |
}, | |
"mmlu_logical_fallacies": { | |
"original": 163, | |
"effective": 163 | |
}, | |
"mmlu_moral_scenarios": { | |
"original": 895, | |
"effective": 895 | |
}, | |
"mmlu_formal_logic": { | |
"original": 126, | |
"effective": 126 | |
}, | |
"mmlu_prehistory": { | |
"original": 324, | |
"effective": 324 | |
}, | |
"mmlu_high_school_european_history": { | |
"original": 165, | |
"effective": 165 | |
}, | |
"mmlu_world_religions": { | |
"original": 171, | |
"effective": 171 | |
}, | |
"mmlu_international_law": { | |
"original": 121, | |
"effective": 121 | |
}, | |
"logiqa": { | |
"original": 651, | |
"effective": 651 | |
}, | |
"lambada_openai": { | |
"original": 5153, | |
"effective": 5153 | |
}, | |
"blimp_adjunct_island": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_anaphor_gender_agreement": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_anaphor_number_agreement": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_animate_subject_passive": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_animate_subject_trans": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_causative": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_complex_NP_island": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_coordinate_structure_constraint_complex_left_branch": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_coordinate_structure_constraint_object_extraction": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_determiner_noun_agreement_1": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_determiner_noun_agreement_2": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_determiner_noun_agreement_irregular_1": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_determiner_noun_agreement_irregular_2": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_determiner_noun_agreement_with_adj_2": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_determiner_noun_agreement_with_adj_irregular_1": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_determiner_noun_agreement_with_adj_irregular_2": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_determiner_noun_agreement_with_adjective_1": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_distractor_agreement_relational_noun": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_distractor_agreement_relative_clause": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_drop_argument": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_ellipsis_n_bar_1": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_ellipsis_n_bar_2": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_existential_there_object_raising": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_existential_there_quantifiers_1": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_existential_there_quantifiers_2": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_existential_there_subject_raising": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_expletive_it_object_raising": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_inchoative": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_intransitive": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_irregular_past_participle_adjectives": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_irregular_past_participle_verbs": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_irregular_plural_subject_verb_agreement_1": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_irregular_plural_subject_verb_agreement_2": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_left_branch_island_echo_question": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_left_branch_island_simple_question": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_matrix_question_npi_licensor_present": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_npi_present_1": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_npi_present_2": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_only_npi_licensor_present": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_only_npi_scope": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_passive_1": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_passive_2": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_principle_A_c_command": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_principle_A_case_1": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_principle_A_case_2": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_principle_A_domain_1": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_principle_A_domain_2": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_principle_A_domain_3": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_principle_A_reconstruction": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_regular_plural_subject_verb_agreement_1": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_regular_plural_subject_verb_agreement_2": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_sentential_negation_npi_licensor_present": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_sentential_negation_npi_scope": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_sentential_subject_island": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_superlative_quantifiers_1": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_superlative_quantifiers_2": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_tough_vs_raising_1": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_tough_vs_raising_2": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_transitive": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_wh_island": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_wh_questions_object_gap": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_wh_questions_subject_gap": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_wh_questions_subject_gap_long_distance": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_wh_vs_that_no_gap": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_wh_vs_that_no_gap_long_distance": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_wh_vs_that_with_gap": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"blimp_wh_vs_that_with_gap_long_distance": { | |
"original": 1000, | |
"effective": 1000 | |
}, | |
"arc_challenge": { | |
"original": 1172, | |
"effective": 1172 | |
}, | |
"arc_easy": { | |
"original": 2376, | |
"effective": 2376 | |
} | |
}, | |
"config": { | |
"model": "hf", | |
"model_args": "pretrained=EleutherAI/pythia-70m,revision=step8,dtype=float,trust_remote_code=True", | |
"model_num_parameters": 70426624, | |
"model_dtype": "torch.float32", | |
"model_revision": "step8", | |
"model_sha": "8f0dfb271a4510191bfc46ac9b7093b3bd069e85", | |
"batch_size": "8", | |
"batch_sizes": [], | |
"device": "cuda:0", | |
"use_cache": null, | |
"limit": null, | |
"bootstrap_iters": 100000, | |
"gen_kwargs": null, | |
"random_seed": 0, | |
"numpy_seed": 1234, | |
"torch_seed": 1234, | |
"fewshot_seed": 1234 | |
}, | |
"git_hash": "a5b7c41", | |
"date": 1729867795.0246165, | |
"pretty_env_info": "PyTorch version: 2.5.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.3 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: 14.0.0-1ubuntu1.1\nCMake version: version 3.30.5\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.1.85+-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.2.140\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: NVIDIA A100-SXM4-40GB\nNvidia driver version: 535.104.05\ncuDNN version: Probably one of the following:\n/usr/lib/x86_64-linux-gnu/libcudnn.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.9.6\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 12\nOn-line CPU(s) list: 0-11\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 6\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.30\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 192 KiB (6 instances)\nL1i cache: 192 KiB (6 instances)\nL2 cache: 6 MiB (6 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-11\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Not affected\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable\nVulnerability Reg file data sampling: Not affected\nVulnerability Retbleed: Vulnerable\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Vulnerable\nVulnerability Spectre v1: Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers\nVulnerability Spectre v2: Vulnerable; IBPB: disabled; STIBP: disabled; PBRSB-eIBRS: Vulnerable; BHI: Vulnerable (Syscall hardening enabled)\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Vulnerable\n\nVersions of relevant libraries:\n[pip3] mypy==1.13.0\n[pip3] mypy-extensions==1.0.0\n[pip3] numpy==1.26.4\n[pip3] optree==0.13.0\n[pip3] torch==2.5.0+cu121\n[pip3] torchaudio==2.5.0+cu121\n[pip3] torchsummary==1.5.1\n[pip3] torchvision==0.20.0+cu121\n[conda] Could not collect", | |
"transformers_version": "4.44.2", | |
"upper_git_hash": null, | |
"tokenizer_pad_token": [ | |
"<|endoftext|>", | |
"0" | |
], | |
"tokenizer_eos_token": [ | |
"<|endoftext|>", | |
"0" | |
], | |
"tokenizer_bos_token": [ | |
"<|endoftext|>", | |
"0" | |
], | |
"eot_token_id": 0, | |
"max_length": 2048, | |
"task_hashes": {}, | |
"model_source": "hf", | |
"model_name": "EleutherAI/pythia-70m", | |
"model_name_sanitized": "EleutherAI__pythia-70m", | |
"system_instruction": null, | |
"system_instruction_sha": null, | |
"fewshot_as_multiturn": false, | |
"chat_template": null, | |
"chat_template_sha": null, | |
"start_time": 2676.62866295, | |
"end_time": 3257.031684832, | |
"total_evaluation_time_seconds": "580.4030218819998" | |
} |
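
Reproduction note: the "config" block above records the settings used for this run (EleutherAI/pythia-70m at revision step8, batch size 8, device cuda:0, harness git hash a5b7c41). The snippet below is a minimal, hypothetical sketch of how a comparable run might be launched through the lm-evaluation-harness Python API; the task list is inferred from the result keys reported in this file and may be incomplete, and argument names can differ slightly between harness versions.

# Hypothetical sketch: settings copied from the "config" block above;
# the task list is an assumption based on the result keys in this file.
import json

import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args=(
        "pretrained=EleutherAI/pythia-70m,"
        "revision=step8,dtype=float,trust_remote_code=True"
    ),
    tasks=["blimp", "lambada_openai", "logiqa", "arc_easy", "arc_challenge", "mmlu"],
    batch_size=8,
    device="cuda:0",
)

# Persist the per-task metrics in roughly the same shape as the "results"
# section of this file (the output file name here is arbitrary).
with open("pythia-70m_step8_results.json", "w") as f:
    json.dump(results["results"], f, indent=2, default=str)

The same configuration roughly maps onto the command-line entry point, e.g. lm_eval --model hf --model_args pretrained=EleutherAI/pythia-70m,revision=step8,dtype=float,trust_remote_code=True --tasks blimp,lambada_openai,logiqa,arc_easy,arc_challenge,mmlu --batch_size 8 --device cuda:0.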