diff --git "a/EleutherAI/pythia-70m/step_4" "b/EleutherAI/pythia-70m/step_4" new file mode 100644 --- /dev/null +++ "b/EleutherAI/pythia-70m/step_4" @@ -0,0 +1,6456 @@ +{ + "results": { + "arc_challenge": { + "alias": "arc_challenge", + "acc,none": 0.2158703071672355, + "acc_stderr,none": 0.012022975360030668, + "acc_norm,none": 0.24573378839590443, + "acc_norm_stderr,none": 0.012581033453730114 + }, + "arc_easy": { + "alias": "arc_easy", + "acc,none": 0.2727272727272727, + "acc_stderr,none": 0.00913863072636423, + "acc_norm,none": 0.2521043771043771, + "acc_norm_stderr,none": 0.008910024163218178 + }, + "blimp": { + "acc,none": 0.5266119402985074, + "acc_stderr,none": 0.0018730484493487852, + "alias": "blimp" + }, + "blimp_adjunct_island": { + "alias": " - blimp_adjunct_island", + "acc,none": 0.534, + "acc_stderr,none": 0.015782683329937625 + }, + "blimp_anaphor_gender_agreement": { + "alias": " - blimp_anaphor_gender_agreement", + "acc,none": 0.614, + "acc_stderr,none": 0.015402637476784374 + }, + "blimp_anaphor_number_agreement": { + "alias": " - blimp_anaphor_number_agreement", + "acc,none": 0.563, + "acc_stderr,none": 0.015693223928730373 + }, + "blimp_animate_subject_passive": { + "alias": " - blimp_animate_subject_passive", + "acc,none": 0.606, + "acc_stderr,none": 0.015459721957493377 + }, + "blimp_animate_subject_trans": { + "alias": " - blimp_animate_subject_trans", + "acc,none": 0.802, + "acc_stderr,none": 0.0126077339341753 + }, + "blimp_causative": { + "alias": " - blimp_causative", + "acc,none": 0.396, + "acc_stderr,none": 0.015473313265859408 + }, + "blimp_complex_NP_island": { + "alias": " - blimp_complex_NP_island", + "acc,none": 0.472, + "acc_stderr,none": 0.015794475789511476 + }, + "blimp_coordinate_structure_constraint_complex_left_branch": { + "alias": " - blimp_coordinate_structure_constraint_complex_left_branch", + "acc,none": 0.525, + "acc_stderr,none": 0.01579951342999601 + }, + "blimp_coordinate_structure_constraint_object_extraction": { + "alias": " - blimp_coordinate_structure_constraint_object_extraction", + "acc,none": 0.63, + "acc_stderr,none": 0.015275252316519362 + }, + "blimp_determiner_noun_agreement_1": { + "alias": " - blimp_determiner_noun_agreement_1", + "acc,none": 0.511, + "acc_stderr,none": 0.015815471195292686 + }, + "blimp_determiner_noun_agreement_2": { + "alias": " - blimp_determiner_noun_agreement_2", + "acc,none": 0.516, + "acc_stderr,none": 0.015811198373114878 + }, + "blimp_determiner_noun_agreement_irregular_1": { + "alias": " - blimp_determiner_noun_agreement_irregular_1", + "acc,none": 0.492, + "acc_stderr,none": 0.015817274929209008 + }, + "blimp_determiner_noun_agreement_irregular_2": { + "alias": " - blimp_determiner_noun_agreement_irregular_2", + "acc,none": 0.488, + "acc_stderr,none": 0.015814743314581818 + }, + "blimp_determiner_noun_agreement_with_adj_2": { + "alias": " - blimp_determiner_noun_agreement_with_adj_2", + "acc,none": 0.51, + "acc_stderr,none": 0.015816135752773207 + }, + "blimp_determiner_noun_agreement_with_adj_irregular_1": { + "alias": " - blimp_determiner_noun_agreement_with_adj_irregular_1", + "acc,none": 0.49, + "acc_stderr,none": 0.015816135752773207 + }, + "blimp_determiner_noun_agreement_with_adj_irregular_2": { + "alias": " - blimp_determiner_noun_agreement_with_adj_irregular_2", + "acc,none": 0.496, + "acc_stderr,none": 0.015818793703510886 + }, + "blimp_determiner_noun_agreement_with_adjective_1": { + "alias": " - blimp_determiner_noun_agreement_with_adjective_1", + "acc,none": 0.511, + "acc_stderr,none":
0.015815471195292686 + }, + "blimp_distractor_agreement_relational_noun": { + "alias": " - blimp_distractor_agreement_relational_noun", + "acc,none": 0.511, + "acc_stderr,none": 0.015815471195292682 + }, + "blimp_distractor_agreement_relative_clause": { + "alias": " - blimp_distractor_agreement_relative_clause", + "acc,none": 0.497, + "acc_stderr,none": 0.015819015179246724 + }, + "blimp_drop_argument": { + "alias": " - blimp_drop_argument", + "acc,none": 0.663, + "acc_stderr,none": 0.014955087918653607 + }, + "blimp_ellipsis_n_bar_1": { + "alias": " - blimp_ellipsis_n_bar_1", + "acc,none": 0.494, + "acc_stderr,none": 0.015818160898606715 + }, + "blimp_ellipsis_n_bar_2": { + "alias": " - blimp_ellipsis_n_bar_2", + "acc,none": 0.326, + "acc_stderr,none": 0.01483050720454104 + }, + "blimp_existential_there_object_raising": { + "alias": " - blimp_existential_there_object_raising", + "acc,none": 0.612, + "acc_stderr,none": 0.015417317979911077 + }, + "blimp_existential_there_quantifiers_1": { + "alias": " - blimp_existential_there_quantifiers_1", + "acc,none": 0.909, + "acc_stderr,none": 0.00909954953840023 + }, + "blimp_existential_there_quantifiers_2": { + "alias": " - blimp_existential_there_quantifiers_2", + "acc,none": 0.615, + "acc_stderr,none": 0.01539519444541081 + }, + "blimp_existential_there_subject_raising": { + "alias": " - blimp_existential_there_subject_raising", + "acc,none": 0.533, + "acc_stderr,none": 0.01578480789113878 + }, + "blimp_expletive_it_object_raising": { + "alias": " - blimp_expletive_it_object_raising", + "acc,none": 0.569, + "acc_stderr,none": 0.015667944488173494 + }, + "blimp_inchoative": { + "alias": " - blimp_inchoative", + "acc,none": 0.389, + "acc_stderr,none": 0.015424555647308488 + }, + "blimp_intransitive": { + "alias": " - blimp_intransitive", + "acc,none": 0.557, + "acc_stderr,none": 0.015716169953204105 + }, + "blimp_irregular_past_participle_adjectives": { + "alias": " - blimp_irregular_past_participle_adjectives", + "acc,none": 0.293, + "acc_stderr,none": 0.014399942998441273 + }, + "blimp_irregular_past_participle_verbs": { + "alias": " - blimp_irregular_past_participle_verbs", + "acc,none": 0.463, + "acc_stderr,none": 0.015775927227262423 + }, + "blimp_irregular_plural_subject_verb_agreement_1": { + "alias": " - blimp_irregular_plural_subject_verb_agreement_1", + "acc,none": 0.493, + "acc_stderr,none": 0.01581774956184357 + }, + "blimp_irregular_plural_subject_verb_agreement_2": { + "alias": " - blimp_irregular_plural_subject_verb_agreement_2", + "acc,none": 0.526, + "acc_stderr,none": 0.015797897758042745 + }, + "blimp_left_branch_island_echo_question": { + "alias": " - blimp_left_branch_island_echo_question", + "acc,none": 0.596, + "acc_stderr,none": 0.015524980677122581 + }, + "blimp_left_branch_island_simple_question": { + "alias": " - blimp_left_branch_island_simple_question", + "acc,none": 0.51, + "acc_stderr,none": 0.01581613575277321 + }, + "blimp_matrix_question_npi_licensor_present": { + "alias": " - blimp_matrix_question_npi_licensor_present", + "acc,none": 0.339, + "acc_stderr,none": 0.014976758771620347 + }, + "blimp_npi_present_1": { + "alias": " - blimp_npi_present_1", + "acc,none": 0.406, + "acc_stderr,none": 0.015537226438634602 + }, + "blimp_npi_present_2": { + "alias": " - blimp_npi_present_2", + "acc,none": 0.341, + "acc_stderr,none": 0.014998131348402714 + }, + "blimp_only_npi_licensor_present": { + "alias": " - blimp_only_npi_licensor_present", + "acc,none": 0.377, + "acc_stderr,none": 0.015333170125779855 + }, + 
"blimp_only_npi_scope": { + "alias": " - blimp_only_npi_scope", + "acc,none": 0.599, + "acc_stderr,none": 0.015506109745498322 + }, + "blimp_passive_1": { + "alias": " - blimp_passive_1", + "acc,none": 0.664, + "acc_stderr,none": 0.014944140233795023 + }, + "blimp_passive_2": { + "alias": " - blimp_passive_2", + "acc,none": 0.602, + "acc_stderr,none": 0.01548663410285892 + }, + "blimp_principle_A_c_command": { + "alias": " - blimp_principle_A_c_command", + "acc,none": 0.317, + "acc_stderr,none": 0.014721675438880234 + }, + "blimp_principle_A_case_1": { + "alias": " - blimp_principle_A_case_1", + "acc,none": 0.844, + "acc_stderr,none": 0.011480235006122365 + }, + "blimp_principle_A_case_2": { + "alias": " - blimp_principle_A_case_2", + "acc,none": 0.5, + "acc_stderr,none": 0.015819299929208316 + }, + "blimp_principle_A_domain_1": { + "alias": " - blimp_principle_A_domain_1", + "acc,none": 0.53, + "acc_stderr,none": 0.015790799515836763 + }, + "blimp_principle_A_domain_2": { + "alias": " - blimp_principle_A_domain_2", + "acc,none": 0.512, + "acc_stderr,none": 0.015814743314581818 + }, + "blimp_principle_A_domain_3": { + "alias": " - blimp_principle_A_domain_3", + "acc,none": 0.509, + "acc_stderr,none": 0.015816736995005392 + }, + "blimp_principle_A_reconstruction": { + "alias": " - blimp_principle_A_reconstruction", + "acc,none": 0.448, + "acc_stderr,none": 0.015733516566347833 + }, + "blimp_regular_plural_subject_verb_agreement_1": { + "alias": " - blimp_regular_plural_subject_verb_agreement_1", + "acc,none": 0.389, + "acc_stderr,none": 0.01542455564730849 + }, + "blimp_regular_plural_subject_verb_agreement_2": { + "alias": " - blimp_regular_plural_subject_verb_agreement_2", + "acc,none": 0.513, + "acc_stderr,none": 0.015813952101896622 + }, + "blimp_sentential_negation_npi_licensor_present": { + "alias": " - blimp_sentential_negation_npi_licensor_present", + "acc,none": 0.644, + "acc_stderr,none": 0.015149042659306623 + }, + "blimp_sentential_negation_npi_scope": { + "alias": " - blimp_sentential_negation_npi_scope", + "acc,none": 0.725, + "acc_stderr,none": 0.014127086556490528 + }, + "blimp_sentential_subject_island": { + "alias": " - blimp_sentential_subject_island", + "acc,none": 0.46, + "acc_stderr,none": 0.01576859691439438 + }, + "blimp_superlative_quantifiers_1": { + "alias": " - blimp_superlative_quantifiers_1", + "acc,none": 0.777, + "acc_stderr,none": 0.013169830843425668 + }, + "blimp_superlative_quantifiers_2": { + "alias": " - blimp_superlative_quantifiers_2", + "acc,none": 0.62, + "acc_stderr,none": 0.015356947477797585 + }, + "blimp_tough_vs_raising_1": { + "alias": " - blimp_tough_vs_raising_1", + "acc,none": 0.413, + "acc_stderr,none": 0.01557798682993653 + }, + "blimp_tough_vs_raising_2": { + "alias": " - blimp_tough_vs_raising_2", + "acc,none": 0.616, + "acc_stderr,none": 0.01538768276189707 + }, + "blimp_transitive": { + "alias": " - blimp_transitive", + "acc,none": 0.518, + "acc_stderr,none": 0.015809045699406728 + }, + "blimp_wh_island": { + "alias": " - blimp_wh_island", + "acc,none": 0.611, + "acc_stderr,none": 0.015424555647308495 + }, + "blimp_wh_questions_object_gap": { + "alias": " - blimp_wh_questions_object_gap", + "acc,none": 0.483, + "acc_stderr,none": 0.015810153729833434 + }, + "blimp_wh_questions_subject_gap": { + "alias": " - blimp_wh_questions_subject_gap", + "acc,none": 0.439, + "acc_stderr,none": 0.015701131345400774 + }, + "blimp_wh_questions_subject_gap_long_distance": { + "alias": " - blimp_wh_questions_subject_gap_long_distance", + "acc,none": 
0.406, + "acc_stderr,none": 0.015537226438634602 + }, + "blimp_wh_vs_that_no_gap": { + "alias": " - blimp_wh_vs_that_no_gap", + "acc,none": 0.373, + "acc_stderr,none": 0.015300493622922809 + }, + "blimp_wh_vs_that_no_gap_long_distance": { + "alias": " - blimp_wh_vs_that_no_gap_long_distance", + "acc,none": 0.418, + "acc_stderr,none": 0.01560511196754195 + }, + "blimp_wh_vs_that_with_gap": { + "alias": " - blimp_wh_vs_that_with_gap", + "acc,none": 0.61, + "acc_stderr,none": 0.01543172505386661 + }, + "blimp_wh_vs_that_with_gap_long_distance": { + "alias": " - blimp_wh_vs_that_with_gap_long_distance", + "acc,none": 0.568, + "acc_stderr,none": 0.015672320237336206 + }, + "lambada_openai": { + "alias": "lambada_openai", + "perplexity,none": 3681922.383502935, + "perplexity_stderr,none": 360512.4372341794, + "acc,none": 0.0, + "acc_stderr,none": 0.0 + }, + "logiqa": { + "alias": "logiqa", + "acc,none": 0.22580645161290322, + "acc_stderr,none": 0.016399713788445066, + "acc_norm,none": 0.2457757296466974, + "acc_norm_stderr,none": 0.016887410894296923 + }, + "mmlu": { + "acc,none": 0.2460475715710013, + "acc_stderr,none": 0.0036316292061756832, + "alias": "mmlu" + }, + "mmlu_humanities": { + "acc,none": 0.24484590860786398, + "acc_stderr,none": 0.0062705558065704135, + "alias": " - humanities" + }, + "mmlu_formal_logic": { + "alias": " - formal_logic", + "acc,none": 0.2698412698412698, + "acc_stderr,none": 0.039701582732351734 + }, + "mmlu_high_school_european_history": { + "alias": " - high_school_european_history", + "acc,none": 0.26666666666666666, + "acc_stderr,none": 0.03453131801885414 + }, + "mmlu_high_school_us_history": { + "alias": " - high_school_us_history", + "acc,none": 0.23039215686274508, + "acc_stderr,none": 0.029554292605695066 + }, + "mmlu_high_school_world_history": { + "alias": " - high_school_world_history", + "acc,none": 0.24472573839662448, + "acc_stderr,none": 0.027985699387036423 + }, + "mmlu_international_law": { + "alias": " - international_law", + "acc,none": 0.2892561983471074, + "acc_stderr,none": 0.04139112727635463 + }, + "mmlu_jurisprudence": { + "alias": " - jurisprudence", + "acc,none": 0.2777777777777778, + "acc_stderr,none": 0.04330043749650742 + }, + "mmlu_logical_fallacies": { + "alias": " - logical_fallacies", + "acc,none": 0.24539877300613497, + "acc_stderr,none": 0.03380939813943354 + }, + "mmlu_moral_disputes": { + "alias": " - moral_disputes", + "acc,none": 0.26878612716763006, + "acc_stderr,none": 0.023868003262500118 + }, + "mmlu_moral_scenarios": { + "alias": " - moral_scenarios", + "acc,none": 0.2435754189944134, + "acc_stderr,none": 0.01435591196476786 + }, + "mmlu_philosophy": { + "alias": " - philosophy", + "acc,none": 0.2861736334405145, + "acc_stderr,none": 0.02567025924218895 + }, + "mmlu_prehistory": { + "alias": " - prehistory", + "acc,none": 0.24382716049382716, + "acc_stderr,none": 0.023891879541959607 + }, + "mmlu_professional_law": { + "alias": " - professional_law", + "acc,none": 0.22490221642764016, + "acc_stderr,none": 0.010663604709948288 + }, + "mmlu_world_religions": { + "alias": " - world_religions", + "acc,none": 0.23391812865497075, + "acc_stderr,none": 0.03246721765117825 + }, + "mmlu_other": { + "acc,none": 0.25523012552301255, + "acc_stderr,none": 0.007796215834368254, + "alias": " - other" + }, + "mmlu_business_ethics": { + "alias": " - business_ethics", + "acc,none": 0.23, + "acc_stderr,none": 0.04229525846816505 + }, + "mmlu_clinical_knowledge": { + "alias": " - clinical_knowledge", + "acc,none": 0.17735849056603772, + 
"acc_stderr,none": 0.023508739218846934 + }, + "mmlu_college_medicine": { + "alias": " - college_medicine", + "acc,none": 0.2138728323699422, + "acc_stderr,none": 0.031265112061730424 + }, + "mmlu_global_facts": { + "alias": " - global_facts", + "acc,none": 0.27, + "acc_stderr,none": 0.04461960433384739 + }, + "mmlu_human_aging": { + "alias": " - human_aging", + "acc,none": 0.3632286995515695, + "acc_stderr,none": 0.032277904428505 + }, + "mmlu_management": { + "alias": " - management", + "acc,none": 0.21359223300970873, + "acc_stderr,none": 0.04058042015646035 + }, + "mmlu_marketing": { + "alias": " - marketing", + "acc,none": 0.24786324786324787, + "acc_stderr,none": 0.028286324075564372 + }, + "mmlu_medical_genetics": { + "alias": " - medical_genetics", + "acc,none": 0.21, + "acc_stderr,none": 0.040936018074033256 + }, + "mmlu_miscellaneous": { + "alias": " - miscellaneous", + "acc,none": 0.2835249042145594, + "acc_stderr,none": 0.01611731816683228 + }, + "mmlu_nutrition": { + "alias": " - nutrition", + "acc,none": 0.23202614379084968, + "acc_stderr,none": 0.024170840879341016 + }, + "mmlu_professional_accounting": { + "alias": " - professional_accounting", + "acc,none": 0.2765957446808511, + "acc_stderr,none": 0.02668456434046099 + }, + "mmlu_professional_medicine": { + "alias": " - professional_medicine", + "acc,none": 0.22058823529411764, + "acc_stderr,none": 0.025187786660227245 + }, + "mmlu_virology": { + "alias": " - virology", + "acc,none": 0.27710843373493976, + "acc_stderr,none": 0.03484331592680588 + }, + "mmlu_social_sciences": { + "acc,none": 0.23561910952226195, + "acc_stderr,none": 0.007646817597937775, + "alias": " - social sciences" + }, + "mmlu_econometrics": { + "alias": " - econometrics", + "acc,none": 0.21929824561403508, + "acc_stderr,none": 0.03892431106518753 + }, + "mmlu_high_school_geography": { + "alias": " - high_school_geography", + "acc,none": 0.1919191919191919, + "acc_stderr,none": 0.028057791672989017 + }, + "mmlu_high_school_government_and_politics": { + "alias": " - high_school_government_and_politics", + "acc,none": 0.20207253886010362, + "acc_stderr,none": 0.02897908979429673 + }, + "mmlu_high_school_macroeconomics": { + "alias": " - high_school_macroeconomics", + "acc,none": 0.2282051282051282, + "acc_stderr,none": 0.021278393863586282 + }, + "mmlu_high_school_microeconomics": { + "alias": " - high_school_microeconomics", + "acc,none": 0.226890756302521, + "acc_stderr,none": 0.027205371538279472 + }, + "mmlu_high_school_psychology": { + "alias": " - high_school_psychology", + "acc,none": 0.25504587155963304, + "acc_stderr,none": 0.018688500856535846 + }, + "mmlu_human_sexuality": { + "alias": " - human_sexuality", + "acc,none": 0.22900763358778625, + "acc_stderr,none": 0.036853466317118506 + }, + "mmlu_professional_psychology": { + "alias": " - professional_psychology", + "acc,none": 0.2581699346405229, + "acc_stderr,none": 0.017704531653250075 + }, + "mmlu_public_relations": { + "alias": " - public_relations", + "acc,none": 0.3, + "acc_stderr,none": 0.04389311454644287 + }, + "mmlu_security_studies": { + "alias": " - security_studies", + "acc,none": 0.17959183673469387, + "acc_stderr,none": 0.024573293589585637 + }, + "mmlu_sociology": { + "alias": " - sociology", + "acc,none": 0.23383084577114427, + "acc_stderr,none": 0.029929415408348377 + }, + "mmlu_us_foreign_policy": { + "alias": " - us_foreign_policy", + "acc,none": 0.29, + "acc_stderr,none": 0.04560480215720683 + }, + "mmlu_stem": { + "acc,none": 0.24896923564858864, + "acc_stderr,none": 
0.007702076690998317, + "alias": " - stem" + }, + "mmlu_abstract_algebra": { + "alias": " - abstract_algebra", + "acc,none": 0.34, + "acc_stderr,none": 0.04760952285695235 + }, + "mmlu_anatomy": { + "alias": " - anatomy", + "acc,none": 0.2962962962962963, + "acc_stderr,none": 0.03944624162501117 + }, + "mmlu_astronomy": { + "alias": " - astronomy", + "acc,none": 0.23026315789473684, + "acc_stderr,none": 0.034260594244031654 + }, + "mmlu_college_biology": { + "alias": " - college_biology", + "acc,none": 0.2638888888888889, + "acc_stderr,none": 0.03685651095897532 + }, + "mmlu_college_chemistry": { + "alias": " - college_chemistry", + "acc,none": 0.25, + "acc_stderr,none": 0.04351941398892446 + }, + "mmlu_college_computer_science": { + "alias": " - college_computer_science", + "acc,none": 0.18, + "acc_stderr,none": 0.03861229196653695 + }, + "mmlu_college_mathematics": { + "alias": " - college_mathematics", + "acc,none": 0.24, + "acc_stderr,none": 0.04292346959909281 + }, + "mmlu_college_physics": { + "alias": " - college_physics", + "acc,none": 0.24509803921568626, + "acc_stderr,none": 0.04280105837364396 + }, + "mmlu_computer_security": { + "alias": " - computer_security", + "acc,none": 0.23, + "acc_stderr,none": 0.04229525846816505 + }, + "mmlu_conceptual_physics": { + "alias": " - conceptual_physics", + "acc,none": 0.26382978723404255, + "acc_stderr,none": 0.028809989854102963 + }, + "mmlu_electrical_engineering": { + "alias": " - electrical_engineering", + "acc,none": 0.23448275862068965, + "acc_stderr,none": 0.035306258743465914 + }, + "mmlu_elementary_mathematics": { + "alias": " - elementary_mathematics", + "acc,none": 0.2698412698412698, + "acc_stderr,none": 0.022860838309232072 + }, + "mmlu_high_school_biology": { + "alias": " - high_school_biology", + "acc,none": 0.26129032258064516, + "acc_stderr,none": 0.02499305339776482 + }, + "mmlu_high_school_chemistry": { + "alias": " - high_school_chemistry", + "acc,none": 0.26108374384236455, + "acc_stderr,none": 0.03090379695211447 + }, + "mmlu_high_school_computer_science": { + "alias": " - high_school_computer_science", + "acc,none": 0.26, + "acc_stderr,none": 0.044084400227680794 + }, + "mmlu_high_school_mathematics": { + "alias": " - high_school_mathematics", + "acc,none": 0.25555555555555554, + "acc_stderr,none": 0.026593939101844065 + }, + "mmlu_high_school_physics": { + "alias": " - high_school_physics", + "acc,none": 0.18543046357615894, + "acc_stderr,none": 0.03173284384294285 + }, + "mmlu_high_school_statistics": { + "alias": " - high_school_statistics", + "acc,none": 0.19444444444444445, + "acc_stderr,none": 0.026991454502036716 + }, + "mmlu_machine_learning": { + "alias": " - machine_learning", + "acc,none": 0.23214285714285715, + "acc_stderr,none": 0.04007341809755806 + }, + "piqa": { + "alias": "piqa", + "acc,none": 0.5244831338411317, + "acc_stderr,none": 0.011651830225709977, + "acc_norm,none": 0.5201305767138193, + "acc_norm_stderr,none": 0.011656365410780373 + }, + "sciq": { + "alias": "sciq", + "acc,none": 0.194, + "acc_stderr,none": 0.012510816141264362, + "acc_norm,none": 0.215, + "acc_norm_stderr,none": 0.012997843819031834 + }, + "wikitext": { + "alias": "wikitext", + "word_perplexity,none": 323422.4990049516, + "word_perplexity_stderr,none": "N/A", + "byte_perplexity,none": 10.723944917267163, + "byte_perplexity_stderr,none": "N/A", + "bits_per_byte,none": 3.422763809045428, + "bits_per_byte_stderr,none": "N/A" + }, + "winogrande": { + "alias": "winogrande", + "acc,none": 0.4988161010260458, + "acc_stderr,none": 
0.014052446290529012 + }, + "wsc": { + "alias": "wsc", + "acc,none": 0.6153846153846154, + "acc_stderr,none": 0.0479366886807504 + } + }, + "groups": { + "blimp": { + "acc,none": 0.5266119402985074, + "acc_stderr,none": 0.0018730484493487852, + "alias": "blimp" + }, + "mmlu": { + "acc,none": 0.2460475715710013, + "acc_stderr,none": 0.0036316292061756832, + "alias": "mmlu" + }, + "mmlu_humanities": { + "acc,none": 0.24484590860786398, + "acc_stderr,none": 0.0062705558065704135, + "alias": " - humanities" + }, + "mmlu_other": { + "acc,none": 0.25523012552301255, + "acc_stderr,none": 0.007796215834368254, + "alias": " - other" + }, + "mmlu_social_sciences": { + "acc,none": 0.23561910952226195, + "acc_stderr,none": 0.007646817597937775, + "alias": " - social sciences" + }, + "mmlu_stem": { + "acc,none": 0.24896923564858864, + "acc_stderr,none": 0.007702076690998317, + "alias": " - stem" + } + }, + "group_subtasks": { + "arc_easy": [], + "arc_challenge": [], + "blimp": [ + "blimp_adjunct_island", + "blimp_anaphor_gender_agreement", + "blimp_anaphor_number_agreement", + "blimp_animate_subject_passive", + "blimp_animate_subject_trans", + "blimp_causative", + "blimp_complex_NP_island", + "blimp_coordinate_structure_constraint_complex_left_branch", + "blimp_coordinate_structure_constraint_object_extraction", + "blimp_determiner_noun_agreement_1", + "blimp_determiner_noun_agreement_2", + "blimp_determiner_noun_agreement_irregular_1", + "blimp_determiner_noun_agreement_irregular_2", + "blimp_determiner_noun_agreement_with_adj_2", + "blimp_determiner_noun_agreement_with_adj_irregular_1", + "blimp_determiner_noun_agreement_with_adj_irregular_2", + "blimp_determiner_noun_agreement_with_adjective_1", + "blimp_distractor_agreement_relational_noun", + "blimp_distractor_agreement_relative_clause", + "blimp_drop_argument", + "blimp_ellipsis_n_bar_1", + "blimp_ellipsis_n_bar_2", + "blimp_existential_there_object_raising", + "blimp_existential_there_quantifiers_1", + "blimp_existential_there_quantifiers_2", + "blimp_existential_there_subject_raising", + "blimp_expletive_it_object_raising", + "blimp_inchoative", + "blimp_intransitive", + "blimp_irregular_past_participle_adjectives", + "blimp_irregular_past_participle_verbs", + "blimp_irregular_plural_subject_verb_agreement_1", + "blimp_irregular_plural_subject_verb_agreement_2", + "blimp_left_branch_island_echo_question", + "blimp_left_branch_island_simple_question", + "blimp_matrix_question_npi_licensor_present", + "blimp_npi_present_1", + "blimp_npi_present_2", + "blimp_only_npi_licensor_present", + "blimp_only_npi_scope", + "blimp_passive_1", + "blimp_passive_2", + "blimp_principle_A_c_command", + "blimp_principle_A_case_1", + "blimp_principle_A_case_2", + "blimp_principle_A_domain_1", + "blimp_principle_A_domain_2", + "blimp_principle_A_domain_3", + "blimp_principle_A_reconstruction", + "blimp_regular_plural_subject_verb_agreement_1", + "blimp_regular_plural_subject_verb_agreement_2", + "blimp_sentential_negation_npi_licensor_present", + "blimp_sentential_negation_npi_scope", + "blimp_sentential_subject_island", + "blimp_superlative_quantifiers_1", + "blimp_superlative_quantifiers_2", + "blimp_tough_vs_raising_1", + "blimp_tough_vs_raising_2", + "blimp_transitive", + "blimp_wh_island", + "blimp_wh_questions_object_gap", + "blimp_wh_questions_subject_gap", + "blimp_wh_questions_subject_gap_long_distance", + "blimp_wh_vs_that_no_gap", + "blimp_wh_vs_that_no_gap_long_distance", + "blimp_wh_vs_that_with_gap", + "blimp_wh_vs_that_with_gap_long_distance" + ], + 
"lambada_openai": [], + "logiqa": [], + "mmlu_humanities": [ + "mmlu_moral_disputes", + "mmlu_high_school_world_history", + "mmlu_jurisprudence", + "mmlu_philosophy", + "mmlu_high_school_us_history", + "mmlu_professional_law", + "mmlu_logical_fallacies", + "mmlu_moral_scenarios", + "mmlu_formal_logic", + "mmlu_prehistory", + "mmlu_high_school_european_history", + "mmlu_world_religions", + "mmlu_international_law" + ], + "mmlu_social_sciences": [ + "mmlu_us_foreign_policy", + "mmlu_sociology", + "mmlu_econometrics", + "mmlu_security_studies", + "mmlu_high_school_geography", + "mmlu_public_relations", + "mmlu_high_school_microeconomics", + "mmlu_professional_psychology", + "mmlu_high_school_macroeconomics", + "mmlu_human_sexuality", + "mmlu_high_school_government_and_politics", + "mmlu_high_school_psychology" + ], + "mmlu_other": [ + "mmlu_college_medicine", + "mmlu_medical_genetics", + "mmlu_business_ethics", + "mmlu_miscellaneous", + "mmlu_nutrition", + "mmlu_clinical_knowledge", + "mmlu_human_aging", + "mmlu_professional_accounting", + "mmlu_marketing", + "mmlu_global_facts", + "mmlu_professional_medicine", + "mmlu_virology", + "mmlu_management" + ], + "mmlu_stem": [ + "mmlu_elementary_mathematics", + "mmlu_electrical_engineering", + "mmlu_high_school_computer_science", + "mmlu_high_school_physics", + "mmlu_college_mathematics", + "mmlu_college_chemistry", + "mmlu_machine_learning", + "mmlu_high_school_mathematics", + "mmlu_computer_security", + "mmlu_conceptual_physics", + "mmlu_high_school_statistics", + "mmlu_high_school_biology", + "mmlu_astronomy", + "mmlu_college_computer_science", + "mmlu_college_biology", + "mmlu_college_physics", + "mmlu_anatomy", + "mmlu_high_school_chemistry", + "mmlu_abstract_algebra" + ], + "mmlu": [ + "mmlu_stem", + "mmlu_other", + "mmlu_social_sciences", + "mmlu_humanities" + ], + "piqa": [], + "sciq": [], + "wikitext": [], + "winogrande": [], + "wsc": [] + }, + "configs": { + "arc_challenge": { + "task": "arc_challenge", + "tag": [ + "ai2_arc" + ], + "dataset_path": "allenai/ai2_arc", + "dataset_name": "ARC-Challenge", + "training_split": "train", + "validation_split": "validation", + "test_split": "test", + "doc_to_text": "Question: {{question}}\nAnswer:", + "doc_to_target": "{{choices.label.index(answerKey)}}", + "doc_to_choice": "{{choices.text}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "Question: {{question}}\nAnswer:", + "metadata": { + "version": 1.0 + } + }, + "arc_easy": { + "task": "arc_easy", + "tag": [ + "ai2_arc" + ], + "dataset_path": "allenai/ai2_arc", + "dataset_name": "ARC-Easy", + "training_split": "train", + "validation_split": "validation", + "test_split": "test", + "doc_to_text": "Question: {{question}}\nAnswer:", + "doc_to_target": "{{choices.label.index(answerKey)}}", + "doc_to_choice": "{{choices.text}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + 
"doc_to_decontamination_query": "Question: {{question}}\nAnswer:", + "metadata": { + "version": 1.0 + } + }, + "blimp_adjunct_island": { + "task": "blimp_adjunct_island", + "dataset_path": "blimp", + "dataset_name": "adjunct_island", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_anaphor_gender_agreement": { + "task": "blimp_anaphor_gender_agreement", + "dataset_path": "blimp", + "dataset_name": "anaphor_gender_agreement", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_anaphor_number_agreement": { + "task": "blimp_anaphor_number_agreement", + "dataset_path": "blimp", + "dataset_name": "anaphor_number_agreement", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_animate_subject_passive": { + "task": "blimp_animate_subject_passive", + "dataset_path": "blimp", + "dataset_name": "animate_subject_passive", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_animate_subject_trans": { + "task": "blimp_animate_subject_trans", + "dataset_path": "blimp", + "dataset_name": "animate_subject_trans", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_causative": { + "task": "blimp_causative", + "dataset_path": "blimp", + "dataset_name": "causative", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + 
"num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_complex_NP_island": { + "task": "blimp_complex_NP_island", + "dataset_path": "blimp", + "dataset_name": "complex_NP_island", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_coordinate_structure_constraint_complex_left_branch": { + "task": "blimp_coordinate_structure_constraint_complex_left_branch", + "dataset_path": "blimp", + "dataset_name": "coordinate_structure_constraint_complex_left_branch", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_coordinate_structure_constraint_object_extraction": { + "task": "blimp_coordinate_structure_constraint_object_extraction", + "dataset_path": "blimp", + "dataset_name": "coordinate_structure_constraint_object_extraction", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_determiner_noun_agreement_1": { + "task": "blimp_determiner_noun_agreement_1", + "dataset_path": "blimp", + "dataset_name": "determiner_noun_agreement_1", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_determiner_noun_agreement_2": { + "task": "blimp_determiner_noun_agreement_2", + "dataset_path": "blimp", + "dataset_name": "determiner_noun_agreement_2", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + 
"version": 1.0 + } + }, + "blimp_determiner_noun_agreement_irregular_1": { + "task": "blimp_determiner_noun_agreement_irregular_1", + "dataset_path": "blimp", + "dataset_name": "determiner_noun_agreement_irregular_1", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_determiner_noun_agreement_irregular_2": { + "task": "blimp_determiner_noun_agreement_irregular_2", + "dataset_path": "blimp", + "dataset_name": "determiner_noun_agreement_irregular_2", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_determiner_noun_agreement_with_adj_2": { + "task": "blimp_determiner_noun_agreement_with_adj_2", + "dataset_path": "blimp", + "dataset_name": "determiner_noun_agreement_with_adj_2", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_determiner_noun_agreement_with_adj_irregular_1": { + "task": "blimp_determiner_noun_agreement_with_adj_irregular_1", + "dataset_path": "blimp", + "dataset_name": "determiner_noun_agreement_with_adj_irregular_1", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_determiner_noun_agreement_with_adj_irregular_2": { + "task": "blimp_determiner_noun_agreement_with_adj_irregular_2", + "dataset_path": "blimp", + "dataset_name": "determiner_noun_agreement_with_adj_irregular_2", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_determiner_noun_agreement_with_adjective_1": { + "task": "blimp_determiner_noun_agreement_with_adjective_1", + 
"dataset_path": "blimp", + "dataset_name": "determiner_noun_agreement_with_adjective_1", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_distractor_agreement_relational_noun": { + "task": "blimp_distractor_agreement_relational_noun", + "dataset_path": "blimp", + "dataset_name": "distractor_agreement_relational_noun", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_distractor_agreement_relative_clause": { + "task": "blimp_distractor_agreement_relative_clause", + "dataset_path": "blimp", + "dataset_name": "distractor_agreement_relative_clause", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_drop_argument": { + "task": "blimp_drop_argument", + "dataset_path": "blimp", + "dataset_name": "drop_argument", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_ellipsis_n_bar_1": { + "task": "blimp_ellipsis_n_bar_1", + "dataset_path": "blimp", + "dataset_name": "ellipsis_n_bar_1", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_ellipsis_n_bar_2": { + "task": "blimp_ellipsis_n_bar_2", + "dataset_path": "blimp", + "dataset_name": "ellipsis_n_bar_2", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", 
+ "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_existential_there_object_raising": { + "task": "blimp_existential_there_object_raising", + "dataset_path": "blimp", + "dataset_name": "existential_there_object_raising", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_existential_there_quantifiers_1": { + "task": "blimp_existential_there_quantifiers_1", + "dataset_path": "blimp", + "dataset_name": "existential_there_quantifiers_1", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_existential_there_quantifiers_2": { + "task": "blimp_existential_there_quantifiers_2", + "dataset_path": "blimp", + "dataset_name": "existential_there_quantifiers_2", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_existential_there_subject_raising": { + "task": "blimp_existential_there_subject_raising", + "dataset_path": "blimp", + "dataset_name": "existential_there_subject_raising", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_expletive_it_object_raising": { + "task": "blimp_expletive_it_object_raising", + "dataset_path": "blimp", + "dataset_name": "expletive_it_object_raising", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_inchoative": { + "task": "blimp_inchoative", + "dataset_path": "blimp", + "dataset_name": "inchoative", + 
"validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_intransitive": { + "task": "blimp_intransitive", + "dataset_path": "blimp", + "dataset_name": "intransitive", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_irregular_past_participle_adjectives": { + "task": "blimp_irregular_past_participle_adjectives", + "dataset_path": "blimp", + "dataset_name": "irregular_past_participle_adjectives", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_irregular_past_participle_verbs": { + "task": "blimp_irregular_past_participle_verbs", + "dataset_path": "blimp", + "dataset_name": "irregular_past_participle_verbs", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_irregular_plural_subject_verb_agreement_1": { + "task": "blimp_irregular_plural_subject_verb_agreement_1", + "dataset_path": "blimp", + "dataset_name": "irregular_plural_subject_verb_agreement_1", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_irregular_plural_subject_verb_agreement_2": { + "task": "blimp_irregular_plural_subject_verb_agreement_2", + "dataset_path": "blimp", + "dataset_name": "irregular_plural_subject_verb_agreement_2", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } 
+ ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_left_branch_island_echo_question": { + "task": "blimp_left_branch_island_echo_question", + "dataset_path": "blimp", + "dataset_name": "left_branch_island_echo_question", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_left_branch_island_simple_question": { + "task": "blimp_left_branch_island_simple_question", + "dataset_path": "blimp", + "dataset_name": "left_branch_island_simple_question", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_matrix_question_npi_licensor_present": { + "task": "blimp_matrix_question_npi_licensor_present", + "dataset_path": "blimp", + "dataset_name": "matrix_question_npi_licensor_present", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_npi_present_1": { + "task": "blimp_npi_present_1", + "dataset_path": "blimp", + "dataset_name": "npi_present_1", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_npi_present_2": { + "task": "blimp_npi_present_2", + "dataset_path": "blimp", + "dataset_name": "npi_present_2", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_only_npi_licensor_present": { + "task": "blimp_only_npi_licensor_present", + "dataset_path": "blimp", + "dataset_name": "only_npi_licensor_present", + 
"validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_only_npi_scope": { + "task": "blimp_only_npi_scope", + "dataset_path": "blimp", + "dataset_name": "only_npi_scope", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_passive_1": { + "task": "blimp_passive_1", + "dataset_path": "blimp", + "dataset_name": "passive_1", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_passive_2": { + "task": "blimp_passive_2", + "dataset_path": "blimp", + "dataset_name": "passive_2", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_principle_A_c_command": { + "task": "blimp_principle_A_c_command", + "dataset_path": "blimp", + "dataset_name": "principle_A_c_command", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_principle_A_case_1": { + "task": "blimp_principle_A_case_1", + "dataset_path": "blimp", + "dataset_name": "principle_A_case_1", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_principle_A_case_2": { + "task": 
"blimp_principle_A_case_2", + "dataset_path": "blimp", + "dataset_name": "principle_A_case_2", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_principle_A_domain_1": { + "task": "blimp_principle_A_domain_1", + "dataset_path": "blimp", + "dataset_name": "principle_A_domain_1", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_principle_A_domain_2": { + "task": "blimp_principle_A_domain_2", + "dataset_path": "blimp", + "dataset_name": "principle_A_domain_2", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_principle_A_domain_3": { + "task": "blimp_principle_A_domain_3", + "dataset_path": "blimp", + "dataset_name": "principle_A_domain_3", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_principle_A_reconstruction": { + "task": "blimp_principle_A_reconstruction", + "dataset_path": "blimp", + "dataset_name": "principle_A_reconstruction", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_regular_plural_subject_verb_agreement_1": { + "task": "blimp_regular_plural_subject_verb_agreement_1", + "dataset_path": "blimp", + "dataset_name": "regular_plural_subject_verb_agreement_1", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + 
"output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_regular_plural_subject_verb_agreement_2": { + "task": "blimp_regular_plural_subject_verb_agreement_2", + "dataset_path": "blimp", + "dataset_name": "regular_plural_subject_verb_agreement_2", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_sentential_negation_npi_licensor_present": { + "task": "blimp_sentential_negation_npi_licensor_present", + "dataset_path": "blimp", + "dataset_name": "sentential_negation_npi_licensor_present", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_sentential_negation_npi_scope": { + "task": "blimp_sentential_negation_npi_scope", + "dataset_path": "blimp", + "dataset_name": "sentential_negation_npi_scope", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_sentential_subject_island": { + "task": "blimp_sentential_subject_island", + "dataset_path": "blimp", + "dataset_name": "sentential_subject_island", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_superlative_quantifiers_1": { + "task": "blimp_superlative_quantifiers_1", + "dataset_path": "blimp", + "dataset_name": "superlative_quantifiers_1", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_superlative_quantifiers_2": { + "task": "blimp_superlative_quantifiers_2", 
+ "dataset_path": "blimp", + "dataset_name": "superlative_quantifiers_2", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_tough_vs_raising_1": { + "task": "blimp_tough_vs_raising_1", + "dataset_path": "blimp", + "dataset_name": "tough_vs_raising_1", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_tough_vs_raising_2": { + "task": "blimp_tough_vs_raising_2", + "dataset_path": "blimp", + "dataset_name": "tough_vs_raising_2", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_transitive": { + "task": "blimp_transitive", + "dataset_path": "blimp", + "dataset_name": "transitive", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_wh_island": { + "task": "blimp_wh_island", + "dataset_path": "blimp", + "dataset_name": "wh_island", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_wh_questions_object_gap": { + "task": "blimp_wh_questions_object_gap", + "dataset_path": "blimp", + "dataset_name": "wh_questions_object_gap", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + 
"metadata": { + "version": 1.0 + } + }, + "blimp_wh_questions_subject_gap": { + "task": "blimp_wh_questions_subject_gap", + "dataset_path": "blimp", + "dataset_name": "wh_questions_subject_gap", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_wh_questions_subject_gap_long_distance": { + "task": "blimp_wh_questions_subject_gap_long_distance", + "dataset_path": "blimp", + "dataset_name": "wh_questions_subject_gap_long_distance", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_wh_vs_that_no_gap": { + "task": "blimp_wh_vs_that_no_gap", + "dataset_path": "blimp", + "dataset_name": "wh_vs_that_no_gap", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_wh_vs_that_no_gap_long_distance": { + "task": "blimp_wh_vs_that_no_gap_long_distance", + "dataset_path": "blimp", + "dataset_name": "wh_vs_that_no_gap_long_distance", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_wh_vs_that_with_gap": { + "task": "blimp_wh_vs_that_with_gap", + "dataset_path": "blimp", + "dataset_name": "wh_vs_that_with_gap", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "blimp_wh_vs_that_with_gap_long_distance": { + "task": "blimp_wh_vs_that_with_gap_long_distance", + "dataset_path": "blimp", + "dataset_name": "wh_vs_that_with_gap_long_distance", + "validation_split": "train", + "doc_to_text": "", + "doc_to_target": 0, + "doc_to_choice": "{{[sentence_good, sentence_bad]}}", + 
"description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}", + "metadata": { + "version": 1.0 + } + }, + "lambada_openai": { + "task": "lambada_openai", + "tag": [ + "lambada" + ], + "dataset_path": "EleutherAI/lambada_openai", + "dataset_name": "default", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}", + "doc_to_target": "{{' '+text.split(' ')[-1]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "perplexity", + "aggregation": "perplexity", + "higher_is_better": false + }, + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "loglikelihood", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{text}}", + "metadata": { + "version": 1.0 + } + }, + "logiqa": { + "task": "logiqa", + "dataset_path": "EleutherAI/logiqa", + "dataset_name": "logiqa", + "dataset_kwargs": { + "trust_remote_code": true + }, + "training_split": "train", + "validation_split": "validation", + "test_split": "test", + "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Passage: \n Question: \n Choices:\n A. \n B. \n C. \n D. \n Answer:\n \"\"\"\n choices = [\"a\", \"b\", \"c\", \"d\"]\n prompt = \"Passage: \" + doc[\"context\"] + \"\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\nChoices:\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"Answer:\"\n return prompt\n", + "doc_to_target": "def doc_to_target(doc) -> int:\n choices = [\"a\", \"b\", \"c\", \"d\"]\n return choices.index(doc[\"label\"].strip())\n", + "doc_to_choice": "{{options}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{context}}", + "metadata": { + "version": 1.0 + } + }, + "mmlu_abstract_algebra": { + "task": "mmlu_abstract_algebra", + "task_alias": "abstract_algebra", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "abstract_algebra", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about abstract algebra.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_anatomy": { + "task": "mmlu_anatomy", + "task_alias": "anatomy", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "anatomy", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about anatomy.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_astronomy": { + "task": "mmlu_astronomy", + "task_alias": "astronomy", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "astronomy", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about astronomy.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_business_ethics": { + "task": "mmlu_business_ethics", + "task_alias": "business_ethics", + "tag": "mmlu_other_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "business_ethics", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about business ethics.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_clinical_knowledge": { + "task": "mmlu_clinical_knowledge", + "task_alias": "clinical_knowledge", + "tag": "mmlu_other_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "clinical_knowledge", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about clinical knowledge.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_college_biology": { + "task": "mmlu_college_biology", + "task_alias": "college_biology", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "college_biology", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about college biology.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_college_chemistry": { + "task": "mmlu_college_chemistry", + "task_alias": "college_chemistry", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "college_chemistry", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about college chemistry.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_college_computer_science": { + "task": "mmlu_college_computer_science", + "task_alias": "college_computer_science", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "college_computer_science", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about college computer science.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_college_mathematics": { + "task": "mmlu_college_mathematics", + "task_alias": "college_mathematics", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "college_mathematics", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about college mathematics.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_college_medicine": { + "task": "mmlu_college_medicine", + "task_alias": "college_medicine", + "tag": "mmlu_other_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "college_medicine", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about college medicine.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_college_physics": { + "task": "mmlu_college_physics", + "task_alias": "college_physics", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "college_physics", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about college physics.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_computer_security": { + "task": "mmlu_computer_security", + "task_alias": "computer_security", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "computer_security", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about computer security.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_conceptual_physics": { + "task": "mmlu_conceptual_physics", + "task_alias": "conceptual_physics", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "conceptual_physics", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about conceptual physics.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_econometrics": { + "task": "mmlu_econometrics", + "task_alias": "econometrics", + "tag": "mmlu_social_sciences_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "econometrics", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about econometrics.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_electrical_engineering": { + "task": "mmlu_electrical_engineering", + "task_alias": "electrical_engineering", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "electrical_engineering", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about electrical engineering.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_elementary_mathematics": { + "task": "mmlu_elementary_mathematics", + "task_alias": "elementary_mathematics", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "elementary_mathematics", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about elementary mathematics.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_formal_logic": { + "task": "mmlu_formal_logic", + "task_alias": "formal_logic", + "tag": "mmlu_humanities_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "formal_logic", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about formal logic.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_global_facts": { + "task": "mmlu_global_facts", + "task_alias": "global_facts", + "tag": "mmlu_other_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "global_facts", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about global facts.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_high_school_biology": { + "task": "mmlu_high_school_biology", + "task_alias": "high_school_biology", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "high_school_biology", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about high school biology.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_high_school_chemistry": { + "task": "mmlu_high_school_chemistry", + "task_alias": "high_school_chemistry", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "high_school_chemistry", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about high school chemistry.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_high_school_computer_science": { + "task": "mmlu_high_school_computer_science", + "task_alias": "high_school_computer_science", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "high_school_computer_science", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about high school computer science.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_high_school_european_history": { + "task": "mmlu_high_school_european_history", + "task_alias": "high_school_european_history", + "tag": "mmlu_humanities_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "high_school_european_history", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about high school european history.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_high_school_geography": { + "task": "mmlu_high_school_geography", + "task_alias": "high_school_geography", + "tag": "mmlu_social_sciences_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "high_school_geography", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about high school geography.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_high_school_government_and_politics": { + "task": "mmlu_high_school_government_and_politics", + "task_alias": "high_school_government_and_politics", + "tag": "mmlu_social_sciences_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "high_school_government_and_politics", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about high school government and politics.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_high_school_macroeconomics": { + "task": "mmlu_high_school_macroeconomics", + "task_alias": "high_school_macroeconomics", + "tag": "mmlu_social_sciences_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "high_school_macroeconomics", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_high_school_mathematics": { + "task": "mmlu_high_school_mathematics", + "task_alias": "high_school_mathematics", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "high_school_mathematics", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about high school mathematics.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_high_school_microeconomics": { + "task": "mmlu_high_school_microeconomics", + "task_alias": "high_school_microeconomics", + "tag": "mmlu_social_sciences_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "high_school_microeconomics", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about high school microeconomics.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_high_school_physics": { + "task": "mmlu_high_school_physics", + "task_alias": "high_school_physics", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "high_school_physics", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about high school physics.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_high_school_psychology": { + "task": "mmlu_high_school_psychology", + "task_alias": "high_school_psychology", + "tag": "mmlu_social_sciences_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "high_school_psychology", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about high school psychology.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_high_school_statistics": { + "task": "mmlu_high_school_statistics", + "task_alias": "high_school_statistics", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "high_school_statistics", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about high school statistics.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_high_school_us_history": { + "task": "mmlu_high_school_us_history", + "task_alias": "high_school_us_history", + "tag": "mmlu_humanities_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "high_school_us_history", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about high school us history.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_high_school_world_history": { + "task": "mmlu_high_school_world_history", + "task_alias": "high_school_world_history", + "tag": "mmlu_humanities_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "high_school_world_history", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about high school world history.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_human_aging": { + "task": "mmlu_human_aging", + "task_alias": "human_aging", + "tag": "mmlu_other_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "human_aging", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about human aging.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_human_sexuality": { + "task": "mmlu_human_sexuality", + "task_alias": "human_sexuality", + "tag": "mmlu_social_sciences_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "human_sexuality", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about human sexuality.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_international_law": { + "task": "mmlu_international_law", + "task_alias": "international_law", + "tag": "mmlu_humanities_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "international_law", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about international law.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_jurisprudence": { + "task": "mmlu_jurisprudence", + "task_alias": "jurisprudence", + "tag": "mmlu_humanities_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "jurisprudence", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about jurisprudence.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_logical_fallacies": { + "task": "mmlu_logical_fallacies", + "task_alias": "logical_fallacies", + "tag": "mmlu_humanities_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "logical_fallacies", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about logical fallacies.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_machine_learning": { + "task": "mmlu_machine_learning", + "task_alias": "machine_learning", + "tag": "mmlu_stem_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "machine_learning", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about machine learning.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_management": { + "task": "mmlu_management", + "task_alias": "management", + "tag": "mmlu_other_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "management", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about management.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_marketing": { + "task": "mmlu_marketing", + "task_alias": "marketing", + "tag": "mmlu_other_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "marketing", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about marketing.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_medical_genetics": { + "task": "mmlu_medical_genetics", + "task_alias": "medical_genetics", + "tag": "mmlu_other_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "medical_genetics", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about medical genetics.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_miscellaneous": { + "task": "mmlu_miscellaneous", + "task_alias": "miscellaneous", + "tag": "mmlu_other_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "miscellaneous", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about miscellaneous.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_moral_disputes": { + "task": "mmlu_moral_disputes", + "task_alias": "moral_disputes", + "tag": "mmlu_humanities_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "moral_disputes", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about moral disputes.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_moral_scenarios": { + "task": "mmlu_moral_scenarios", + "task_alias": "moral_scenarios", + "tag": "mmlu_humanities_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "moral_scenarios", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about moral scenarios.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_nutrition": { + "task": "mmlu_nutrition", + "task_alias": "nutrition", + "tag": "mmlu_other_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "nutrition", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about nutrition.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_philosophy": { + "task": "mmlu_philosophy", + "task_alias": "philosophy", + "tag": "mmlu_humanities_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "philosophy", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about philosophy.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_prehistory": { + "task": "mmlu_prehistory", + "task_alias": "prehistory", + "tag": "mmlu_humanities_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "prehistory", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about prehistory.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_professional_accounting": { + "task": "mmlu_professional_accounting", + "task_alias": "professional_accounting", + "tag": "mmlu_other_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "professional_accounting", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about professional accounting.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_professional_law": { + "task": "mmlu_professional_law", + "task_alias": "professional_law", + "tag": "mmlu_humanities_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "professional_law", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about professional law.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_professional_medicine": { + "task": "mmlu_professional_medicine", + "task_alias": "professional_medicine", + "tag": "mmlu_other_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "professional_medicine", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about professional medicine.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_professional_psychology": { + "task": "mmlu_professional_psychology", + "task_alias": "professional_psychology", + "tag": "mmlu_social_sciences_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "professional_psychology", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about professional psychology.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_public_relations": { + "task": "mmlu_public_relations", + "task_alias": "public_relations", + "tag": "mmlu_social_sciences_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "public_relations", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about public relations.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_security_studies": { + "task": "mmlu_security_studies", + "task_alias": "security_studies", + "tag": "mmlu_social_sciences_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "security_studies", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about security studies.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_sociology": { + "task": "mmlu_sociology", + "task_alias": "sociology", + "tag": "mmlu_social_sciences_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "sociology", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about sociology.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_us_foreign_policy": { + "task": "mmlu_us_foreign_policy", + "task_alias": "us_foreign_policy", + "tag": "mmlu_social_sciences_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "us_foreign_policy", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about us foreign policy.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_virology": { + "task": "mmlu_virology", + "task_alias": "virology", + "tag": "mmlu_other_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "virology", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about virology.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "mmlu_world_religions": { + "task": "mmlu_world_religions", + "task_alias": "world_religions", + "tag": "mmlu_humanities_tasks", + "dataset_path": "hails/mmlu_no_train", + "dataset_name": "world_religions", + "dataset_kwargs": { + "trust_remote_code": true + }, + "test_split": "test", + "fewshot_split": "dev", + "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:", + "doc_to_target": "answer", + "doc_to_choice": [ + "A", + "B", + "C", + "D" + ], + "description": "The following are multiple choice questions (with answers) about world religions.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" + }, + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "piqa": { + "task": "piqa", + "dataset_path": "piqa", + "dataset_kwargs": { + "trust_remote_code": true + }, + "training_split": "train", + "validation_split": "validation", + "doc_to_text": "Question: {{goal}}\nAnswer:", + "doc_to_target": "label", + "doc_to_choice": "{{[sol1, sol2]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "goal", + "metadata": { + "version": 1.0 + } + }, + "sciq": { + "task": "sciq", + "dataset_path": "sciq", + "training_split": "train", + "validation_split": "validation", + "test_split": "test", + "doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:", + "doc_to_target": 3, + "doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{support}} {{question}}", + "metadata": { + "version": 1.0 + } + }, + "wikitext": { + "task": "wikitext", + "dataset_path": "EleutherAI/wikitext_document_level", + "dataset_name": "wikitext-2-raw-v1", + "dataset_kwargs": { + "trust_remote_code": true + }, + "training_split": "train", + "validation_split": "validation", + "test_split": "test", + "doc_to_text": "", + "doc_to_target": "def wikitext_detokenizer(doc):\n string = doc[\"page\"]\n # contractions\n string = string.replace(\"s '\", \"s'\")\n string = re.sub(r\"/' [0-9]/\", r\"/'[0-9]/\", string)\n # number separators\n string = string.replace(\" @-@ \", \"-\")\n string = string.replace(\" @,@ \", \",\")\n string = string.replace(\" @.@ \", \".\")\n # punctuation\n string = string.replace(\" : \", \": \")\n string = string.replace(\" ; \", \"; \")\n string = string.replace(\" . \", \". \")\n string = string.replace(\" ! \", \"! \")\n string = string.replace(\" ? \", \"? 
\")\n string = string.replace(\" , \", \", \")\n # double brackets\n string = re.sub(r\"\\(\\s*([^\\)]*?)\\s*\\)\", r\"(\\1)\", string)\n string = re.sub(r\"\\[\\s*([^\\]]*?)\\s*\\]\", r\"[\\1]\", string)\n string = re.sub(r\"{\\s*([^}]*?)\\s*}\", r\"{\\1}\", string)\n string = re.sub(r\"\\\"\\s*([^\\\"]*?)\\s*\\\"\", r'\"\\1\"', string)\n string = re.sub(r\"'\\s*([^']*?)\\s*'\", r\"'\\1'\", string)\n # miscellaneous\n string = string.replace(\"= = = =\", \"====\")\n string = string.replace(\"= = =\", \"===\")\n string = string.replace(\"= =\", \"==\")\n string = string.replace(\" \" + chr(176) + \" \", chr(176))\n string = string.replace(\" \\n\", \"\\n\")\n string = string.replace(\"\\n \", \"\\n\")\n string = string.replace(\" N \", \" 1 \")\n string = string.replace(\" 's\", \"'s\")\n\n return string\n", + "process_results": "def process_results(doc, results):\n (loglikelihood,) = results\n # IMPORTANT: wikitext counts number of words in *original doc before detokenization*\n _words = len(re.split(r\"\\s+\", doc[\"page\"]))\n _bytes = len(doc[\"page\"].encode(\"utf-8\"))\n return {\n \"word_perplexity\": (loglikelihood, _words),\n \"byte_perplexity\": (loglikelihood, _bytes),\n \"bits_per_byte\": (loglikelihood, _bytes),\n }\n", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "word_perplexity" + }, + { + "metric": "byte_perplexity" + }, + { + "metric": "bits_per_byte" + } + ], + "output_type": "loglikelihood_rolling", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "{{page}}", + "metadata": { + "version": 2.0 + } + }, + "winogrande": { + "task": "winogrande", + "dataset_path": "winogrande", + "dataset_name": "winogrande_xl", + "dataset_kwargs": { + "trust_remote_code": true + }, + "training_split": "train", + "validation_split": "validation", + "doc_to_text": "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n return answer_to_num[doc[\"answer\"]]\n", + "doc_to_target": "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"_\") + 1\n return doc[\"sentence\"][idx:].strip()\n", + "doc_to_choice": "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"_\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"sentence\"][:idx] + opt for opt in options]\n", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": true, + "doc_to_decontamination_query": "sentence", + "metadata": { + "version": 1.0 + } + }, + "wsc": { + "task": "wsc", + "tag": [ + "super-glue-lm-eval-v1" + ], + "dataset_path": "super_glue", + "dataset_name": "wsc.fixed", + "training_split": "train", + "validation_split": "validation", + "doc_to_text": "def default_doc_to_text(x):\n raw_passage = x[\"text\"]\n # NOTE: HuggingFace span indices are word-based not character-based.\n pre = \" \".join(raw_passage.split()[: x[\"span2_index\"]])\n post = raw_passage[len(pre) + len(x[\"span2_text\"]) + 1 :]\n passage = general_detokenize(pre + \" *{}*\".format(x[\"span2_text\"]) + post)\n noun = x[\"span1_text\"]\n pronoun = x[\"span2_text\"]\n text = (\n f\"Passage: {passage}\\n\"\n + f'Question: In the passage above, does the pronoun \"*{pronoun}*\" refer to \"*{noun}*\"?\\n'\n + \"Answer:\"\n )\n return text\n", + "doc_to_target": "label", + "doc_to_choice": [ + 
"no", + "yes" + ], + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc" + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + } + }, + "versions": { + "arc_challenge": 1.0, + "arc_easy": 1.0, + "blimp": 2.0, + "blimp_adjunct_island": 1.0, + "blimp_anaphor_gender_agreement": 1.0, + "blimp_anaphor_number_agreement": 1.0, + "blimp_animate_subject_passive": 1.0, + "blimp_animate_subject_trans": 1.0, + "blimp_causative": 1.0, + "blimp_complex_NP_island": 1.0, + "blimp_coordinate_structure_constraint_complex_left_branch": 1.0, + "blimp_coordinate_structure_constraint_object_extraction": 1.0, + "blimp_determiner_noun_agreement_1": 1.0, + "blimp_determiner_noun_agreement_2": 1.0, + "blimp_determiner_noun_agreement_irregular_1": 1.0, + "blimp_determiner_noun_agreement_irregular_2": 1.0, + "blimp_determiner_noun_agreement_with_adj_2": 1.0, + "blimp_determiner_noun_agreement_with_adj_irregular_1": 1.0, + "blimp_determiner_noun_agreement_with_adj_irregular_2": 1.0, + "blimp_determiner_noun_agreement_with_adjective_1": 1.0, + "blimp_distractor_agreement_relational_noun": 1.0, + "blimp_distractor_agreement_relative_clause": 1.0, + "blimp_drop_argument": 1.0, + "blimp_ellipsis_n_bar_1": 1.0, + "blimp_ellipsis_n_bar_2": 1.0, + "blimp_existential_there_object_raising": 1.0, + "blimp_existential_there_quantifiers_1": 1.0, + "blimp_existential_there_quantifiers_2": 1.0, + "blimp_existential_there_subject_raising": 1.0, + "blimp_expletive_it_object_raising": 1.0, + "blimp_inchoative": 1.0, + "blimp_intransitive": 1.0, + "blimp_irregular_past_participle_adjectives": 1.0, + "blimp_irregular_past_participle_verbs": 1.0, + "blimp_irregular_plural_subject_verb_agreement_1": 1.0, + "blimp_irregular_plural_subject_verb_agreement_2": 1.0, + "blimp_left_branch_island_echo_question": 1.0, + "blimp_left_branch_island_simple_question": 1.0, + "blimp_matrix_question_npi_licensor_present": 1.0, + "blimp_npi_present_1": 1.0, + "blimp_npi_present_2": 1.0, + "blimp_only_npi_licensor_present": 1.0, + "blimp_only_npi_scope": 1.0, + "blimp_passive_1": 1.0, + "blimp_passive_2": 1.0, + "blimp_principle_A_c_command": 1.0, + "blimp_principle_A_case_1": 1.0, + "blimp_principle_A_case_2": 1.0, + "blimp_principle_A_domain_1": 1.0, + "blimp_principle_A_domain_2": 1.0, + "blimp_principle_A_domain_3": 1.0, + "blimp_principle_A_reconstruction": 1.0, + "blimp_regular_plural_subject_verb_agreement_1": 1.0, + "blimp_regular_plural_subject_verb_agreement_2": 1.0, + "blimp_sentential_negation_npi_licensor_present": 1.0, + "blimp_sentential_negation_npi_scope": 1.0, + "blimp_sentential_subject_island": 1.0, + "blimp_superlative_quantifiers_1": 1.0, + "blimp_superlative_quantifiers_2": 1.0, + "blimp_tough_vs_raising_1": 1.0, + "blimp_tough_vs_raising_2": 1.0, + "blimp_transitive": 1.0, + "blimp_wh_island": 1.0, + "blimp_wh_questions_object_gap": 1.0, + "blimp_wh_questions_subject_gap": 1.0, + "blimp_wh_questions_subject_gap_long_distance": 1.0, + "blimp_wh_vs_that_no_gap": 1.0, + "blimp_wh_vs_that_no_gap_long_distance": 1.0, + "blimp_wh_vs_that_with_gap": 1.0, + "blimp_wh_vs_that_with_gap_long_distance": 1.0, + "lambada_openai": 1.0, + "logiqa": 1.0, + "mmlu": 2, + "mmlu_abstract_algebra": 1.0, + "mmlu_anatomy": 1.0, + "mmlu_astronomy": 1.0, + "mmlu_business_ethics": 1.0, + "mmlu_clinical_knowledge": 1.0, + "mmlu_college_biology": 1.0, + "mmlu_college_chemistry": 1.0, + 
"mmlu_college_computer_science": 1.0, + "mmlu_college_mathematics": 1.0, + "mmlu_college_medicine": 1.0, + "mmlu_college_physics": 1.0, + "mmlu_computer_security": 1.0, + "mmlu_conceptual_physics": 1.0, + "mmlu_econometrics": 1.0, + "mmlu_electrical_engineering": 1.0, + "mmlu_elementary_mathematics": 1.0, + "mmlu_formal_logic": 1.0, + "mmlu_global_facts": 1.0, + "mmlu_high_school_biology": 1.0, + "mmlu_high_school_chemistry": 1.0, + "mmlu_high_school_computer_science": 1.0, + "mmlu_high_school_european_history": 1.0, + "mmlu_high_school_geography": 1.0, + "mmlu_high_school_government_and_politics": 1.0, + "mmlu_high_school_macroeconomics": 1.0, + "mmlu_high_school_mathematics": 1.0, + "mmlu_high_school_microeconomics": 1.0, + "mmlu_high_school_physics": 1.0, + "mmlu_high_school_psychology": 1.0, + "mmlu_high_school_statistics": 1.0, + "mmlu_high_school_us_history": 1.0, + "mmlu_high_school_world_history": 1.0, + "mmlu_human_aging": 1.0, + "mmlu_human_sexuality": 1.0, + "mmlu_humanities": 2, + "mmlu_international_law": 1.0, + "mmlu_jurisprudence": 1.0, + "mmlu_logical_fallacies": 1.0, + "mmlu_machine_learning": 1.0, + "mmlu_management": 1.0, + "mmlu_marketing": 1.0, + "mmlu_medical_genetics": 1.0, + "mmlu_miscellaneous": 1.0, + "mmlu_moral_disputes": 1.0, + "mmlu_moral_scenarios": 1.0, + "mmlu_nutrition": 1.0, + "mmlu_other": 2, + "mmlu_philosophy": 1.0, + "mmlu_prehistory": 1.0, + "mmlu_professional_accounting": 1.0, + "mmlu_professional_law": 1.0, + "mmlu_professional_medicine": 1.0, + "mmlu_professional_psychology": 1.0, + "mmlu_public_relations": 1.0, + "mmlu_security_studies": 1.0, + "mmlu_social_sciences": 2, + "mmlu_sociology": 1.0, + "mmlu_stem": 2, + "mmlu_us_foreign_policy": 1.0, + "mmlu_virology": 1.0, + "mmlu_world_religions": 1.0, + "piqa": 1.0, + "sciq": 1.0, + "wikitext": 2.0, + "winogrande": 1.0, + "wsc": 1.0 + }, + "n-shot": { + "arc_challenge": 0, + "arc_easy": 0, + "blimp_adjunct_island": 0, + "blimp_anaphor_gender_agreement": 0, + "blimp_anaphor_number_agreement": 0, + "blimp_animate_subject_passive": 0, + "blimp_animate_subject_trans": 0, + "blimp_causative": 0, + "blimp_complex_NP_island": 0, + "blimp_coordinate_structure_constraint_complex_left_branch": 0, + "blimp_coordinate_structure_constraint_object_extraction": 0, + "blimp_determiner_noun_agreement_1": 0, + "blimp_determiner_noun_agreement_2": 0, + "blimp_determiner_noun_agreement_irregular_1": 0, + "blimp_determiner_noun_agreement_irregular_2": 0, + "blimp_determiner_noun_agreement_with_adj_2": 0, + "blimp_determiner_noun_agreement_with_adj_irregular_1": 0, + "blimp_determiner_noun_agreement_with_adj_irregular_2": 0, + "blimp_determiner_noun_agreement_with_adjective_1": 0, + "blimp_distractor_agreement_relational_noun": 0, + "blimp_distractor_agreement_relative_clause": 0, + "blimp_drop_argument": 0, + "blimp_ellipsis_n_bar_1": 0, + "blimp_ellipsis_n_bar_2": 0, + "blimp_existential_there_object_raising": 0, + "blimp_existential_there_quantifiers_1": 0, + "blimp_existential_there_quantifiers_2": 0, + "blimp_existential_there_subject_raising": 0, + "blimp_expletive_it_object_raising": 0, + "blimp_inchoative": 0, + "blimp_intransitive": 0, + "blimp_irregular_past_participle_adjectives": 0, + "blimp_irregular_past_participle_verbs": 0, + "blimp_irregular_plural_subject_verb_agreement_1": 0, + "blimp_irregular_plural_subject_verb_agreement_2": 0, + "blimp_left_branch_island_echo_question": 0, + "blimp_left_branch_island_simple_question": 0, + "blimp_matrix_question_npi_licensor_present": 0, + "blimp_npi_present_1": 
0, + "blimp_npi_present_2": 0, + "blimp_only_npi_licensor_present": 0, + "blimp_only_npi_scope": 0, + "blimp_passive_1": 0, + "blimp_passive_2": 0, + "blimp_principle_A_c_command": 0, + "blimp_principle_A_case_1": 0, + "blimp_principle_A_case_2": 0, + "blimp_principle_A_domain_1": 0, + "blimp_principle_A_domain_2": 0, + "blimp_principle_A_domain_3": 0, + "blimp_principle_A_reconstruction": 0, + "blimp_regular_plural_subject_verb_agreement_1": 0, + "blimp_regular_plural_subject_verb_agreement_2": 0, + "blimp_sentential_negation_npi_licensor_present": 0, + "blimp_sentential_negation_npi_scope": 0, + "blimp_sentential_subject_island": 0, + "blimp_superlative_quantifiers_1": 0, + "blimp_superlative_quantifiers_2": 0, + "blimp_tough_vs_raising_1": 0, + "blimp_tough_vs_raising_2": 0, + "blimp_transitive": 0, + "blimp_wh_island": 0, + "blimp_wh_questions_object_gap": 0, + "blimp_wh_questions_subject_gap": 0, + "blimp_wh_questions_subject_gap_long_distance": 0, + "blimp_wh_vs_that_no_gap": 0, + "blimp_wh_vs_that_no_gap_long_distance": 0, + "blimp_wh_vs_that_with_gap": 0, + "blimp_wh_vs_that_with_gap_long_distance": 0, + "lambada_openai": 0, + "logiqa": 0, + "mmlu_abstract_algebra": 0, + "mmlu_anatomy": 0, + "mmlu_astronomy": 0, + "mmlu_business_ethics": 0, + "mmlu_clinical_knowledge": 0, + "mmlu_college_biology": 0, + "mmlu_college_chemistry": 0, + "mmlu_college_computer_science": 0, + "mmlu_college_mathematics": 0, + "mmlu_college_medicine": 0, + "mmlu_college_physics": 0, + "mmlu_computer_security": 0, + "mmlu_conceptual_physics": 0, + "mmlu_econometrics": 0, + "mmlu_electrical_engineering": 0, + "mmlu_elementary_mathematics": 0, + "mmlu_formal_logic": 0, + "mmlu_global_facts": 0, + "mmlu_high_school_biology": 0, + "mmlu_high_school_chemistry": 0, + "mmlu_high_school_computer_science": 0, + "mmlu_high_school_european_history": 0, + "mmlu_high_school_geography": 0, + "mmlu_high_school_government_and_politics": 0, + "mmlu_high_school_macroeconomics": 0, + "mmlu_high_school_mathematics": 0, + "mmlu_high_school_microeconomics": 0, + "mmlu_high_school_physics": 0, + "mmlu_high_school_psychology": 0, + "mmlu_high_school_statistics": 0, + "mmlu_high_school_us_history": 0, + "mmlu_high_school_world_history": 0, + "mmlu_human_aging": 0, + "mmlu_human_sexuality": 0, + "mmlu_international_law": 0, + "mmlu_jurisprudence": 0, + "mmlu_logical_fallacies": 0, + "mmlu_machine_learning": 0, + "mmlu_management": 0, + "mmlu_marketing": 0, + "mmlu_medical_genetics": 0, + "mmlu_miscellaneous": 0, + "mmlu_moral_disputes": 0, + "mmlu_moral_scenarios": 0, + "mmlu_nutrition": 0, + "mmlu_philosophy": 0, + "mmlu_prehistory": 0, + "mmlu_professional_accounting": 0, + "mmlu_professional_law": 0, + "mmlu_professional_medicine": 0, + "mmlu_professional_psychology": 0, + "mmlu_public_relations": 0, + "mmlu_security_studies": 0, + "mmlu_sociology": 0, + "mmlu_us_foreign_policy": 0, + "mmlu_virology": 0, + "mmlu_world_religions": 0, + "piqa": 0, + "sciq": 0, + "wikitext": 0, + "winogrande": 0, + "wsc": 0 + }, + "higher_is_better": { + "arc_challenge": { + "acc": true, + "acc_norm": true + }, + "arc_easy": { + "acc": true, + "acc_norm": true + }, + "blimp": { + "acc": true + }, + "blimp_adjunct_island": { + "acc": true + }, + "blimp_anaphor_gender_agreement": { + "acc": true + }, + "blimp_anaphor_number_agreement": { + "acc": true + }, + "blimp_animate_subject_passive": { + "acc": true + }, + "blimp_animate_subject_trans": { + "acc": true + }, + "blimp_causative": { + "acc": true + }, + "blimp_complex_NP_island": { + "acc": true + 
}, + "blimp_coordinate_structure_constraint_complex_left_branch": { + "acc": true + }, + "blimp_coordinate_structure_constraint_object_extraction": { + "acc": true + }, + "blimp_determiner_noun_agreement_1": { + "acc": true + }, + "blimp_determiner_noun_agreement_2": { + "acc": true + }, + "blimp_determiner_noun_agreement_irregular_1": { + "acc": true + }, + "blimp_determiner_noun_agreement_irregular_2": { + "acc": true + }, + "blimp_determiner_noun_agreement_with_adj_2": { + "acc": true + }, + "blimp_determiner_noun_agreement_with_adj_irregular_1": { + "acc": true + }, + "blimp_determiner_noun_agreement_with_adj_irregular_2": { + "acc": true + }, + "blimp_determiner_noun_agreement_with_adjective_1": { + "acc": true + }, + "blimp_distractor_agreement_relational_noun": { + "acc": true + }, + "blimp_distractor_agreement_relative_clause": { + "acc": true + }, + "blimp_drop_argument": { + "acc": true + }, + "blimp_ellipsis_n_bar_1": { + "acc": true + }, + "blimp_ellipsis_n_bar_2": { + "acc": true + }, + "blimp_existential_there_object_raising": { + "acc": true + }, + "blimp_existential_there_quantifiers_1": { + "acc": true + }, + "blimp_existential_there_quantifiers_2": { + "acc": true + }, + "blimp_existential_there_subject_raising": { + "acc": true + }, + "blimp_expletive_it_object_raising": { + "acc": true + }, + "blimp_inchoative": { + "acc": true + }, + "blimp_intransitive": { + "acc": true + }, + "blimp_irregular_past_participle_adjectives": { + "acc": true + }, + "blimp_irregular_past_participle_verbs": { + "acc": true + }, + "blimp_irregular_plural_subject_verb_agreement_1": { + "acc": true + }, + "blimp_irregular_plural_subject_verb_agreement_2": { + "acc": true + }, + "blimp_left_branch_island_echo_question": { + "acc": true + }, + "blimp_left_branch_island_simple_question": { + "acc": true + }, + "blimp_matrix_question_npi_licensor_present": { + "acc": true + }, + "blimp_npi_present_1": { + "acc": true + }, + "blimp_npi_present_2": { + "acc": true + }, + "blimp_only_npi_licensor_present": { + "acc": true + }, + "blimp_only_npi_scope": { + "acc": true + }, + "blimp_passive_1": { + "acc": true + }, + "blimp_passive_2": { + "acc": true + }, + "blimp_principle_A_c_command": { + "acc": true + }, + "blimp_principle_A_case_1": { + "acc": true + }, + "blimp_principle_A_case_2": { + "acc": true + }, + "blimp_principle_A_domain_1": { + "acc": true + }, + "blimp_principle_A_domain_2": { + "acc": true + }, + "blimp_principle_A_domain_3": { + "acc": true + }, + "blimp_principle_A_reconstruction": { + "acc": true + }, + "blimp_regular_plural_subject_verb_agreement_1": { + "acc": true + }, + "blimp_regular_plural_subject_verb_agreement_2": { + "acc": true + }, + "blimp_sentential_negation_npi_licensor_present": { + "acc": true + }, + "blimp_sentential_negation_npi_scope": { + "acc": true + }, + "blimp_sentential_subject_island": { + "acc": true + }, + "blimp_superlative_quantifiers_1": { + "acc": true + }, + "blimp_superlative_quantifiers_2": { + "acc": true + }, + "blimp_tough_vs_raising_1": { + "acc": true + }, + "blimp_tough_vs_raising_2": { + "acc": true + }, + "blimp_transitive": { + "acc": true + }, + "blimp_wh_island": { + "acc": true + }, + "blimp_wh_questions_object_gap": { + "acc": true + }, + "blimp_wh_questions_subject_gap": { + "acc": true + }, + "blimp_wh_questions_subject_gap_long_distance": { + "acc": true + }, + "blimp_wh_vs_that_no_gap": { + "acc": true + }, + "blimp_wh_vs_that_no_gap_long_distance": { + "acc": true + }, + "blimp_wh_vs_that_with_gap": { + "acc": true + }, + 
"blimp_wh_vs_that_with_gap_long_distance": { + "acc": true + }, + "lambada_openai": { + "perplexity": false, + "acc": true + }, + "logiqa": { + "acc": true, + "acc_norm": true + }, + "mmlu": { + "acc": true + }, + "mmlu_abstract_algebra": { + "acc": true + }, + "mmlu_anatomy": { + "acc": true + }, + "mmlu_astronomy": { + "acc": true + }, + "mmlu_business_ethics": { + "acc": true + }, + "mmlu_clinical_knowledge": { + "acc": true + }, + "mmlu_college_biology": { + "acc": true + }, + "mmlu_college_chemistry": { + "acc": true + }, + "mmlu_college_computer_science": { + "acc": true + }, + "mmlu_college_mathematics": { + "acc": true + }, + "mmlu_college_medicine": { + "acc": true + }, + "mmlu_college_physics": { + "acc": true + }, + "mmlu_computer_security": { + "acc": true + }, + "mmlu_conceptual_physics": { + "acc": true + }, + "mmlu_econometrics": { + "acc": true + }, + "mmlu_electrical_engineering": { + "acc": true + }, + "mmlu_elementary_mathematics": { + "acc": true + }, + "mmlu_formal_logic": { + "acc": true + }, + "mmlu_global_facts": { + "acc": true + }, + "mmlu_high_school_biology": { + "acc": true + }, + "mmlu_high_school_chemistry": { + "acc": true + }, + "mmlu_high_school_computer_science": { + "acc": true + }, + "mmlu_high_school_european_history": { + "acc": true + }, + "mmlu_high_school_geography": { + "acc": true + }, + "mmlu_high_school_government_and_politics": { + "acc": true + }, + "mmlu_high_school_macroeconomics": { + "acc": true + }, + "mmlu_high_school_mathematics": { + "acc": true + }, + "mmlu_high_school_microeconomics": { + "acc": true + }, + "mmlu_high_school_physics": { + "acc": true + }, + "mmlu_high_school_psychology": { + "acc": true + }, + "mmlu_high_school_statistics": { + "acc": true + }, + "mmlu_high_school_us_history": { + "acc": true + }, + "mmlu_high_school_world_history": { + "acc": true + }, + "mmlu_human_aging": { + "acc": true + }, + "mmlu_human_sexuality": { + "acc": true + }, + "mmlu_humanities": { + "acc": true + }, + "mmlu_international_law": { + "acc": true + }, + "mmlu_jurisprudence": { + "acc": true + }, + "mmlu_logical_fallacies": { + "acc": true + }, + "mmlu_machine_learning": { + "acc": true + }, + "mmlu_management": { + "acc": true + }, + "mmlu_marketing": { + "acc": true + }, + "mmlu_medical_genetics": { + "acc": true + }, + "mmlu_miscellaneous": { + "acc": true + }, + "mmlu_moral_disputes": { + "acc": true + }, + "mmlu_moral_scenarios": { + "acc": true + }, + "mmlu_nutrition": { + "acc": true + }, + "mmlu_other": { + "acc": true + }, + "mmlu_philosophy": { + "acc": true + }, + "mmlu_prehistory": { + "acc": true + }, + "mmlu_professional_accounting": { + "acc": true + }, + "mmlu_professional_law": { + "acc": true + }, + "mmlu_professional_medicine": { + "acc": true + }, + "mmlu_professional_psychology": { + "acc": true + }, + "mmlu_public_relations": { + "acc": true + }, + "mmlu_security_studies": { + "acc": true + }, + "mmlu_social_sciences": { + "acc": true + }, + "mmlu_sociology": { + "acc": true + }, + "mmlu_stem": { + "acc": true + }, + "mmlu_us_foreign_policy": { + "acc": true + }, + "mmlu_virology": { + "acc": true + }, + "mmlu_world_religions": { + "acc": true + }, + "piqa": { + "acc": true, + "acc_norm": true + }, + "sciq": { + "acc": true, + "acc_norm": true + }, + "wikitext": { + "word_perplexity": false, + "byte_perplexity": false, + "bits_per_byte": false + }, + "winogrande": { + "acc": true + }, + "wsc": { + "acc": true + } + }, + "n-samples": { + "wsc": { + "original": 104, + "effective": 104 + }, + "winogrande": { + 
"original": 1267, + "effective": 1267 + }, + "wikitext": { + "original": 62, + "effective": 62 + }, + "sciq": { + "original": 1000, + "effective": 1000 + }, + "piqa": { + "original": 1838, + "effective": 1838 + }, + "mmlu_elementary_mathematics": { + "original": 378, + "effective": 378 + }, + "mmlu_electrical_engineering": { + "original": 145, + "effective": 145 + }, + "mmlu_high_school_computer_science": { + "original": 100, + "effective": 100 + }, + "mmlu_high_school_physics": { + "original": 151, + "effective": 151 + }, + "mmlu_college_mathematics": { + "original": 100, + "effective": 100 + }, + "mmlu_college_chemistry": { + "original": 100, + "effective": 100 + }, + "mmlu_machine_learning": { + "original": 112, + "effective": 112 + }, + "mmlu_high_school_mathematics": { + "original": 270, + "effective": 270 + }, + "mmlu_computer_security": { + "original": 100, + "effective": 100 + }, + "mmlu_conceptual_physics": { + "original": 235, + "effective": 235 + }, + "mmlu_high_school_statistics": { + "original": 216, + "effective": 216 + }, + "mmlu_high_school_biology": { + "original": 310, + "effective": 310 + }, + "mmlu_astronomy": { + "original": 152, + "effective": 152 + }, + "mmlu_college_computer_science": { + "original": 100, + "effective": 100 + }, + "mmlu_college_biology": { + "original": 144, + "effective": 144 + }, + "mmlu_college_physics": { + "original": 102, + "effective": 102 + }, + "mmlu_anatomy": { + "original": 135, + "effective": 135 + }, + "mmlu_high_school_chemistry": { + "original": 203, + "effective": 203 + }, + "mmlu_abstract_algebra": { + "original": 100, + "effective": 100 + }, + "mmlu_college_medicine": { + "original": 173, + "effective": 173 + }, + "mmlu_medical_genetics": { + "original": 100, + "effective": 100 + }, + "mmlu_business_ethics": { + "original": 100, + "effective": 100 + }, + "mmlu_miscellaneous": { + "original": 783, + "effective": 783 + }, + "mmlu_nutrition": { + "original": 306, + "effective": 306 + }, + "mmlu_clinical_knowledge": { + "original": 265, + "effective": 265 + }, + "mmlu_human_aging": { + "original": 223, + "effective": 223 + }, + "mmlu_professional_accounting": { + "original": 282, + "effective": 282 + }, + "mmlu_marketing": { + "original": 234, + "effective": 234 + }, + "mmlu_global_facts": { + "original": 100, + "effective": 100 + }, + "mmlu_professional_medicine": { + "original": 272, + "effective": 272 + }, + "mmlu_virology": { + "original": 166, + "effective": 166 + }, + "mmlu_management": { + "original": 103, + "effective": 103 + }, + "mmlu_us_foreign_policy": { + "original": 100, + "effective": 100 + }, + "mmlu_sociology": { + "original": 201, + "effective": 201 + }, + "mmlu_econometrics": { + "original": 114, + "effective": 114 + }, + "mmlu_security_studies": { + "original": 245, + "effective": 245 + }, + "mmlu_high_school_geography": { + "original": 198, + "effective": 198 + }, + "mmlu_public_relations": { + "original": 110, + "effective": 110 + }, + "mmlu_high_school_microeconomics": { + "original": 238, + "effective": 238 + }, + "mmlu_professional_psychology": { + "original": 612, + "effective": 612 + }, + "mmlu_high_school_macroeconomics": { + "original": 390, + "effective": 390 + }, + "mmlu_human_sexuality": { + "original": 131, + "effective": 131 + }, + "mmlu_high_school_government_and_politics": { + "original": 193, + "effective": 193 + }, + "mmlu_high_school_psychology": { + "original": 545, + "effective": 545 + }, + "mmlu_moral_disputes": { + "original": 346, + "effective": 346 + }, + "mmlu_high_school_world_history": { 
+ "original": 237, + "effective": 237 + }, + "mmlu_jurisprudence": { + "original": 108, + "effective": 108 + }, + "mmlu_philosophy": { + "original": 311, + "effective": 311 + }, + "mmlu_high_school_us_history": { + "original": 204, + "effective": 204 + }, + "mmlu_professional_law": { + "original": 1534, + "effective": 1534 + }, + "mmlu_logical_fallacies": { + "original": 163, + "effective": 163 + }, + "mmlu_moral_scenarios": { + "original": 895, + "effective": 895 + }, + "mmlu_formal_logic": { + "original": 126, + "effective": 126 + }, + "mmlu_prehistory": { + "original": 324, + "effective": 324 + }, + "mmlu_high_school_european_history": { + "original": 165, + "effective": 165 + }, + "mmlu_world_religions": { + "original": 171, + "effective": 171 + }, + "mmlu_international_law": { + "original": 121, + "effective": 121 + }, + "logiqa": { + "original": 651, + "effective": 651 + }, + "lambada_openai": { + "original": 5153, + "effective": 5153 + }, + "blimp_adjunct_island": { + "original": 1000, + "effective": 1000 + }, + "blimp_anaphor_gender_agreement": { + "original": 1000, + "effective": 1000 + }, + "blimp_anaphor_number_agreement": { + "original": 1000, + "effective": 1000 + }, + "blimp_animate_subject_passive": { + "original": 1000, + "effective": 1000 + }, + "blimp_animate_subject_trans": { + "original": 1000, + "effective": 1000 + }, + "blimp_causative": { + "original": 1000, + "effective": 1000 + }, + "blimp_complex_NP_island": { + "original": 1000, + "effective": 1000 + }, + "blimp_coordinate_structure_constraint_complex_left_branch": { + "original": 1000, + "effective": 1000 + }, + "blimp_coordinate_structure_constraint_object_extraction": { + "original": 1000, + "effective": 1000 + }, + "blimp_determiner_noun_agreement_1": { + "original": 1000, + "effective": 1000 + }, + "blimp_determiner_noun_agreement_2": { + "original": 1000, + "effective": 1000 + }, + "blimp_determiner_noun_agreement_irregular_1": { + "original": 1000, + "effective": 1000 + }, + "blimp_determiner_noun_agreement_irregular_2": { + "original": 1000, + "effective": 1000 + }, + "blimp_determiner_noun_agreement_with_adj_2": { + "original": 1000, + "effective": 1000 + }, + "blimp_determiner_noun_agreement_with_adj_irregular_1": { + "original": 1000, + "effective": 1000 + }, + "blimp_determiner_noun_agreement_with_adj_irregular_2": { + "original": 1000, + "effective": 1000 + }, + "blimp_determiner_noun_agreement_with_adjective_1": { + "original": 1000, + "effective": 1000 + }, + "blimp_distractor_agreement_relational_noun": { + "original": 1000, + "effective": 1000 + }, + "blimp_distractor_agreement_relative_clause": { + "original": 1000, + "effective": 1000 + }, + "blimp_drop_argument": { + "original": 1000, + "effective": 1000 + }, + "blimp_ellipsis_n_bar_1": { + "original": 1000, + "effective": 1000 + }, + "blimp_ellipsis_n_bar_2": { + "original": 1000, + "effective": 1000 + }, + "blimp_existential_there_object_raising": { + "original": 1000, + "effective": 1000 + }, + "blimp_existential_there_quantifiers_1": { + "original": 1000, + "effective": 1000 + }, + "blimp_existential_there_quantifiers_2": { + "original": 1000, + "effective": 1000 + }, + "blimp_existential_there_subject_raising": { + "original": 1000, + "effective": 1000 + }, + "blimp_expletive_it_object_raising": { + "original": 1000, + "effective": 1000 + }, + "blimp_inchoative": { + "original": 1000, + "effective": 1000 + }, + "blimp_intransitive": { + "original": 1000, + "effective": 1000 + }, + "blimp_irregular_past_participle_adjectives": { + 
"original": 1000, + "effective": 1000 + }, + "blimp_irregular_past_participle_verbs": { + "original": 1000, + "effective": 1000 + }, + "blimp_irregular_plural_subject_verb_agreement_1": { + "original": 1000, + "effective": 1000 + }, + "blimp_irregular_plural_subject_verb_agreement_2": { + "original": 1000, + "effective": 1000 + }, + "blimp_left_branch_island_echo_question": { + "original": 1000, + "effective": 1000 + }, + "blimp_left_branch_island_simple_question": { + "original": 1000, + "effective": 1000 + }, + "blimp_matrix_question_npi_licensor_present": { + "original": 1000, + "effective": 1000 + }, + "blimp_npi_present_1": { + "original": 1000, + "effective": 1000 + }, + "blimp_npi_present_2": { + "original": 1000, + "effective": 1000 + }, + "blimp_only_npi_licensor_present": { + "original": 1000, + "effective": 1000 + }, + "blimp_only_npi_scope": { + "original": 1000, + "effective": 1000 + }, + "blimp_passive_1": { + "original": 1000, + "effective": 1000 + }, + "blimp_passive_2": { + "original": 1000, + "effective": 1000 + }, + "blimp_principle_A_c_command": { + "original": 1000, + "effective": 1000 + }, + "blimp_principle_A_case_1": { + "original": 1000, + "effective": 1000 + }, + "blimp_principle_A_case_2": { + "original": 1000, + "effective": 1000 + }, + "blimp_principle_A_domain_1": { + "original": 1000, + "effective": 1000 + }, + "blimp_principle_A_domain_2": { + "original": 1000, + "effective": 1000 + }, + "blimp_principle_A_domain_3": { + "original": 1000, + "effective": 1000 + }, + "blimp_principle_A_reconstruction": { + "original": 1000, + "effective": 1000 + }, + "blimp_regular_plural_subject_verb_agreement_1": { + "original": 1000, + "effective": 1000 + }, + "blimp_regular_plural_subject_verb_agreement_2": { + "original": 1000, + "effective": 1000 + }, + "blimp_sentential_negation_npi_licensor_present": { + "original": 1000, + "effective": 1000 + }, + "blimp_sentential_negation_npi_scope": { + "original": 1000, + "effective": 1000 + }, + "blimp_sentential_subject_island": { + "original": 1000, + "effective": 1000 + }, + "blimp_superlative_quantifiers_1": { + "original": 1000, + "effective": 1000 + }, + "blimp_superlative_quantifiers_2": { + "original": 1000, + "effective": 1000 + }, + "blimp_tough_vs_raising_1": { + "original": 1000, + "effective": 1000 + }, + "blimp_tough_vs_raising_2": { + "original": 1000, + "effective": 1000 + }, + "blimp_transitive": { + "original": 1000, + "effective": 1000 + }, + "blimp_wh_island": { + "original": 1000, + "effective": 1000 + }, + "blimp_wh_questions_object_gap": { + "original": 1000, + "effective": 1000 + }, + "blimp_wh_questions_subject_gap": { + "original": 1000, + "effective": 1000 + }, + "blimp_wh_questions_subject_gap_long_distance": { + "original": 1000, + "effective": 1000 + }, + "blimp_wh_vs_that_no_gap": { + "original": 1000, + "effective": 1000 + }, + "blimp_wh_vs_that_no_gap_long_distance": { + "original": 1000, + "effective": 1000 + }, + "blimp_wh_vs_that_with_gap": { + "original": 1000, + "effective": 1000 + }, + "blimp_wh_vs_that_with_gap_long_distance": { + "original": 1000, + "effective": 1000 + }, + "arc_challenge": { + "original": 1172, + "effective": 1172 + }, + "arc_easy": { + "original": 2376, + "effective": 2376 + } + }, + "config": { + "model": "hf", + "model_args": "pretrained=EleutherAI/pythia-70m,revision=step4,dtype=float,trust_remote_code=True", + "model_num_parameters": 70426624, + "model_dtype": "torch.float32", + "model_revision": "step4", + "model_sha": "4a3529b60396c283a5dd820f841fa2cfc72a8e01", + 
"batch_size": "8", + "batch_sizes": [], + "device": "cuda:0", + "use_cache": null, + "limit": null, + "bootstrap_iters": 100000, + "gen_kwargs": null, + "random_seed": 0, + "numpy_seed": 1234, + "torch_seed": 1234, + "fewshot_seed": 1234 + }, + "git_hash": "a5b7c41", + "date": 1729867172.6936915, + "pretty_env_info": "PyTorch version: 2.5.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.3 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: 14.0.0-1ubuntu1.1\nCMake version: version 3.30.5\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.1.85+-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.2.140\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: NVIDIA A100-SXM4-40GB\nNvidia driver version: 535.104.05\ncuDNN version: Probably one of the following:\n/usr/lib/x86_64-linux-gnu/libcudnn.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.9.6\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.9.6\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 12\nOn-line CPU(s) list: 0-11\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 6\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.30\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 192 KiB (6 instances)\nL1i cache: 192 KiB (6 instances)\nL2 cache: 6 MiB (6 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-11\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Not affected\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable\nVulnerability Reg file data sampling: Not affected\nVulnerability Retbleed: Vulnerable\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Vulnerable\nVulnerability Spectre v1: Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers\nVulnerability Spectre v2: Vulnerable; IBPB: disabled; STIBP: disabled; PBRSB-eIBRS: Vulnerable; BHI: Vulnerable (Syscall hardening enabled)\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Vulnerable\n\nVersions of relevant libraries:\n[pip3] mypy==1.13.0\n[pip3] mypy-extensions==1.0.0\n[pip3] numpy==1.26.4\n[pip3] optree==0.13.0\n[pip3] 
torch==2.5.0+cu121\n[pip3] torchaudio==2.5.0+cu121\n[pip3] torchsummary==1.5.1\n[pip3] torchvision==0.20.0+cu121\n[conda] Could not collect", + "transformers_version": "4.44.2", + "upper_git_hash": null, + "tokenizer_pad_token": [ + "<|endoftext|>", + "0" + ], + "tokenizer_eos_token": [ + "<|endoftext|>", + "0" + ], + "tokenizer_bos_token": [ + "<|endoftext|>", + "0" + ], + "eot_token_id": 0, + "max_length": 2048, + "task_hashes": {}, + "model_source": "hf", + "model_name": "EleutherAI/pythia-70m", + "model_name_sanitized": "EleutherAI__pythia-70m", + "system_instruction": null, + "system_instruction_sha": null, + "fewshot_as_multiturn": false, + "chat_template": null, + "chat_template_sha": null, + "start_time": 2054.479701412, + "end_time": 2666.055815323, + "total_evaluation_time_seconds": "611.576113911" +} \ No newline at end of file