            yaml.dump(yaml_dict, yaml_file, width=float('inf'), allow_unicode=True, default_style='"')

    group_yaml_dict = {
        'group': f'belebele_{args.task_prefix}' if args.task_prefix != '' else 'belebele',
        'task': [
            f'belebele_{args.task_prefix}_{lang}' if args.task_prefix != '' else f'belebele_{lang}'
            for lang in languages
            if 'default' not in lang
        ],
        'aggregate_metric_list': [
            {'metric': 'acc', 'aggregation': 'mean', 'weight_by_size': False},
            {'metric': 'acc_norm', 'aggregation': 'mean', 'weight_by_size': False},
        ],
        'metadata': {'version': 0.0},
    }
    file_save_path = '_' + args.save_prefix_path + f'{args.task_prefix}.yaml'
    with open(file_save_path, 'w', encoding='utf-8') as group_yaml_file:
        yaml.dump(group_yaml_dict, group_yaml_file, width=float('inf'), allow_unicode=True, default_style='"')
# File: lm-evaluation-harness-main/lm_eval/tasks/bigbench/generate_tasks.py
import os

import datasets
import yaml

all_subtasks = ['abstract_narrative_understanding', 'anachronisms', 'analogical_similarity', 'analytic_entailment', 'arithmetic', 'ascii_word_recognition', 'authorship_verification', 'auto_categorization', 'auto_debugging', 'bbq_lite_json', 'bridging_anaphora_resolution_barqa', 'causal_judgment', 'cause_and_effect', 'checkmate_in_one', 'chess_state_tracking', 'chinese_remainder_theorem', 'cifar10_classification', 'code_line_description', 'codenames', 'color', 'common_morpheme', 'conceptual_combinations', 'conlang_translation', 'contextual_parametric_knowledge_conflicts', 'crash_blossom', 'crass_ai', 'cryobiology_spanish', 'cryptonite', 'cs_algorithms', 'dark_humor_detection', 'date_understanding', 'disambiguation_qa', 'discourse_marker_prediction', 'disfl_qa', 'dyck_languages', 'elementary_math_qa', 'emoji_movie', 'emojis_emotion_prediction', 'empirical_judgments', 'english_proverbs', 'english_russian_proverbs', 'entailed_polarity', 'entailed_polarity_hindi', 'epistemic_reasoning', 'evaluating_information_essentiality', 'fact_checker', 'fantasy_reasoning', 'few_shot_nlg', 'figure_of_speech_detection', 'formal_fallacies_syllogisms_negation', 'gem', 'gender_inclusive_sentences_german', 'general_knowledge', 'geometric_shapes', 'goal_step_wikihow', 'gre_reading_comprehension', 'hhh_alignment', 'hindi_question_answering', 'hindu_knowledge', 'hinglish_toxicity', 'human_organs_senses', 'hyperbaton', 'identify_math_theorems', 'identify_odd_metaphor', 'implicatures', 'implicit_relations', 'intent_recognition', 'international_phonetic_alphabet_nli', 'international_phonetic_alphabet_transliterate', 'intersect_geometry', 'irony_identification', 'kanji_ascii', 'kannada', 'key_value_maps', 'known_unknowns', 'language_games', 'language_identification', 'linguistic_mappings', 'linguistics_puzzles', 'list_functions', 'logic_grid_puzzle', 'logical_args', 'logical_deduction', 'logical_fallacy_detection', 'logical_sequence', 'mathematical_induction', 'matrixshapes', 'metaphor_boolean', 'metaphor_understanding', 'minute_mysteries_qa', 'misconceptions', 'misconceptions_russian', 'mnist_ascii', 'modified_arithmetic', 'moral_permissibility', 'movie_dialog_same_or_different', 'movie_recommendation', 'mult_data_wrangling', 'multiemo', 'natural_instructions', 'navigate', 'nonsense_words_grammar', 'novel_concepts', 'object_counting', 'odd_one_out', 'operators', 'paragraph_segmentation', 'parsinlu_qa', 'parsinlu_reading_comprehension', 'penguins_in_a_table', 'periodic_elements', 'persian_idioms', 'phrase_relatedness', 'physical_intuition', 'physics', 'physics_questions', 'play_dialog_same_or_different', 'polish_sequence_labeling', 'presuppositions_as_nli', 'qa_wikidata', 'question_selection', 'real_or_fake_text', 'reasoning_about_colored_objects', 'repeat_copy_logic', 'rephrase', 'riddle_sense', 'ruin_names', 'salient_translation_error_detection', 'scientific_press_release', 'semantic_parsing_in_context_sparc', 'semantic_parsing_spider', 'sentence_ambiguity', 'similarities_abstraction', 'simp_turing_concept', 'simple_arithmetic_json', 'simple_arithmetic_json_multiple_choice', 'simple_arithmetic_json_subtasks', 'simple_arithmetic_multiple_targets_json', 'simple_ethical_questions', 'simple_text_editing', 'snarks', 'social_iqa', 'social_support', 'sports_understanding', 'strange_stories', 'strategyqa', 'sufficient_information', 'suicide_risk', 'swahili_english_proverbs', 'swedish_to_german_proverbs', 'symbol_interpretation', 'temporal_sequences', 'tense', 'timedial', 'topical_chat', 'tracking_shuffled_objects', 
'understanding_fables', 'undo_permutation', 'unit_conversion', 'unit_interpretation', 'unnatural_in_context_learning', 'vitaminc_fact_verification', 'what_is_the_tao', 'which_wiki_edit', 'winowhy', 'word_sorting', 'word_unscrambling']
skip_tasks = ['simple_arithmetic_json_multiple_choice', 'simple_arithmetic_multiple_targets_json']
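
# main() writes one generated YAML per subtask into multiple_choice/ and
# generate_until/. For the multiple-choice variant it peeks at the first
# example of each task on the Hub: tasks with no multiple_choice_targets are
# skipped, tasks whose gold targets form a proper subset of the choices use
# template "a", and the rest use template "b".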
def main() -> None:
    for path, task_type in zip(
        ['multiple_choice', 'generate_until'],
        ['multiple_choice_template_yaml', 'generate_until_template_yaml'],
    ):
        os.makedirs(path, exist_ok=True)
        for task in all_subtasks:
            file_name = f'{task}.yaml'
            try:
                template_file = task_type
                if path == 'multiple_choice':
                    print(f'Checking {task} for multiple choices')
                    if task in skip_tasks:
                        continue
                    data = datasets.load_dataset('hails/bigbench', task + '_zero_shot')
                    multiple_choice_targets = data['default'][0]['multiple_choice_targets']
                    if len(multiple_choice_targets) == 0:
                        continue
                    else:
                        template_file = 'multiple_choice_template_b_yaml'
                        if set(data['default'][0]['targets']) < set(multiple_choice_targets):
                            template_file = 'multiple_choice_template_a_yaml'
                with open(f'{path}/{file_name}', 'w', encoding='utf-8') as f:
                    f.write('# Generated by utils.py\n')
                    yaml.dump(
                        {
                            'include': f'../{template_file}',
                            'task': 'bigbench_' + task + '_{}'.format(task_type.split('_template_yaml')[0]),
                            'dataset_name': task + '_zero_shot',
                        },
                        f,
                        width=float('inf'),
                        allow_unicode=True,
                    )
            except FileExistsError:
                pass


if __name__ == '__main__':
    main()
# File: lm-evaluation-harness-main/lm_eval/tasks/bigbench/push_bigbench_dataset.py
""""""

import bigbench.api.util as bb_utils
import datasets
from tqdm import tqdm

all_task_names = bb_utils.get_all_json_task_names()
num_shots = [0]
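
# Mirrors every JSON-based BIG-bench task to the hails/bigbench dataset repo on
# the Hugging Face Hub. Only the zero-shot configuration is pushed, hence the
# '_zero_shot' suffix on each config name.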
for shots in num_shots:
    for task_name in tqdm(all_task_names):
        try:
            print(f"Loading '{task_name}' with num_shots={shots}...")
            task_ds = datasets.load_dataset('bigbench', name=task_name, num_shots=shots)
            print(f"Pushing '{task_name}' with num_shots={shots}...")
            task_ds.push_to_hub('hails/bigbench', task_name + '_zero_shot')
            del task_ds
        except Exception as e:
            raise e
# File: lm-evaluation-harness-main/lm_eval/tasks/blimp/generate_configs.py
import yaml

all_subtasks = ['adjunct_island', 'anaphor_gender_agreement', 'anaphor_number_agreement', 'animate_subject_passive', 'animate_subject_trans', 'causative', 'complex_NP_island', 'coordinate_structure_constraint_complex_left_branch', 'coordinate_structure_constraint_object_extraction', 'determiner_noun_agreement_1', 'determiner_noun_agreement_2', 'determiner_noun_agreement_irregular_1', 'determiner_noun_agreement_irregular_2', 'determiner_noun_agreement_with_adj_2', 'determiner_noun_agreement_with_adj_irregular_1', 'determiner_noun_agreement_with_adj_irregular_2', 'determiner_noun_agreement_with_adjective_1', 'distractor_agreement_relational_noun', 'distractor_agreement_relative_clause', 'drop_argument', 'ellipsis_n_bar_1', 'ellipsis_n_bar_2', 'existential_there_object_raising', 'existential_there_quantifiers_1', 'existential_there_quantifiers_2', 'existential_there_subject_raising', 'expletive_it_object_raising', 'inchoative', 'intransitive', 'irregular_past_participle_adjectives', 'irregular_past_participle_verbs', 'irregular_plural_subject_verb_agreement_1', 'irregular_plural_subject_verb_agreement_2', 'left_branch_island_echo_question', 'left_branch_island_simple_question', 'matrix_question_npi_licensor_present', 'npi_present_1', 'npi_present_2', 'only_npi_licensor_present', 'only_npi_scope', 'passive_1', 'passive_2', 'principle_A_c_command', 'principle_A_case_1', 'principle_A_case_2', 'principle_A_domain_1', 'principle_A_domain_2', 'principle_A_domain_3', 'principle_A_reconstruction', 'regular_plural_subject_verb_agreement_1', 'regular_plural_subject_verb_agreement_2', 'sentential_negation_npi_licensor_present', 'sentential_negation_npi_scope', 'sentential_subject_island', 'superlative_quantifiers_1', 'superlative_quantifiers_2', 'tough_vs_raising_1', 'tough_vs_raising_2', 'transitive', 'wh_island', 'wh_questions_object_gap', 'wh_questions_subject_gap', 'wh_questions_subject_gap_long_distance', 'wh_vs_that_no_gap', 'wh_vs_that_no_gap_long_distance', 'wh_vs_that_with_gap', 'wh_vs_that_with_gap_long_distance'] |
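
# Every BLiMP phenomenon shares the same prompt and metric configuration, so
# each generated YAML only sets the task and dataset names and includes
# _template_yaml for everything else.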
def main() -> None:
    for task in all_subtasks:
        file_name = f'{task}.yaml'
        try:
            with open(f'{file_name}', 'w', encoding='utf-8') as f:
                f.write('# Generated by utils.py\n')
                yaml.dump({'include': '_template_yaml', 'task': 'blimp_' + task, 'dataset_name': task}, f)
        except FileExistsError:
            pass


if __name__ == '__main__':
    main()
# File: lm-evaluation-harness-main/lm_eval/tasks/ceval/_generate_configs.py
""""""

import argparse
import os

import yaml
from tqdm import tqdm

from lm_eval.utils import eval_logger

SUBJECTS = {'computer_network': '计算机网络', 'operating_system': '操作系统', 'computer_architecture': '计算机组成', 'college_programming': '大学编程', 'college_physics': '大学物理', 'college_chemistry': '大学化学', 'advanced_mathematics': '高等数学', 'probability_and_statistics': '概率统计', 'discrete_mathematics': '离散数学', 'electrical_engineer': '注册电气工程师', 'metrology_engineer': '注册计量师', 'high_school_mathematics': '高中数学', 'high_school_physics': '高中物理', 'high_school_chemistry': '高中化学', 'high_school_biology': '高中生物', 'middle_school_mathematics': '初中数学', 'middle_school_biology': '初中生物', 'middle_school_physics': '初中物理', 'middle_school_chemistry': '初中化学', 'veterinary_medicine': '兽医学', 'college_economics': '大学经济学', 'business_administration': '工商管理', 'marxism': '马克思主义基本原理', 'mao_zedong_thought': '毛泽东思想和中国特色社会主义理论体系概论', 'education_science': '教育学', 'teacher_qualification': '教师资格', 'high_school_politics': '高中政治', 'high_school_geography': '高中地理', 'middle_school_politics': '初中政治', 'middle_school_geography': '初中地理', 'modern_chinese_history': '近代史纲要', 'ideological_and_moral_cultivation': '思想道德修养与法律基础', 'logic': '逻辑学', 'law': '法学', 'chinese_language_and_literature': '中国语言文学', 'art_studies': '艺术学', 'professional_tour_guide': '导游资格', 'legal_professional': '法律职业资格', 'high_school_chinese': '高中语文', 'high_school_history': '高中历史', 'middle_school_history': '初中历史', 'civil_servant': '公务员', 'sports_science': '体育学', 'plant_protection': '植物保护', 'basic_medicine': '基础医学', 'clinical_medicine': '临床医学', 'urban_and_rural_planner': '注册城乡规划师', 'accountant': '注册会计师', 'fire_engineer': '注册消防工程师', 'environmental_impact_assessment_engineer': '环境影响评价工程师', 'tax_accountant': '税务师', 'physician': '医师资格'} |
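
# SUBJECTS maps each C-Eval subject slug to its Chinese display name; the
# benchmark covers 52 subjects in total.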

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_yaml_path', required=True)
    parser.add_argument('--save_prefix_path', default='ceval-valid')
    parser.add_argument('--cot_prompt_path', default=None)
    parser.add_argument('--task_prefix', default='')
    return parser.parse_args()

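
# Hypothetical invocation (the base YAML filename below is an assumption, not
# taken from this file):
#   python _generate_configs.py --base_yaml_path _default_ceval_yaml \
#       --save_prefix_path ceval-valid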
if __name__ == '__main__':
    args = parse_args()
    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
    with open(args.base_yaml_path, encoding='utf-8') as f:
        base_yaml = yaml.full_load(f)

    if args.cot_prompt_path is not None:
        import json

        with open(args.cot_prompt_path, encoding='utf-8') as f:
            cot_file = json.load(f)

    for (subject_eng, subject_zh) in tqdm(SUBJECTS.items()):