text
stringlengths 0
15.3k
|
---|
duplicate_tasks = {task_name for task_name in subtask_names if subtask_names.count(task_name) > 1} |
competing_groups = [group for group in task_dict.keys() if len(set(task_dict[group]).intersection(duplicate_tasks)) > 0] |
if len(duplicate_tasks) > 0: |
raise ValueError(f'Found 1 or more tasks while trying to call get_task_dict() that were members of more than 1 called group: {list(duplicate_tasks)}. Offending groups: {competing_groups}. Please call groups which overlap their constituent tasks in separate evaluation runs.') |
def get_task_dict(task_name_list: Union[str, List[Union[str, Dict, Task]]], task_manager: Optional[TaskManager]=None):
    """Resolve a heterogeneous list of task specifiers into a single task dictionary.

    :param task_name_list: A single task/group name, or a list whose entries are
        task names (str), task config dicts, or already-instantiated Task objects.
    :param task_manager: TaskManager used to resolve string names and config dicts.
        Created lazily if not supplied.
    :return: dict mapping task/group name -> task object (groups map to nested dicts).
    :raises TypeError: if ``task_name_list`` (or one of its entries) has an
        unsupported type.
    :raises ValueError: if a task resolved from a string name collides with a task
        passed as an object, or if a subtask is a member of more than one
        requested group (via ``_check_duplicates``).
    """
    task_name_from_string_dict = {}
    task_name_from_config_dict = {}
    task_name_from_object_dict = {}
    # Normalize the input to a list and validate entry types up front.
    if isinstance(task_name_list, str):
        task_name_list = [task_name_list]
    elif isinstance(task_name_list, list):
        if not all(isinstance(task, (str, dict, Task)) for task in task_name_list):
            raise TypeError("Expected all list items to be of types 'str', 'dict', or 'Task', but at least one entry did not match.")
    else:
        raise TypeError(f"Expected a 'str' or 'list' but received {type(task_name_list)}.")
    string_task_name_list = [task for task in task_name_list if isinstance(task, str)]
    others_task_name_list = [task for task in task_name_list if not isinstance(task, str)]
    if len(string_task_name_list) > 0:
        if task_manager is None:
            task_manager = TaskManager()
        task_name_from_string_dict = task_manager.load_task_or_group(string_task_name_list)
    for task_element in others_task_name_list:
        if isinstance(task_element, dict):
            # Bug fix: a TaskManager is needed for config dicts as well. Previously
            # it was only created when string task names were present, so a call
            # with only dict entries raised AttributeError on None.load_config.
            if task_manager is None:
                task_manager = TaskManager()
            task_name_from_config_dict = {**task_name_from_config_dict, **task_manager.load_config(config=task_element)}
        elif isinstance(task_element, Task):
            task_name_from_object_dict = {**task_name_from_object_dict, get_task_name_from_object(task_element): task_element}
    # Refuse ambiguous requests where the same task name arrives both as a
    # string and as a pre-built Task object.
    if not set(task_name_from_string_dict.keys()).isdisjoint(set(task_name_from_object_dict.keys())):
        raise ValueError("Task names resolved from strings and tasks passed as objects overlap; each task may only be requested once per run.")
    final_task_dict = {**task_name_from_string_dict, **task_name_from_config_dict, **task_name_from_object_dict}
    _check_duplicates(get_subtask_list(final_task_dict))
    return final_task_dict
# File: lm-evaluation-harness-main/lm_eval/tasks/aclue/_generate_configs.py |
"""""" |
import argparse |
import os |
import yaml |
from tqdm import tqdm |
from lm_eval.utils import eval_logger |
# ACLUE subject map: Chinese display name -> English dataset config name.
# The English name is used as the per-subject dataset_name and in the
# generated task/YAML identifiers; the Chinese name appears in the prompt.
SUBJECTS = {'古文单字多义': 'polysemy_resolution', '诗词情感分类': 'poetry_sentiment_analysis', '古汉语命名体识别': 'named_entity_recognition', '古汉语知识': 'basic_ancient_chinese', '古诗词上下句预测': 'poetry_context_prediction', '古文断句': 'sentence_segmentation', '对联': 'couplet_prediction', '古诗词曲鉴赏': 'poetry_appreciate', '国学常识': 'ancient_chinese_culture', '古音学': 'ancient_phonetics', '通假字': 'homographic_character_resolution', '古代文学知识': 'ancient_literature', '医古文': 'ancient_medical', '古诗词质量评估': 'poetry_quality_assessment', '古文阅读理解': 'reading_comprehension'}
def parse_args():
    """Parse command-line options for the ACLUE per-subject config generator."""
    argparser = argparse.ArgumentParser()
    # Register all flags from one table instead of repeated add_argument calls.
    for flag, options in (
        ('--base_yaml_path', {'required': True}),
        ('--save_prefix_path', {'default': 'aclue'}),
        ('--cot_prompt_path', {'default': None}),
        ('--task_prefix', {'default': ''}),
    ):
        argparser.add_argument(flag, **options)
    return argparser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    # Only the file name of the base yaml is referenced from the generated files.
    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
    with open(args.base_yaml_path, encoding='utf-8') as base_file:
        base_yaml = yaml.full_load(base_file)
    # Optional chain-of-thought prompts, keyed by English subject name.
    cot_file = None
    if args.cot_prompt_path is not None:
        import json
        with open(args.cot_prompt_path, encoding='utf-8') as prompt_file:
            cot_file = json.load(prompt_file)
    for subject_zh, subject_eng in tqdm(SUBJECTS.items()):
        if cot_file is not None:
            description = cot_file[subject_eng]
        else:
            description = f'以下是关于{subject_zh}的单项选择题,请直接给出正确答案的选项。\n\n'
        if args.task_prefix != '':
            task_name = f'aclue_{args.task_prefix}_{subject_eng}'
        else:
            task_name = f'aclue_{subject_eng}'
        yaml_dict = {
            'include': base_yaml_name,
            'task': task_name,
            'dataset_name': subject_eng,
            'description': description,
        }
        file_save_path = args.save_prefix_path + f'_{subject_eng}.yaml'
        eval_logger.info(f'Saving yaml for subset {subject_eng} to {file_save_path}')
        with open(file_save_path, 'w', encoding='utf-8') as yaml_file:
            yaml.dump(yaml_dict, yaml_file, width=float('inf'), allow_unicode=True, default_style='"')
# File: lm-evaluation-harness-main/lm_eval/tasks/afrimgsm/utils.py |
import argparse |
import yaml |
languages = ['eng', 'amh', 'ibo', 'fra', 'sna', 'lin', 'wol', 'ewe', 'lug', 'xho', 'kin', 'twi', 'zul', 'orm', 'yor', 'hau', 'sot', 'swa']
languages_REGEX = {'eng': 'The answer is (\\-?[0-9\\.\\,]+)', 'amh': 'መልሱ (\\-?[0-9\\.\\,]+)', 'ibo': 'Azịza ya bụ (\\-?[0-9\\.\\,]+)', 'fra': 'La réponse est(\\-?[0-9\\.\\,]+)', 'sna': 'Mhinduro kumubvunzo ndi (\\-?[0-9\\.\\,]+)', 'lin': 'Eyano ezali (\\-?[0-9\\.\\,]+)', 'wol': 'Tontu li (\\-?[0-9\\.\\,]+)', 'ewe': 'ŋuɖoɖoae nye (\\-?[0-9\\.\\,]+)', 'lug': 'Ansa eri (\\-?[0-9\\.\\,]+)', 'xho': 'Impendulo ngu (\\-?[0-9\\.\\,]+)', 'kin': 'Igisubizo ni (\\-?[0-9\\.\\,]+)', 'twi': 'Ne nnyiano yɛ (\\-?[0-9\\.\\,]+)', 'zul': 'Impendulo ithi (\\-?[0-9\\.\\,]+)', 'orm': 'Deebiin isaa (\\-?[0-9\\.\\,]+)', 'yor': 'Ìdáhùn náà ni (\\-?[0-9\\.\\,]+)', 'hau': 'Amsar ita ce (\\-?[0-9\\.\\,]+)', 'sot': 'Karabo ke (\\-?[0-9\\.\\,]+)', 'swa': 'Jibu ni (\\-?[0-9\\.\\,]+)'}
# Languages with localized QUESTION/step-by-step ANSWER prompts; every other
# language uses the English defaults. DIRECT is always the English 'Answer:'.
_SPECIAL_PROMPTS = {
    'amh': ('ጥያቄ:', 'በቅደም ተከተል መልስ:'),
    'yor': ('Ìbéèrè:', 'Ìdáhùn lẹ́sẹsẹ:'),
}
_DEFAULT_PROMPTS = ('Question:', 'Step-by-Step Answer:')
LANGUAGES = {}
for lang in languages:
    question_prompt, answer_prompt = _SPECIAL_PROMPTS.get(lang, _DEFAULT_PROMPTS)
    LANGUAGES[lang] = {
        'QUESTION': question_prompt,
        'ANSWER': answer_prompt,
        'DIRECT': 'Answer:',
        'REGEX': languages_REGEX[lang],
    }
def add_regex_pattern(regex_pattern):
    """Build the ``filter_list`` config for answer extraction.

    Returns an empty dict when no pattern is supplied; otherwise a config with a
    strict regex match plus a flexible numeric fallback extractor.
    """
    if regex_pattern is None:
        return {}
    strict_filter = {
        'name': 'strict-match',
        'filter': [
            {'function': 'regex', 'regex_pattern': f'{regex_pattern}'},
            {'function': 'take_first'},
        ],
    }
    # Fallback: grab the last number-like token anywhere in the output.
    flexible_filter = {
        'name': 'flexible-extract',
        'filter': [
            {'function': 'regex', 'regex_pattern': '(-?[$0-9.,]{2,})|(-?[0-9]+)', 'group_select': -1},
            {'function': 'take_first'},
        ],
    }
    return {'filter_list': [strict_filter, flexible_filter]}
def gen_lang_yamls(output_dir: str, overwrite: bool, mode: str) -> None: |
err = [] |
for lang in LANGUAGES.keys(): |
try: |
yaml_template = 'cot_yaml' |
filter_list = {} |
DELIMITER = None |
if mode == 'direct': |
ANSWER = LANGUAGES['eng']['DIRECT'] |
QUESTION = LANGUAGES['eng']['QUESTION'] |
REGEX = None |
task_name = f'afrimgsm_direct_{lang}' |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.