# (top of file truncated: this excerpt is the tail of the MMLU config
# generator; the SUBJECTS subject -> category mapping and the argparse
# `args` namespace are defined above it)
import json
import os

import yaml
from tqdm import tqdm

from lm_eval.utils import eval_logger

# Filename of the base yaml, referenced via `include` from every generated config.
base_yaml_name = os.path.split(args.base_yaml_path)[-1]
with open(args.base_yaml_path, encoding="utf-8") as f:
    base_yaml = yaml.full_load(f)

# Optional per-subject chain-of-thought prompts, keyed by subject name.
if args.cot_prompt_path is not None:
    with open(args.cot_prompt_path, encoding="utf-8") as f:
        cot_file = json.load(f)

ALL_CATEGORIES = []
for subject, category in tqdm(SUBJECTS.items()):
    if category not in ALL_CATEGORIES:
        ALL_CATEGORIES.append(category)

    if args.cot_prompt_path is not None:
        description = cot_file[subject]
    else:
        description = f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n"

    yaml_dict = {
        "include": base_yaml_name,
        "tag": f"mmlu_{args.task_prefix}_{category}" if args.task_prefix != "" else f"mmlu_{category}",
        "task": f"mmlu_{args.task_prefix}_{subject}" if args.task_prefix != "" else f"mmlu_{subject}",
        "task_alias": subject.replace("_", " "),
        "dataset_name": subject,
        "description": description,
    }

    file_save_path = args.save_prefix_path + f"_{subject}.yaml"
    eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}")
    with open(file_save_path, "w", encoding="utf-8") as yaml_file:
        yaml.dump(yaml_dict, yaml_file, allow_unicode=True, default_style='"')

if args.task_prefix != "":
    mmlu_subcategories = [f"mmlu_{args.task_prefix}_{category}" for category in ALL_CATEGORIES]
else:
    mmlu_subcategories = [f"mmlu_{category}" for category in ALL_CATEGORIES]

if args.group_prefix != "":
    file_save_path = args.group_prefix + ".yaml"
else:
    file_save_path = args.save_prefix_path + ".yaml"

eval_logger.info(f"Saving benchmark config to {file_save_path}")
with open(file_save_path, "w", encoding="utf-8") as yaml_file:
    yaml.dump(
        {"group": f"mmlu_{args.task_prefix}" if args.task_prefix != "" else "mmlu", "task": mmlu_subcategories},
        yaml_file, indent=4, default_flow_style=False,
    )
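
# Illustrative invocation (hypothetical: the script and base-yaml filenames are
# assumptions; the flag names mirror the argparse fields used above):
#
#   python _generate_configs.py --base_yaml_path _default_template_yaml --save_prefix_path mmlu
#
# For a subject such as "abstract_algebra" in category "stem" (the SUBJECTS
# mapping is not shown in this excerpt), this writes mmlu_abstract_algebra.yaml
# roughly as follows (yaml.dump sorts keys, and default_style='"' double-quotes
# every scalar):
#
#   "dataset_name": "abstract_algebra"
#   "description": "The following are multiple choice questions (with answers) about abstract algebra.\n\n"
#   "include": "_default_template_yaml"
#   "tag": "mmlu_stem"
#   "task": "mmlu_abstract_algebra"
#   "task_alias": "abstract algebra"
#
# A final group config (mmlu.yaml here) then lists every mmlu_<category> task
# under the `mmlu` group.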
# File: lm-evaluation-harness-main/lm_eval/tasks/mmlu/flan_cot_zeroshot/utils.py
import re
import sys
import unicodedata

from lm_eval.filters.extraction import RegexFilter


class MultiChoiceRegexFilter(RegexFilter):
    """Extract a letter answer from multiple-choice responses. Tries the base
    regex first; if that fails, falls back to matching the answer-choice text
    itself (mapped back to its letter), then to a bare letter after a colon
    (e.g. "Answer: B"). Assumes each doc carries a "choices" list."""

    def __init__(self, regex_pattern: str = r"#### (\-?[0-9\.\,]+)", group_select=0,
                 fallback: str = "[invalid]", ignore_case=False,
                 ignore_punctuation=False, regexes_to_ignore=None) -> None:
        super().__init__(regex_pattern, group_select, fallback)
        self.ignore_case = ignore_case
        self.ignore_punctuation = ignore_punctuation
        self.regexes_to_ignore = regexes_to_ignore
    def apply(self, resps, docs):
        # `resps` is a list of response lists, one per document; each inner
        # list is filtered independently.

        def find_match(regex, resp, convert_dict={}):
            # Return the selected match, remapped through convert_dict when
            # possible (e.g. normalized choice text -> "(A)").
            match = regex.findall(resp)
            if match:
                match = match[self.group_select]
                if isinstance(match, tuple):
                    match = [m for m in match if m][0]
                match = match.strip()
                if match and match in convert_dict:
                    match = convert_dict[match]
            return match

        # Translation table that deletes every Unicode punctuation character.
        punct_tbl = dict.fromkeys(
            i for i in range(sys.maxunicode)
            if unicodedata.category(chr(i)).startswith("P")
        )

        def filter_ignores(st):
            # Normalize a string according to the configured ignore options.
            if self.regexes_to_ignore is not None:
                for s in self.regexes_to_ignore:
                    st = re.sub(s, "", st)
            if self.ignore_case:
                st = st.lower()
            if self.ignore_punctuation:
                st = st.translate(punct_tbl)
            return st
        filtered_resps = []
        for r, doc in zip(resps, docs):
            # Build per-document fallback patterns from the answer choices.
            fallback_regexes = []
            choice_to_alpha = {}
            next_alpha = "A"

            without_paren_fallback_regexes = []
            without_paren_to_target = {}

            choices = doc["choices"]
            for c in choices:
                m = filter_ignores(c.strip())
                fallback_regexes.append(f"{re.escape(m)}")
                choice_to_alpha[m] = f"({next_alpha})"

                without_paren_fallback_regexes.append(next_alpha)
                without_paren_to_target[next_alpha] = f"({next_alpha})"

                next_alpha = chr(ord(next_alpha) + 1)

            # Matches any choice's (normalized) text verbatim.
            fallback_regex = re.compile("|".join(fallback_regexes))
            # Matches a bare letter after a colon, e.g. "Answer: B".
            without_paren_fallback_regex = "|".join(without_paren_fallback_regexes)
            without_paren_fallback_regex = re.compile(
                rf":[\s]*({without_paren_fallback_regex})"
            )

            filtered = []
            for resp in r:
                match = find_match(self.regex, resp)
                if not match:
                    match = find_match(
                        fallback_regex, filter_ignores(resp), choice_to_alpha
                    )
                    if not match:
                        match = find_match(
                            without_paren_fallback_regex, resp, without_paren_to_target
                        )
                if not match:
                    match = self.fallback
                filtered.append(match)
            filtered_resps.append(filtered)

        return filtered_resps
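
# Illustrative usage (not from the repo; the pattern and doc below are made up
# to show the three matching stages in order):
#
#   f = MultiChoiceRegexFilter(regex_pattern=r"answer is (\([A-D]\))",
#                              ignore_case=True, ignore_punctuation=True)
#   doc = {"choices": ["Paris", "London", "Rome", "Berlin"]}
#   resps = [[
#       "The answer is (A).",      # primary regex        -> "(A)"
#       "I believe it is Paris.",  # choice-text fallback -> "(A)"
#       "Answer: C",               # ":<letter>" fallback -> "(C)"
#       "no idea",                 # nothing matches      -> "[invalid]"
#   ]]
#   f.apply(resps, [doc])          # [["(A)", "(A)", "(C)", "[invalid]"]]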
# File: lm-evaluation-harness-main/lm_eval/tasks/mmlu/flan_n_shot/generative/utils.py