import re
import sys
import unicodedata
from lm_eval.filters.extraction import RegexFilter
class MultiChoiceRegexFilter(RegexFilter):
""""""
    def __init__(
        self,
        regex_pattern: str = r'#### (\-?[0-9\.\,]+)',
        group_select=0,
        fallback: str = '[invalid]',
        ignore_case=False,
        ignore_punctuation=False,
        regexes_to_ignore=None,
    ) -> None:
        super().__init__(regex_pattern, group_select, fallback)
        self.ignore_case = ignore_case
        self.ignore_punctuation = ignore_punctuation
        self.regexes_to_ignore = regexes_to_ignore
    def apply(self, resps, docs):
        def find_match(regex, resp, convert_dict={}):
            # Return the group_select-th match, optionally remapped through
            # convert_dict (e.g. a choice string -> its '(A)'-style label).
            match = regex.findall(resp)
            if match:
                match = match[self.group_select]
                if isinstance(match, tuple):
                    match = [m for m in match if m][0]
                match = match.strip()
                if match and match in convert_dict:
                    match = convert_dict[match]
            return match

        # Translation table that deletes every Unicode punctuation character.
        punct_tbl = dict.fromkeys(
            i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P')
        )

        def filter_ignores(st):
            # Normalize a string before comparison: strip ignored patterns,
            # then case and punctuation, according to the filter's settings.
            if self.regexes_to_ignore is not None:
                for s in self.regexes_to_ignore:
                    st = re.sub(s, '', st)
            if self.ignore_case:
                st = st.lower()
            if self.ignore_punctuation:
                st = st.translate(punct_tbl)
            return st
        filtered_resps = []
        for r, doc in zip(resps, docs):
            # Build two per-document fallbacks: one matching the literal choice
            # text, one matching a bare letter after a colon (e.g. 'Answer: B').
            fallback_regexes = []
            choice_to_alpha = {}
            next_alpha = 'A'
            without_paren_fallback_regexes = []
            without_paren_to_target = {}
            choices = doc['choices']
            for c in choices:
                m = filter_ignores(c.strip())
                fallback_regexes.append(f'{re.escape(m)}')
                choice_to_alpha[m] = f'({next_alpha})'
                without_paren_fallback_regexes.append(next_alpha)
                without_paren_to_target[next_alpha] = f'({next_alpha})'
                next_alpha = chr(ord(next_alpha) + 1)
            fallback_regex = re.compile('|'.join(fallback_regexes))
            without_paren_fallback_regex = '|'.join(without_paren_fallback_regexes)
            without_paren_fallback_regex = re.compile(f':[\\s]*({without_paren_fallback_regex})')
            filtered = []
            for resp in r:
                # Try the primary answer regex first, then the two fallbacks,
                # and finally the configured fallback string.
                match = find_match(self.regex, resp)
                if not match:
                    match = find_match(fallback_regex, filter_ignores(resp), choice_to_alpha)
                if not match:
                    match = find_match(without_paren_fallback_regex, resp, without_paren_to_target)
                if not match:
                    match = self.fallback
                filtered.append(match)
            filtered_resps.append(filtered)
        return filtered_resps
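

# Minimal usage sketch with hypothetical data (assumes the RegexFilter base
# class compiles `regex_pattern` into `self.regex`): the first response is
# resolved by matching the literal choice text, the second by the bare letter
# after a colon.
if __name__ == '__main__':
    docs = [{'choices': ['Paris', 'London', 'Berlin', 'Madrid']}]
    resps = [['The capital of France is Paris.', 'Answer: B']]
    flt = MultiChoiceRegexFilter(regex_pattern=r'Answer: \(([A-D])\)')
    print(flt.apply(resps, docs))  # -> [['(A)', '(B)']]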
# File: lm-evaluation-harness-main/lm_eval/tasks/mmlusr/answer_only/utils.py
import datasets
def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _helper(doc):
        # Map the 0-3 integer answer index to its letter label and gather the
        # four choice columns into a single list.
        answer_list = ['A', 'B', 'C', 'D']
        answer_index = int(doc['answer'])
        answer_letter = answer_list[answer_index]
        out_doc = {
            'questions': doc['question'],
            'choices': [doc['choice1'], doc['choice2'], doc['choice3'], doc['choice4']],
            'answer': answer_letter,
        }
        return out_doc

    return dataset.map(_helper)
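

# Minimal sketch with a hypothetical single-row dataset: the integer answer 1
# becomes 'B', and the four choice columns are collected into 'choices'.
if __name__ == '__main__':
    ds = datasets.Dataset.from_dict({
        'question': ['2 + 2 = ?'],
        'choice1': ['3'], 'choice2': ['4'], 'choice3': ['5'], 'choice4': ['6'],
        'answer': [1],
    })
    processed = process_docs(ds)
    print(processed[0]['answer'])   # 'B'
    print(processed[0]['choices'])  # ['3', '4', '5', '6']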
# File: lm-evaluation-harness-main/lm_eval/tasks/mmlusr/config.py
""""""
import argparse
import logging
import os
import yaml
from tqdm import tqdm
eval_logger = logging.getLogger('lm-eval')
SUBJECTS = {
    'abstract_algebra': 'stem',
    'anatomy': 'stem',
    'astronomy': 'stem',
    'business_ethics': 'other',
    'clinical_knowledge': 'other',
    'college_biology': 'stem',
    'college_chemistry': 'stem',
    'college_computer_science': 'stem',
    'college_mathematics': 'stem',
    'college_medicine': 'other',
    'college_physics': 'stem',
    'computer_security': 'stem',
    'conceptual_physics': 'stem',
    'econometrics': 'social_sciences',
    'electrical_engineering': 'stem',
    'elementary_mathematics': 'stem',
    'formal_logic': 'humanities',
    'global_facts': 'other',
    'high_school_biology': 'stem',
    'high_school_chemistry': 'stem',
    'high_school_computer_science': 'stem',
    'high_school_european_history': 'humanities',
    'high_school_geography': 'social_sciences',
    'high_school_government_and_politics': 'social_sciences',
    'high_school_macroeconomics': 'social_sciences',
    'high_school_mathematics': 'stem',
    'high_school_microeconomics': 'social_sciences',
    'high_school_physics': 'stem',
    'high_school_psychology': 'social_sciences',
    'high_school_statistics': 'stem',
    'high_school_us_history': 'humanities',
    'high_school_world_history': 'humanities',
    'human_aging': 'other',
    'human_sexuality': 'social_sciences',
    'international_law': 'humanities',
    'jurisprudence': 'humanities',
    'logical_fallacies': 'humanities',
    'machine_learning': 'stem',
    'management': 'other',
    'marketing': 'other',
    'medical_genetics': 'other',
    'miscellaneous': 'other',
    'moral_disputes': 'humanities',
    'moral_scenarios': 'humanities',
    'nutrition': 'other',
    'philosophy': 'humanities',
    'prehistory': 'humanities',
    'professional_accounting': 'other',
    'professional_law': 'humanities',
    'professional_medicine': 'other',
    'professional_psychology': 'social_sciences',
    'public_relations': 'social_sciences',
    'security_studies': 'social_sciences',
    'sociology': 'social_sciences',
    'us_foreign_policy': 'social_sciences',
    'virology': 'other',
    'world_religions': 'humanities',
}
GROUPS = ['question_and_answer']
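# Sanity-check sketch: SUBJECTS assigns each of the 57 MMLU subjects to one of
# four categories (counts computed from the mapping above):
#
#     from collections import Counter
#     Counter(SUBJECTS.values())
#     # Counter({'stem': 19, 'other': 13, 'humanities': 13, 'social_sciences': 12})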
def parse_args():
    parser = argparse.ArgumentParser(
        description='Generate configuration YAML files for LM Evaluation Harness.'
    )
    parser.add_argument('--base_yaml_path', required=True, help='Path to the base YAML configuration file.')
    parser.add_argument(
        '--save_dir',
        default='/data/local/cat/lm-evaluation-harness/lm_eval/tasks/mmlusr/question_and_answer',
    )
    parser.add_argument('--task_prefix', default='')
    parser.add_argument('--cot_prompt_path', default=None)
    parser.add_argument('--group_prefix', default='')
    return parser.parse_args()
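

# Hypothetical invocation sketch (file names here are examples, not confirmed
# paths in the repo):
#
#     python lm_eval/tasks/mmlusr/config.py \
#         --base_yaml_path lm_eval/tasks/mmlusr/question_and_answer/_default_template_yaml \
#         --save_dir lm_eval/tasks/mmlusr/question_and_answer \
#         --task_prefix question_and_answer_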