import logging
import re
import signal

import sympy
from sympy.parsing.latex import parse_latex

eval_logger = logging.getLogger('lm-eval')


# Context manager that raises TimeoutError after `seconds` via SIGALRM.
class timeout:
    def __init__(self, seconds=1, error_message='Timeout'):
        self.seconds = seconds
        self.error_message = error_message

    def handle_timeout(self, signum, frame):
        raise TimeoutError(self.error_message)

    def __enter__(self):
        signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.seconds)

    def __exit__(self, type, value, traceback):
        signal.alarm(0)  # cancel any pending alarm
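# Illustrative usage (signal.SIGALRM is POSIX-only, so this guard does not
# work on Windows):
#
#     with timeout(seconds=5, error_message='simplify took too long'):
#         result = sympy.simplify(expr)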
def is_equiv(x1: str, x2: str) -> bool:
    """Whether two LaTeX expressions are symbolically equal, via sympy."""
    try:
        with timeout(seconds=5):
            try:
                parsed_x1 = parse_latex(x1)
                parsed_x2 = parse_latex(x2)
            except (
                sympy.parsing.latex.errors.LaTeXParsingError,
                sympy.SympifyError,
                TypeError,
            ):
                eval_logger.debug(f"couldn't parse one of {x1} or {x2}")
                return False
            try:
                diff = parsed_x1 - parsed_x2
            except TypeError:
                eval_logger.debug(f"couldn't subtract {x1} and {x2}")
                return False
            try:
                if sympy.simplify(diff) == 0:
                    return True
                else:
                    return False
            except ValueError:
                eval_logger.debug(f'Had some trouble simplifying when comparing {x1} and {x2}')
                return False
    except TimeoutError:
        eval_logger.debug(f'Timed out comparing {x1} and {x2}')
        return False
    except ImportError as e:
        # A missing LaTeX parser backend is a setup error, so re-raise it.
        eval_logger.error(e)
        raise
    except Exception as e:
        eval_logger.debug(f'Failed comparing {x1} and {x2} with {e}')
        return False
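# Illustrative behaviour (assumes sympy's LaTeX parser backend,
# antlr4-python3-runtime, is installed; inputs are hypothetical):
#
#     is_equiv('\\frac{1}{2}', '0.5')   # -> True, the difference simplifies to 0
#     is_equiv('x+1', '\\invalid{')     # -> False, the parse failure is logged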
def get_unnormalized_answer(text: str) -> str:
    INVALID_ANSWER = '[invalidanswer]'
    end_seq = 'I hope it is correct.'
    # Append the closing phrase so completions truncated just after the final
    # answer sentence can still match the pattern below.
    text += end_seq
    match = re.search('Final Answer: The final answer is(.*?). I hope it is correct.', text)
    if match:
        return match.group(1).strip()
    else:
        return INVALID_ANSWER
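# Illustrative behaviour on a well-formed Minerva-style completion
# (hypothetical input):
#
#     get_unnormalized_answer(
#         'Final Answer: The final answer is 42. I hope it is correct.'
#     )  # -> '42'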
SUBSTITUTIONS = [
    ('an ', ''),
    ('a ', ''),
    ('.$', '$'),
    ('\\$', ''),
    ('\\ ', ''),
    (' ', ''),
    ('mbox', 'text'),
    (',\\text{and}', ','),
    ('\\text{and}', ','),
    ('\\text{m}', '\\text{}'),
]
REMOVED_EXPRESSIONS = [
    'square', 'ways', 'integers', 'dollars', 'mph', 'inches', 'ft', 'hours',
    'km', 'units', '\\ldots', 'sue', 'points', 'feet', 'minutes', 'digits',
    'cents', 'degrees', 'cm', 'gm', 'pounds', 'meters', 'meals', 'edges',
    'students', 'childrentickets', 'multiples', '\\text{s}', '\\text{.}',
    '\\text{\ns}', '\\text{}^2', '\\text{}^3', '\\text{\n}', '\\text{}',
    '\\mathrm{th}', '^\\circ', '^{\\circ}', '\\;', ',\\!', '{,}', '"', '\\dots',
]
def normalize_final_answer(final_answer: str) -> str:
    """Normalize a final answer to a quantitative reasoning question."""
    final_answer = final_answer.split('=')[-1]
    for before, after in SUBSTITUTIONS:
        final_answer = final_answer.replace(before, after)
    for expr in REMOVED_EXPRESSIONS:
        final_answer = final_answer.replace(expr, '')
    # Extract an answer that is in LaTeX math, is bold, is boxed, etc.
    final_answer = re.sub(r'(.*?)(\$)(.*?)(\$)(.*)', '$\\3$', final_answer)
    final_answer = re.sub(r'(\\text\{)(.*?)(\})', '\\2', final_answer)
    final_answer = re.sub(r'(\\textbf\{)(.*?)(\})', '\\2', final_answer)
    final_answer = re.sub(r'(\\overline\{)(.*?)(\})', '\\2', final_answer)
    final_answer = re.sub(r'(\\boxed\{)(.*)(\})', '\\2', final_answer)
    # Normalize shorthand TeX: fracab -> frac{a}{b}, sqrta -> sqrt{a}
    final_answer = re.sub(r'(frac)([^{])(.)', 'frac{\\2}{\\3}', final_answer)
    final_answer = re.sub(r'(sqrt)([^{])', 'sqrt{\\2}', final_answer)
    final_answer = final_answer.replace('$', '')
    # Normalize 100,000 -> 100000
    if final_answer.replace(',', '').isdigit():
        final_answer = final_answer.replace(',', '')
    return final_answer
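# Illustrative normalizations (hypothetical inputs, traced through the rules
# above):
#
#     normalize_final_answer('$\\boxed{\\frac{1}{2}}$')  # -> '\\frac{1}{2}'
#     normalize_final_answer('1,000 points')             # -> '1000'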
# File: lm-evaluation-harness-main/lm_eval/tasks/mmlu/_generate_configs.py |
"""""" |
import argparse |
import logging |
import os |
import yaml |
from tqdm import tqdm |
eval_logger = logging.getLogger('lm-eval') |
SUBJECTS = {
    'abstract_algebra': 'stem',
    'anatomy': 'stem',
    'astronomy': 'stem',
    'business_ethics': 'other',
    'clinical_knowledge': 'other',
    'college_biology': 'stem',
    'college_chemistry': 'stem',
    'college_computer_science': 'stem',
    'college_mathematics': 'stem',
    'college_medicine': 'other',
    'college_physics': 'stem',
    'computer_security': 'stem',
    'conceptual_physics': 'stem',
    'econometrics': 'social_sciences',
    'electrical_engineering': 'stem',
    'elementary_mathematics': 'stem',
    'formal_logic': 'humanities',
    'global_facts': 'other',
    'high_school_biology': 'stem',
    'high_school_chemistry': 'stem',
    'high_school_computer_science': 'stem',
    'high_school_european_history': 'humanities',
    'high_school_geography': 'social_sciences',
    'high_school_government_and_politics': 'social_sciences',
    'high_school_macroeconomics': 'social_sciences',
    'high_school_mathematics': 'stem',
    'high_school_microeconomics': 'social_sciences',
    'high_school_physics': 'stem',
    'high_school_psychology': 'social_sciences',
    'high_school_statistics': 'stem',
    'high_school_us_history': 'humanities',
    'high_school_world_history': 'humanities',
    'human_aging': 'other',
    'human_sexuality': 'social_sciences',
    'international_law': 'humanities',
    'jurisprudence': 'humanities',
    'logical_fallacies': 'humanities',
    'machine_learning': 'stem',
    'management': 'other',
    'marketing': 'other',
    'medical_genetics': 'other',
    'miscellaneous': 'other',
    'moral_disputes': 'humanities',
    'moral_scenarios': 'humanities',
    'nutrition': 'other',
    'philosophy': 'humanities',
    'prehistory': 'humanities',
    'professional_accounting': 'other',
    'professional_law': 'humanities',
    'professional_medicine': 'other',
    'professional_psychology': 'social_sciences',
    'public_relations': 'social_sciences',
    'security_studies': 'social_sciences',
    'sociology': 'social_sciences',
    'us_foreign_policy': 'social_sciences',
    'virology': 'other',
    'world_religions': 'humanities',
}
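# A minimal sketch (not part of the original script) of inverting SUBJECTS
# into the category -> [subjects] grouping that the generated configs use:
#
#     from collections import defaultdict
#     categories = defaultdict(list)
#     for subject, category in SUBJECTS.items():
#         categories[category].append(subject)
#     # categories['stem'] -> ['abstract_algebra', 'anatomy', ...]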
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_yaml_path', required=True)
    parser.add_argument('--save_prefix_path', default='mmlu')
    parser.add_argument('--cot_prompt_path', default=None)
    parser.add_argument('--task_prefix', default='')
    parser.add_argument('--group_prefix', default='')
    return parser.parse_args()
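# Example invocation (file names are illustrative):
#
#     python _generate_configs.py --base_yaml_path _default_template_yaml \
#         --save_prefix_path mmlu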
if __name__ == '__main__':
    args = parse_args()
    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
    with open(args.base_yaml_path, encoding='utf-8') as f: