import ast

def doc_to_choice(doc):
    return ast.literal_eval(doc['choices'])

DOC_TO_TEXT = '{narrative}\n\n{question}\n\n{choices}\nAnswer:'

def doc_to_text(doc):
    choices = ''
    for i, choice in enumerate(ast.literal_eval(doc['choices'])):
        choices += f'{i + 1} - {choice}\n'
    text = DOC_TO_TEXT.format(narrative=doc['narrative'], question=doc['question'], choices=choices)
    return text
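# Usage sketch (illustrative; the doc values below are hypothetical, matching the
# schema assumed above where 'choices' holds a stringified Python list):
#   doc = {'narrative': 'Sam went to the store.',
#          'question': 'Why did Sam go to the store?',
#          'choices': "['To buy milk', 'To see a friend']"}
#   doc_to_choice(doc)  # -> ['To buy milk', 'To see a friend']
#   doc_to_text(doc)    # -> 'Sam went to the store.\n\nWhy did Sam go to the store?\n\n'
#                       #    '1 - To buy milk\n2 - To see a friend\n\nAnswer:'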
# File: lm-evaluation-harness-main/lm_eval/tasks/logiqa/utils_logiqa.py
def doc_to_text(doc) -> str:
    choices = ['a', 'b', 'c', 'd']
    prompt = 'Passage: ' + doc['context'] + '\n'
    prompt += 'Question: ' + doc['question'] + '\nChoices:\n'
    for choice, option in zip(choices, doc['options']):
        prompt += f'{choice.upper()}. {option}\n'
    prompt += 'Answer:'
    return prompt

def doc_to_target(doc) -> int:
    choices = ['a', 'b', 'c', 'd']
    return choices.index(doc['label'].strip())
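# Usage sketch (illustrative; the doc below is hypothetical but follows the LogiQA
# fields used above: 'context', 'question', 'options', 'label'):
#   doc = {'context': 'All birds can fly. Penguins are birds.',
#          'question': 'Which option is most strongly supported?',
#          'options': ['Penguins can fly.', 'Penguins cannot fly.',
#                      'Some birds cannot fly.', 'None of the above.'],
#          'label': 'a'}
#   doc_to_text(doc) renders:
#     Passage: All birds can fly. Penguins are birds.
#     Question: Which option is most strongly supported?
#     Choices:
#     A. Penguins can fly.
#     B. Penguins cannot fly.
#     C. Some birds cannot fly.
#     D. None of the above.
#     Answer:
#   doc_to_target(doc)  # -> 0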
# File: lm-evaluation-harness-main/lm_eval/tasks/logiqa2/utils_logiqa2.py
def doc_to_text(doc) -> str:
    choices = ['a', 'b', 'c', 'd']
    prompt = 'Passage: ' + doc['text'] + '\n'
    prompt += 'Question: ' + doc['question'] + '\n'
    for choice, option in zip(choices, doc['options']):
        prompt += f'{choice.upper()}. {option}\n'
    prompt += 'Answer:'
    return prompt
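# Usage sketch (illustrative): same shape as the LogiQA prompt above, except the
# passage comes from doc['text'] and no 'Choices:' header line is emitted:
#   Passage: <doc['text']>
#   Question: <doc['question']>
#   A. <option 1>
#   ...
#   Answer: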
# File: lm-evaluation-harness-main/lm_eval/tasks/med_concepts_qa/_generate_configs.py
from typing import List

import yaml

def generate_yaml_content(vocab_name: str, level: str):
    content = {
        'dataset_name': f'{vocab_name}_{level}',
        'tag': f'med_concepts_qa_{vocab_name}_tasks',
        'include': '_default_template_yaml',
        'task': f'med_concepts_qa_{vocab_name}_{level}',
        'task_alias': f'{vocab_name}_{level}',
    }
    return content

def generate_yaml_files(vocab_names: List[str], levels: List[str], file_name_prefix: str):
    for vocab_name in vocab_names:
        for level in levels:
            yaml_content = generate_yaml_content(vocab_name, level)
            filename = f'{file_name_prefix}_{vocab_name}_{level}.yaml'
            with open(filename, 'w') as yaml_file:
                yaml.dump(yaml_content, yaml_file, default_flow_style=False)
            print(f'Generated {filename}')

if __name__ == '__main__':
    generate_yaml_files(
        vocab_names=['icd9cm', 'icd10cm', 'icd9proc', 'icd10proc', 'atc'],
        levels=['easy', 'medium', 'hard'],
        file_name_prefix='med_concepts_qa',
    )
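# Usage sketch (illustrative): for vocab_name='icd10cm' and level='easy', the script
# above writes med_concepts_qa_icd10cm_easy.yaml; with yaml.dump's default key
# sorting the file contents look like:
#   dataset_name: icd10cm_easy
#   include: _default_template_yaml
#   tag: med_concepts_qa_icd10cm_tasks
#   task: med_concepts_qa_icd10cm_easy
#   task_alias: icd10cm_easy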
# File: lm-evaluation-harness-main/lm_eval/tasks/medmcqa/utils_medmcqa.py
def doc_to_text(doc) -> str:
    choices = [doc['opa'], doc['opb'], doc['opc'], doc['opd']]
    option_choices = {'A': choices[0], 'B': choices[1], 'C': choices[2], 'D': choices[3]}
    prompt = 'Question: ' + doc['question'] + '\nChoices:\n'
    for choice, option in option_choices.items():
        prompt += f'{choice.upper()}. {option}\n'
    prompt += 'Answer:'
    return prompt
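# Usage sketch (illustrative; hypothetical doc with MedMCQA's 'question' and
# 'opa'..'opd' fields), doc_to_text renders:
#   Question: <doc['question']>
#   Choices:
#   A. <doc['opa']>
#   B. <doc['opb']>
#   C. <doc['opc']>
#   D. <doc['opd']>
#   Answer: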
# File: lm-evaluation-harness-main/lm_eval/tasks/medqa/preprocess_medqa.py
def doc_to_text(doc) -> str:
    option_choices = {'A': doc['ending0'], 'B': doc['ending1'], 'C': doc['ending2'], 'D': doc['ending3']}
    answers = ''.join(f'{k}. {v}\n' for k, v in option_choices.items())
    return f"Question: {doc['sent1']}\n{answers}Answer:"

def doc_to_target(doc) -> int:
    return doc['label']
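# Usage sketch (illustrative): with the 'sent1' question and 'ending0'..'ending3'
# answer fields used above, doc_to_text renders
#   Question: <doc['sent1']>
#   A. <doc['ending0']>
#   B. <doc['ending1']>
#   C. <doc['ending2']>
#   D. <doc['ending3']>
#   Answer:
# and doc_to_target passes the integer 'label' through unchanged.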
# File: lm-evaluation-harness-main/lm_eval/tasks/mgsm/utils.py
import argparse

import yaml

LANGUAGES = {
    'bn': {'QUESTION': 'প্রশ্ন:', 'ANSWER': 'ধাপে ধাপে উত্তর:', 'DIRECT': 'Answer:', 'REGEX': 'The answer is (\\-?[0-9\\.\\,]+)'},
    'de': {'QUESTION': 'Frage:', 'ANSWER': 'Schritt-für-Schritt-Antwort:', 'DIRECT': 'Antwort:', 'REGEX': 'Die Antwort lautet (\\-?[0-9\\.\\,]+)'},
    'en': {'QUESTION': 'Question:', 'ANSWER': 'Step-by-Step Answer:', 'DIRECT': 'Answer:', 'REGEX': 'The answer is (\\-?[0-9\\.\\,]+)'},
    'es': {'QUESTION': 'Pregunta:', 'ANSWER': 'Respuesta paso a paso:', 'DIRECT': 'Respuesta:', 'REGEX': 'La respuesta es (\\-?[0-9\\.\\,]+)'},
    'fr': {'QUESTION': 'Question :', 'ANSWER': 'Réponse étape par étape :', 'DIRECT': 'Réponse :', 'REGEX': 'La réponse est (\\-?[0-9\\.\\,]+)'},
    'ru': {'QUESTION': 'Задача:', 'ANSWER': 'Пошаговоерешение:', 'DIRECT': 'Answer:', 'REGEX': 'Ответ — (\\-?[0-9\\.\\,]+)'},
    'sw': {'QUESTION': 'Swali:', 'ANSWER': 'Jibu la Hatua kwa Hatua:', 'DIRECT': 'Answer:', 'REGEX': 'Jibu ni (\\-?[0-9\\.\\,]+)'},
    'te': {'QUESTION': 'ప్రశ్న:', 'ANSWER': 'దశలవారీగా సమాధానం:', 'DIRECT': 'Answer:', 'REGEX': 'సమాధానం (\\-?[0-9\\.\\,]+)'},
    'th': {'QUESTION': 'โจทย์:', 'ANSWER': 'คำตอบทีละขั้นตอน:', 'DIRECT': 'Answer:', 'REGEX': 'คำตอบคือ (\\-?[0-9\\.\\,]+)'},
    'ja': {'QUESTION': '問題:', 'ANSWER': 'ステップごとの答え:', 'DIRECT': 'Answer:', 'REGEX': '答えは(\\-?[0-9\\.\\,]+)です。'},
    'zh': {'QUESTION': '问题:', 'ANSWER': '逐步解答:', 'DIRECT': 'Answer:', 'REGEX': '答案是 (\\-?[0-9\\.\\,]+)。'},
}

def add_regex_pattern(regex_pattern):
    if regex_pattern is None:
        return {}
    return {'filter_list': [{'name': 'strict-match', 'filter': [{'function': 'regex', 'regex_pattern': f'{regex_pattern}'}, {'function': 'take_first'}]}, {'name': 'flexible-extract', 'filter': [{'function': 'regex', 'regex_pattern': '(-?[$0-9.,]{2,})|(-?[0-9]+)', 'group_select': -1}, {'function': 'take_first'}]}]}
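# Usage sketch (illustrative): add_regex_pattern(None) returns {} (used by the
# direct-answer mode), while e.g. add_regex_pattern(LANGUAGES['en']['REGEX']) returns:
#   {'filter_list': [
#       {'name': 'strict-match',
#        'filter': [{'function': 'regex', 'regex_pattern': 'The answer is (\\-?[0-9\\.\\,]+)'},
#                   {'function': 'take_first'}]},
#       {'name': 'flexible-extract',
#        'filter': [{'function': 'regex', 'regex_pattern': '(-?[$0-9.,]{2,})|(-?[0-9]+)',
#                    'group_select': -1},
#                   {'function': 'take_first'}]}]}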
def gen_lang_yamls(output_dir: str, overwrite: bool, mode: str) -> None:
    err = []
    for lang in LANGUAGES.keys():
        try:
            QUESTION = LANGUAGES[lang]['QUESTION']
            yaml_template = 'cot_yaml'
            filter_list = {}
            DELIMITER = None
            if mode == 'direct':
                ANSWER = LANGUAGES[lang]['DIRECT']
                REGEX = None
                task_name = f'mgsm_direct_{lang}'
                yaml_template = 'direct_yaml'
            elif mode == 'native-cot':
                ANSWER = LANGUAGES[lang]['ANSWER']
                REGEX = LANGUAGES[lang]['REGEX']