import argparse

import yaml

# NOTE: the dump is truncated here; LANGUAGES (per-language prompt strings and
# answer regexes) and add_regex_pattern are defined earlier in this file.


def gen_lang_yamls(output_dir: str, overwrite: bool, mode: str) -> None:
    """Generate one YAML task config per language for the chosen prompting mode."""
    err = []
    for lang in LANGUAGES.keys():
        try:
            filter_list = {}
            DELIMITER = None
            yaml_template = 'cot_yaml'
            if mode == 'direct':
                ANSWER = LANGUAGES['eng']['DIRECT']
                QUESTION = LANGUAGES['eng']['QUESTION']
                REGEX = None
                task_name = f'afrimgsm_direct_{lang}'
                yaml_template = 'direct_yaml'
            elif mode == 'direct-native':
                ANSWER = LANGUAGES[lang]['DIRECT']
                QUESTION = LANGUAGES[lang]['QUESTION']
                REGEX = None
                task_name = f'afrimgsm_direct_native_{lang}'
                yaml_template = 'direct_native_yaml'
            elif mode == 'native-cot':
                ANSWER = LANGUAGES[lang]['ANSWER']
                REGEX = LANGUAGES[lang]['REGEX']
                QUESTION = LANGUAGES[lang]['QUESTION']
                task_name = f'afrimgsm_native_cot_{lang}'
                filter_list = add_regex_pattern(REGEX)
                DELIMITER = '' if lang in ['zh', 'ja'] else None
            elif mode == 'en-cot':
                ANSWER = LANGUAGES['eng']['ANSWER']
                REGEX = LANGUAGES['eng']['REGEX']
                QUESTION = LANGUAGES['eng']['QUESTION']
                task_name = f'afrimgsm_en_cot_{lang}'
            elif mode == 'translate-direct':
                ANSWER = LANGUAGES['eng']['DIRECT']
                QUESTION = LANGUAGES['eng']['QUESTION']
                REGEX = None
                task_name = f'afrimgsm_translate_direct_{lang}'
                yaml_template = 'translate_direct_yaml'

            file_name = f'{task_name}.yaml'
            ANSWER_TO_SKIP = len(LANGUAGES[lang]['ANSWER']) + 1
            # 'x' mode raises FileExistsError instead of silently overwriting.
            with open(f'{output_dir}/{file_name}', 'w' if overwrite else 'x', encoding='utf8') as f:
                f.write('# Generated by utils.py\n')
                yaml.dump(
                    {
                        'include': yaml_template,
                        'dataset_name': lang,
                        'task': f'{task_name}',
                        'doc_to_text': f'{{% if answer is not none %}}{{{{question+"\\n{ANSWER}"}}}}{{% else %}}{{{{"{QUESTION} "+question+"\\n{ANSWER}"}}}}{{% endif %}}',
                        'doc_to_target': f'{{% if answer is not none %}}{{{{answer[{ANSWER_TO_SKIP}:]}}}}{{% else %}}{{{{answer_number|string}}}}{{% endif %}}',
                        **filter_list,
                        'generation_kwargs': {'until': [QUESTION, '</s>', '<|im_end|>'], 'do_sample': False},
                        **({'target_delimiter': DELIMITER} if DELIMITER else {}),
                    },
                    f,
                    allow_unicode=True,
                    width=float('inf'),
                )
        except FileExistsError:
            err.append(file_name)

    if len(err) > 0:
        raise FileExistsError(f"Files were not created because they already exist (use --overwrite flag): {', '.join(err)}")


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('--overwrite', default=False, action='store_true', help='Overwrite files if they already exist')
    parser.add_argument('--output-dir', default='.', help='Directory to write yaml files to')
    parser.add_argument('--mode', default='native-cot', choices=['direct', 'direct-native', 'native-cot', 'en-cot', 'translate-direct'], help='Mode of chain-of-thought prompting')
    args = parser.parse_args()
    gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite, mode=args.mode)


if __name__ == '__main__':
    main()
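# Usage sketch (not part of the original file): with the module's LANGUAGES
# table defined above, the CLI call
#     python utils.py --mode native-cot --output-dir . --overwrite
# is equivalent to invoking the generator directly:
#     gen_lang_yamls(output_dir='.', overwrite=True, mode='native-cot')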
# File: lm-evaluation-harness-main/lm_eval/tasks/afrimmlu/direct/utils.py
from sklearn.metrics import f1_score


def doc_to_choice(doc):
    # 'choices' is stored as the string form of a Python list; eval turns it
    # back into the list of four option strings.
    choices = eval(doc['choices'])
    return choices


def doc_to_text(doc):
    output = 'You are a highly knowledgeable and intelligent artificial intelligence\n model answers multiple-choice questions about {subject}\n\n Question: {question}\n\n Choices:\n A: {choice1}\n B: {choice2}\n C: {choice3}\n D: {choice4}\n\n Answer: '
    choices = eval(doc['choices'])
    text = output.format(subject=doc['subject'], question=doc['question'], choice1=choices[0], choice2=choices[1], choice3=choices[2], choice4=choices[3])
    return text


def weighted_f1_score(items):
    # items is a list of (gold, pred) pairs; unzip into parallel sequences.
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = f1_score(golds, preds, average='weighted')
    return fscore
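# Illustration (not part of the original file): a hypothetical document with
# made-up field values, showing how the helpers above consume it. 'choices'
# must be the stringified list that eval() expects.
if __name__ == '__main__':
    demo_doc = {
        'subject': 'elementary_mathematics',
        'question': 'What is 7 + 5?',
        'choices': "['10', '11', '12', '13']",
    }
    print(doc_to_choice(demo_doc))  # ['10', '11', '12', '13']
    print(doc_to_text(demo_doc))    # A/B/C/D prompt ending in 'Answer: '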
# File: lm-evaluation-harness-main/lm_eval/tasks/afrimmlu/translate/utils.py
from sklearn.metrics import f1_score


def doc_to_choice(doc):
    choices = eval(doc['choices'])
    return choices


def doc_to_text(doc):
    output = "You are a highly knowledgeable and intelligent artificial intelligence\n model answers multiple-choice questions about '{subject}'\n\n Question: '''{question}'''\n\n Choices:\n A: ''{choice1}'''\n B: ''{choice2}'''\n C: ''{choice3}'''\n D: ''{choice4}'''\n\n Answer: "
    choices = eval(doc['choices'])
    text = output.format(subject=doc['subject'], question=doc['question'], choice1=choices[0], choice2=choices[1], choice3=choices[2], choice4=choices[3])
    return text


def weighted_f1_score(items):
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = f1_score(golds, preds, average='weighted')
    return fscore
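# Sanity check (not part of the original file): weighted_f1_score consumes
# (gold, pred) pairs, one per document; the labels below are made up.
if __name__ == '__main__':
    demo_items = [('A', 'A'), ('B', 'C'), ('C', 'C'), ('D', 'D')]
    print(weighted_f1_score(demo_items))  # ~0.67: F1 weighted by class support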
# File: lm-evaluation-harness-main/lm_eval/tasks/afrimmlu/utils.py
from sklearn.metrics import f1_score


def doc_to_choice(doc):
    choices = eval(doc['choices'])
    return choices


def doc_to_text(doc):
    output = "You are a highly knowledgeable and intelligent artificial intelligence\n model answers multiple-choice questions about '{subject}'\n\n Question: '''{question}'''\n\n Choices:\n A: ''{choice1}'''\n B: ''{choice2}'''\n C: ''{choice3}'''\n D: ''{choice4}'''\n\n Answer: "
    choices = eval(doc['choices'])
    text = output.format(subject=doc['subject'], question=doc['question'], choice1=choices[0], choice2=choices[1], choice3=choices[2], choice4=choices[3])
    return text


def weighted_f1_score(items):
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = f1_score(golds, preds, average='weighted')
    return fscore