if __name__ == '__main__':
    args = parse_args()
    base_yaml_name = os.path.basename(args.base_yaml_path)
    with open(args.base_yaml_path, 'r', encoding='utf-8') as f:
        base_yaml = yaml.full_load(f)

    # Optional chain-of-thought prompts: a JSON file mapping each subject to its description.
    if args.cot_prompt_path is not None:
        import json

        with open(args.cot_prompt_path, encoding='utf-8') as f:
            cot_file = json.load(f)

    # Emit one task YAML per (group, subject) pair.
    for group in GROUPS:
        for subject, category in tqdm(SUBJECTS.items()):
            if args.cot_prompt_path is not None:
                description = cot_file[subject]
            else:
                description = f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n"

            yaml_dict = {
                'include': base_yaml_name,
                'tag': f'mmlusr_{args.group_prefix}{group}_{category}' if args.group_prefix else f'mmlusr_{group}_{category}',
                'task': f'mmlusr_{args.task_prefix}{group}_{subject}' if args.task_prefix else f'mmlusr_{group}_{subject}',
                'task_alias': subject.replace('_', ' '),
                'description': description,
                'dataset_name': f'{group}_{subject}',
            }

            file_save_path = os.path.join(args.save_dir, f'{group}_{subject}.yaml')
            with open(file_save_path, 'w', encoding='utf-8') as yaml_file:
                yaml.dump(yaml_dict, yaml_file, allow_unicode=True, default_style='"')
            eval_logger.info(f'Saved YAML for {group} {subject} to {file_save_path}')

    # If a group prefix was given, also write a top-level benchmark config named after it.
    if args.group_prefix:
        file_save_path = os.path.join(args.save_prefix_path, args.group_prefix + '.yaml')
        eval_logger.info(f'Saving benchmark config to {file_save_path}')
        with open(file_save_path, 'w', encoding='utf-8') as yaml_file:
            yaml.dump(yaml_dict, yaml_file, indent=4, default_flow_style=False)
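# Illustrative sketch, not part of the harness source: with default_style='"' PyYAML
# double-quotes every scalar in the generated per-subject files. The group, subject and
# base-YAML names below are made-up placeholders.
import yaml

_example = {
    'include': 'mmlusr_base_template.yaml',  # hypothetical base YAML name
    'task': 'mmlusr_answer_only_abstract_algebra',
    'dataset_name': 'answer_only_abstract_algebra',
}
print(yaml.dump(_example, allow_unicode=True, default_style='"'))
# Expected output (keys sorted alphabetically by PyYAML):
# "dataset_name": "answer_only_abstract_algebra"
# "include": "mmlusr_base_template.yaml"
# "task": "mmlusr_answer_only_abstract_algebra"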
# File: lm-evaluation-harness-main/lm_eval/tasks/mmlusr/question_and_answer/utils.py |
import datasets |
def process_docs(dataset: datasets.Dataset) -> datasets.Dataset: |
    def _helper(doc):
        # Convert the integer answer index into a letter and gather the four choices.
        answer_list = ['A', 'B', 'C', 'D']
        answer_index = int(doc['answer'])
        answer_letter = answer_list[answer_index]
        out_doc = {
            'questions': doc['question'],
            'choices': [doc['choice1'], doc['choice2'], doc['choice3'], doc['choice4']],
            'answer': answer_letter,
        }
        return out_doc

    return dataset.map(_helper)
# File: lm-evaluation-harness-main/lm_eval/tasks/mmlusr/question_only/utils.py |
import datasets |
def process_docs(dataset: datasets.Dataset) -> datasets.Dataset: |
    def _helper(doc):
        answer_list = ['A', 'B', 'C', 'D']
        answer_index = int(doc['answer'])
        answer_letter = answer_list[answer_index]
        out_doc = {
            'questions': doc['question'],
            'choices': [doc['choice1'], doc['choice2'], doc['choice3'], doc['choice4']],
            'answer': answer_letter,
        }
        return out_doc

    return dataset.map(_helper)
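# Usage sketch, assuming a made-up row; real MMLU-SR rows come from the dataset on the
# Hugging Face Hub. process_docs only re-maps fields, so a single in-memory example is
# enough to show the transformation.
_demo = datasets.Dataset.from_list([
    {
        'question': 'What is 2 + 2?',
        'choice1': '3',
        'choice2': '4',
        'choice3': '5',
        'choice4': '6',
        'answer': '1',  # zero-based index into the choices
    }
])
_processed = process_docs(_demo)
# _processed[0]['choices'] -> ['3', '4', '5', '6']
# _processed[0]['answer']  -> 'B'  (index 1 maps to letter B)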
# File: lm-evaluation-harness-main/lm_eval/tasks/model_written_evals/advanced_ai_risk/_generate_configs.py |
import datasets |
import yaml |
from tqdm import tqdm |
def main() -> None:
    dataset_path = 'EleutherAI/advanced_ai_risk'
    # Write one small task YAML per subset of the dataset.
    for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()):
        file_name = f'{task}.yaml'
        try:
            with open(file_name, 'w', encoding='utf-8') as f:
                f.write('# Generated by _generate_configs.py\n')
                yaml.dump(
                    {
                        'include': '_template_yaml',
                        'task': f"{dataset_path.split('/')[-1]}_{task}",
                        'dataset_name': task,
                    },
                    f,
                )
        except FileExistsError:
            pass


if __name__ == '__main__':
    main()
# File: lm-evaluation-harness-main/lm_eval/tasks/model_written_evals/persona/_generate_configs.py |
import datasets |
import yaml |
from tqdm import tqdm |
def main() -> None:
    dataset_path = 'EleutherAI/persona'
    for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()):
        file_name = f'{task}.yaml'
        try:
            with open(file_name, 'w', encoding='utf-8') as f:
                f.write('# Generated by _generate_configs.py\n')
                yaml.dump(
                    {
                        'include': '_template_yaml',
                        'task': f"{dataset_path.split('/')[-1]}_{task}",
                        'dataset_name': task,
                    },
                    f,
                )
        except FileExistsError:
            pass


if __name__ == '__main__':
    main()
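# Illustrative sketch (assumption, not harness source): for a hypothetical subset named
# 'example_trait', either generator above would write example_trait.yaml containing
# roughly the following (PyYAML sorts keys alphabetically):
#
#   # Generated by _generate_configs.py
#   dataset_name: example_trait
#   include: _template_yaml
#   task: persona_example_trait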
# File: lm-evaluation-harness-main/lm_eval/tasks/mutual/utils.py |
import numpy as np |
def process_docs(dataset): |
    def _detokenize(text):
        text = text.replace(" '", "'")
        text = text.replace(' \n', '\n')
        text = text.replace('\n ', '\n')
        text = text.replace(" n't", "n't")
        text = text.replace('`` ', '"')
        text = text.replace("''", '"')