            pass


if __name__ == '__main__':
    main()
# File: lm-evaluation-harness-main/lm_eval/tasks/gpqa/generative/utils.py
import random
import re

import datasets


def preprocess(text):
    if text is None:
        return ' '
    text = text.strip()
    text = text.replace(' [title]', '. ')
    # Drop bracketed annotations such as citation markers.
    text = re.sub('\\[.*?\\]', '', text)
    # Collapse the double spaces left behind by the removals above.
    text = text.replace('  ', ' ')
    return text
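

# Illustration (editorial, not part of the source): preprocess strips
# bracketed markers and tidies the whitespace they leave behind, e.g.
#
#   preprocess('The speed of light [1] is constant.')
#   -> 'The speed of light is constant.'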
def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _process_doc(doc):
        # Shuffle the three distractors together with the correct answer,
        # then record where the correct answer landed.
        choices = [
            preprocess(doc['Incorrect Answer 1']),
            preprocess(doc['Incorrect Answer 2']),
            preprocess(doc['Incorrect Answer 3']),
            preprocess(doc['Correct Answer']),
        ]
        random.shuffle(choices)
        correct_answer_index = choices.index(preprocess(doc['Correct Answer']))
        out_doc = {
            'choice1': choices[0],
            'choice2': choices[1],
            'choice3': choices[2],
            'choice4': choices[3],
            'choices': [choices[0], choices[1], choices[2], choices[3]],
            'answer': f'({chr(65 + correct_answer_index)})',
        }
        return out_doc

    return dataset.map(_process_doc)
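

# Usage sketch (editorial; the record below is invented, field names follow
# the GPQA schema read by _process_doc above):
#
#   >>> import datasets
#   >>> ds = datasets.Dataset.from_list([{
#   ...     'Correct Answer': 'Paris',
#   ...     'Incorrect Answer 1': 'London',
#   ...     'Incorrect Answer 2': 'Berlin',
#   ...     'Incorrect Answer 3': 'Madrid',
#   ... }])
#   >>> out = process_docs(ds)[0]
#   >>> out['answer']  # letter of wherever 'Paris' landed, e.g. '(C)'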
# File: lm-evaluation-harness-main/lm_eval/tasks/gpqa/n_shot/_generate_configs.py
import yaml
from tqdm import tqdm


def main() -> None:
    subset = ['extended', 'diamond', 'main']

    for task in tqdm(subset):
        file_name = f'gpqa_{task}_n_shot.yaml'
        try:
            with open(file_name, 'w') as f:
                f.write('# Generated by _generate_configs.py\n')
                yaml.dump(
                    {
                        'include': '_gpqa_n_shot_yaml',
                        'task': f'gpqa_{task}_n_shot',
                        'dataset_name': f'gpqa_{task}',
                    },
                    f,
                )
        except FileExistsError:
            pass


if __name__ == '__main__':
    main()
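

# Expected output (editorial sketch): main() writes three YAML files, e.g.
# gpqa_main_n_shot.yaml containing roughly:
#
#   # Generated by _generate_configs.py
#   dataset_name: gpqa_main
#   include: _gpqa_n_shot_yaml
#   task: gpqa_main_n_shot
#
# yaml.dump sorts keys alphabetically by default, hence this ordering.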
# File: lm-evaluation-harness-main/lm_eval/tasks/gpqa/n_shot/utils.py
import random
import re

import datasets


def preprocess(text):
    if text is None:
        return ' '
    text = text.strip()
    text = text.replace(' [title]', '. ')
    text = re.sub('\\[.*?\\]', '', text)
    # Collapse the double spaces left behind by the removals above.
    text = text.replace('  ', ' ')
    return text


# Module-level RNG with a fixed seed so the choice order is reproducible
# across runs, unlike the unseeded shuffle in the generative variant above.
rng = random.Random(42)
def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _process_doc(doc):
        choices = [
            preprocess(doc['Incorrect Answer 1']),
            preprocess(doc['Incorrect Answer 2']),
            preprocess(doc['Incorrect Answer 3']),
            preprocess(doc['Correct Answer']),
        ]
        rng.shuffle(choices)
        correct_answer_index = choices.index(preprocess(doc['Correct Answer']))
        out_doc = {
            'choice1': choices[0],
            'choice2': choices[1],
            'choice3': choices[2],
            'choice4': choices[3],
            'answer': f'({chr(65 + correct_answer_index)})',
        }
        return out_doc

    return dataset.map(_process_doc)
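

# Note (editorial): because rng is a single shared instance, each document's
# shuffle consumes state left by the previous one, so results are reproducible
# only when the dataset is mapped in the same order every time.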
# File: lm-evaluation-harness-main/lm_eval/tasks/gpqa/zeroshot/_generate_configs.py
import yaml
from tqdm import tqdm


def main() -> None:
    subset = ['extended', 'diamond', 'main']
    setting = 'zeroshot'

    for task in tqdm(subset):
        file_name = f'gpqa_{task}_{setting}.yaml'
        try:
            with open(file_name, 'w') as f:
                f.write('# Generated by _generate_configs.py\n')
                yaml.dump(
                    {
                        'include': f'_gpqa_{setting}_yaml',
                        'task': f'gpqa_{task}_{setting}',
                        'dataset_name': f'gpqa_{task}',
                    },
                    f,
                )
        except FileExistsError:
            pass


if __name__ == '__main__':
    main()
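

# Output note (editorial): this variant produces gpqa_extended_zeroshot.yaml,
# gpqa_diamond_zeroshot.yaml, and gpqa_main_zeroshot.yaml, structured like the
# n_shot example shown earlier but with the zeroshot include and task names.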
# File: lm-evaluation-harness-main/lm_eval/tasks/gpqa/zeroshot/utils.py
import random
import re

import datasets


def preprocess(text):
    if text is None:
        return ' '
    text = text.strip()
    text = text.replace(' [title]', '. ')
    text = re.sub('\\[.*?\\]', '', text)
    # Collapse the double spaces left behind by the removals above.
    text = text.replace('  ', ' ')
    return text