import numpy as np


# NOTE: the top of this file is truncated in this dump; the enclosing
# definitions below are reconstructed from the calls that survive.
def process_docs(dataset):
    def _detokenize(text):
        # Undo tokenization artifacts: drop the space before punctuation.
        text = text.replace(' :', ':')
        text = text.replace(' ;', ';')
        text = text.replace(' !', '!')
        text = text.replace(' ?', '?')
        text = text.replace(' ,', ',')
        text = text.replace(' .', '.')
        return text

    def _process(doc):
        return {'article': _detokenize(doc['article']), 'options': [_detokenize(option) for option in doc['options']]}

    return dataset.map(_process)
def process_results(doc, results):
    gold = ['A', 'B', 'C', 'D'].index(doc['answers'])
    # r@1: the gold option has the single highest score.
    r4_1 = np.argmax(results) == gold
    ranks = sorted(results, reverse=True)
    # r@2: the gold option is ranked first or second.
    r4_2 = (ranks.index(results[gold]) == 1) + r4_1
    # Mean reciprocal rank; `ranks` is 0-indexed, hence the +1.
    mrr = 1.0 / (ranks.index(results[gold]) + 1)
    return {'r@1': r4_1, 'r@2': r4_2, 'mrr': mrr}
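
# Sketch (not part of the harness): a quick sanity check of process_results
# with made-up per-option scores. The gold option 'B' scores highest, so r@1
# and r@2 are both hits and the reciprocal rank is 1.0.
if __name__ == '__main__':
    print(process_results({'answers': 'B'}, [-2.3, -0.4, -1.7, -3.1]))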
# File: lm-evaluation-harness-main/lm_eval/tasks/noticia/utils.py
import string

import evaluate


def clean_text(text: str) -> str:
    # Strip punctuation, collapse whitespace, and lowercase before scoring.
    text = text.translate(str.maketrans('', '', string.punctuation))
    text = text.replace('\n', ' ').strip()
    text = ' '.join(text.split()).strip()
    text = text.lower()
    return text


def rouge1(items):
    # Passthrough: the corpus-level score is computed in rouge1_agg.
    return items


def average_len(items):
    # Passthrough: the corpus-level average is computed in average_len_agg.
    return items


def rouge1_agg(items):
    # items is a list of (reference, prediction) pairs.
    refs = list(zip(*items))[0]
    refs = [[clean_text(ref)] for ref in refs]
    preds = [clean_text(x) for x in list(zip(*items))[1]]
    rouge_scorer = evaluate.load('rouge')
    return rouge_scorer.compute(predictions=preds, references=refs)['rouge1']


def average_len_agg(items):
    # Mean prediction length in whitespace-separated tokens.
    preds = [clean_text(x) for x in list(zip(*items))[1]]
    return sum(len(x.split()) for x in preds) / len(preds)
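
# Sketch (not part of the harness): wiring the passthrough and aggregation
# functions together on one made-up (reference, prediction) pair. Loading the
# metric needs the `rouge_score` package and may download the metric script on
# first use.
if __name__ == '__main__':
    demo_items = [('The cat sat on the mat.', 'A cat sat on a mat.')]
    print(rouge1_agg(rouge1(demo_items)))  # unigram F1, roughly 0.67 here
    print(average_len_agg(average_len(demo_items)))  # -> 6.0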
# File: lm-evaluation-harness-main/lm_eval/tasks/okapi/arc_multilingual/utils.py
import re

import datasets


def preprocess(text):
    text = text.strip()
    text = text.replace(' [title]', '. ')
    text = re.sub(r'\[.*?\]', '', text)
    text = text.replace('  ', ' ')  # collapse double spaces left by the sub
    return text


def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _process_doc(doc):
        # Not every document has all five options, so drop the empty ones.
        options = [doc['option_a'], doc['option_b'], doc['option_c'], doc['option_d'], doc['option_e']]
        out_doc = {
            'id': doc['id'],
            'query': 'Question: ' + preprocess(doc['instruction']) + '\nAnswer:',
            'choices': [preprocess(option) for option in options if option],
            'gold': ['A', 'B', 'C', 'D', 'E'].index(doc['answer']),
        }
        return out_doc

    return dataset.map(_process_doc)
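
# Sketch (not part of the harness): process_docs on a tiny in-memory dataset.
# Field values are made up; only the schema matches the m_arc data.
if __name__ == '__main__':
    demo = datasets.Dataset.from_list([{
        'id': '1',
        'instruction': 'Which planet is largest?',
        'option_a': 'Jupiter', 'option_b': 'Mars', 'option_c': 'Venus',
        'option_d': 'Mercury', 'option_e': None,
        'answer': 'A',
    }])
    processed = process_docs(demo)[0]
    print(processed['query'])    # Question: Which planet is largest?\nAnswer:
    print(processed['choices'])  # ['Jupiter', 'Mars', 'Venus', 'Mercury']
    print(processed['gold'])     # 0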
# File: lm-evaluation-harness-main/lm_eval/tasks/okapi/hellaswag_multilingual/utils.py
import re

import datasets


def preprocess(text):
    # Strip the WikiHow-style bracketed markers (e.g. '[header]', '[step]').
    text = text.strip()
    text = text.replace(' [title]', '. ')
    text = re.sub(r'\[.*?\]', '', text)
    text = text.replace('  ', ' ')  # collapse double spaces left by the sub
    return text


def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _process_doc(doc):
        ctx = doc['ctx_a'] + ' ' + doc['ctx_b'].capitalize()
        out_doc = {
            'query': preprocess(doc['activity_label'] + ': ' + ctx),
            'choices': [preprocess(ending) for ending in doc['endings']],
            'gold': int(doc['label']),
        }
        return out_doc

    return dataset.map(_process_doc)
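
# Sketch (not part of the harness): what preprocess does to a made-up
# WikiHow-style context.
if __name__ == '__main__':
    print(preprocess('Baking: [header] Preheat the oven. [step] Mix the batter.'))
    # -> 'Baking: Preheat the oven. Mix the batter.'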
# File: lm-evaluation-harness-main/lm_eval/tasks/okapi/mmlu_multilingual/_generate_configs.py
import datasets
import yaml
from tqdm import tqdm


def main() -> None:
    dataset_path = 'alexandrainst/m_mmlu'
    # Emit one YAML config per language split of the m_mmlu dataset.
    for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()):
        file_name = f'm_mmlu_{task}.yaml'
        try:
            # Mode 'x' skips configs that already exist; mode 'w' would never
            # raise the FileExistsError handled below.
            with open(file_name, 'x') as f:
                f.write('# Generated by _generate_configs.py\n')
                yaml.dump(
                    {
                        'include': '_default_yaml',
                        'task': f"{dataset_path.split('/')[-1]}_{task}",
                        'dataset_name': task,
                    },
                    f,
                )
        except FileExistsError:
            pass
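
# Sketch of the output: for a split named 'en', m_mmlu_en.yaml would read
#   # Generated by _generate_configs.py
#   dataset_name: en
#   include: _default_yaml
#   task: m_mmlu_en
# (yaml.dump sorts keys alphabetically by default.)
# Assumed entry point; querying the dataset infos requires network access.
if __name__ == '__main__':
    main()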