def process_results(doc, results):
    # Per-document result: (loglikelihood, weight) pairs that are later
    # aggregated into corpus-level word/byte perplexity and bits-per-byte.
    (loglikelihood,) = results
    _words = len(re.split('\\s+', doc['paragraph']))
    _bytes = len(doc['paragraph'].encode('utf-8'))
    return {'word_perplexity': (loglikelihood, _words), 'byte_perplexity': (loglikelihood, _bytes), 'bits_per_byte': (loglikelihood, _bytes)}
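# Illustrative sketch (not in the original file): one way the
# (loglikelihood, weight) pairs above can be reduced to corpus-level numbers.
# The helper name and formulas are assumptions for illustration, not the
# harness's own aggregation code.
def _aggregate_perplexity_sketch(pairs):
    import math
    # pairs: iterable of (loglikelihood, weight) tuples, e.g. all the
    # 'word_perplexity' values collected across documents.
    total_ll = sum(ll for ll, _ in pairs)
    total_weight = sum(w for _, w in pairs)
    perplexity = math.exp(-total_ll / total_weight)            # e.g. word perplexity
    bits_per_unit = -total_ll / (total_weight * math.log(2))   # e.g. bits per byte
    return perplexity, bits_per_unit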
# File: lm-evaluation-harness-main/lm_eval/tasks/french_bench/utils.py
import collections
import re
import string
import datasets
import evaluate
def normalize_answer(s):
    """Lowercase, strip punctuation, drop French articles, and collapse whitespace."""

    def remove_articles(text):
        regex = re.compile('\\b(un|une|des|le|la|les)\\b', re.UNICODE)
        return re.sub(regex, ' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def exact(predictions, references):
    return int(normalize_answer(references[0]) == normalize_answer(predictions[0]))
def f1(predictions, references):
    gold_toks = get_tokens(references[0])
    pred_toks = get_tokens(predictions[0])
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either side is empty, F1 is 1 when both are empty, else 0.
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = 2 * precision * recall / (precision + recall)
    return f1
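# Illustrative usage (not in the original file): exact() and f1() take
# single-element prediction/reference lists and score them after the
# French-aware normalization above. The strings are made up for the example.
def _example_qa_scores():
    preds = ['la capitale est Paris']
    refs = ['Paris']
    # Normalization drops the article 'la', so the prediction tokenizes to
    # ['capitale', 'est', 'paris'] and the reference to ['paris'].
    return exact(preds, refs), f1(preds, refs)  # -> (0, 0.5)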
def rouge1(items):
    # Pass-through: scoring is deferred to the corpus-level aggregation below.
    return items


def rouge1_agg(items):
    refs = list(zip(*items))[0]
    preds = list(zip(*items))[1]
    rouge_scorer = evaluate.load('rouge')
    return rouge_scorer.compute(predictions=preds, references=refs)['rouge1']
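# Illustrative usage (not in the original file): rouge1() is applied per
# document, and rouge1_agg() unzips the accumulated (reference, prediction)
# pairs and scores them in one batch. Assumes the 'evaluate' package and its
# ROUGE backend are installed; the strings are made up for the example.
def _example_rouge1():
    items = [('le chat dort sur le tapis', 'le chat dort'),
             ('il pleut à Paris', 'il fait beau à Paris')]
    return rouge1_agg(rouge1(items))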
def is_included(items):
    if items[0] in items[1]:
        return True
    return False
def preprocess(text):
    text = text.strip()
    text = text.replace(' [title]', '. ')
    text = re.sub('\\[.*?\\]', '', text)
    text = text.replace('  ', ' ')  # collapse double spaces left by the removals above
    return text
def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _process_doc(doc):
        ctx = doc['ctx_a'] + ' ' + doc['ctx_b'].capitalize()
        out_doc = {
            'query': preprocess(doc['activity_label'] + ': ' + ctx),
            'choices': [preprocess(ending) for ending in doc['endings']],
            'gold': int(doc['label']),
        }
        return out_doc

    return dataset.map(_process_doc)
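# Illustrative usage (not in the original file): a minimal HellaSwag-style
# record run through process_docs(); the field values are made up.
def _example_process_docs():
    raw = datasets.Dataset.from_dict({
        'activity_label': ['Cuisine'],
        'ctx_a': ['Une personne casse des oeufs dans un bol.'],
        'ctx_b': ['elle'],
        'endings': [['les bat au fouet.', 'range le bol.', 'peint le mur.', 'ferme la porte.']],
        'label': ['0'],
    })
    doc = process_docs(raw)[0]
    return doc['query'], doc['choices'], doc['gold']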
# File: lm-evaluation-harness-main/lm_eval/tasks/glianorex/preprocess_glianorex.py
import datasets
def doc_to_text(doc) -> str:
    option_choices = doc['options']
    answers = ''.join(f'{k}. {v}\n' for k, v in option_choices.items())
    return f"Question: {doc['question']}\n{answers}Answer:"


def doc_to_target(doc) -> int:
    return doc['answer_idx']
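# Illustrative usage (not in the original file): the doc layout is inferred
# from the field names above ('question', 'options', 'answer_idx'); the
# values themselves are made up.
def _example_prompt():
    doc = {'question': 'Quel organe est étudié ici ?',
           'options': {'A': 'Le Glianorex', 'B': 'Le foie'},
           'answer_idx': 'A'}
    # doc_to_text(doc) renders:
    # "Question: Quel organe est étudié ici ?\nA. Le Glianorex\nB. Le foie\nAnswer:"
    return doc_to_text(doc), doc_to_target(doc)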
def filter_dataset(dataset: datasets.Dataset, lang: str) -> datasets.Dataset:
    return dataset.filter(lambda example: example['language'].startswith(lang))


def filter_french(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, 'fr')


def filter_english(dataset: datasets.Dataset) -> datasets.Dataset: