def agg_f1(items):
    predictions, references = zip(*items)
    references, predictions = np.asarray(references), np.asarray(predictions)
    return sklearn.metrics.f1_score(references, predictions)
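# Illustrative sketch (not part of the original file): `agg_f1` receives
# (prediction, reference) 0/1 label pairs and computes one binary F1 over all
# answers. The helper name `_demo_multirc_f1` is hypothetical.
def _demo_multirc_f1():
    items = [(1, 1), (0, 1), (1, 0), (0, 0)]
    return agg_f1(items)  # -> 0.5 (precision 0.5, recall 0.5)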
def em(predictions, references):
    # Per-item passthrough: references encode 'group_label'; a prediction
    # outside the label set is scored as the opposite of the reference.
    _prediction = predictions[0]
    _group, _reference = references[0].split('_')
    string_label = ['False', 'True']
    reference = string_label.index(_reference)
    prediction = string_label.index(_prediction) if _prediction in string_label else not bool(reference)
    return (_group, prediction, reference)
def agg_em(items):
    grouped_values = collections.defaultdict(lambda: ([], []))
    for group, prediction, reference in items:
        grouped_values[group][0].append(reference)
        grouped_values[group][1].append(prediction)
    group_scores = []
    for group, (targets, predictions) in grouped_values.items():
        score = float(np.array_equal(targets, predictions))
        group_scores.append(score)
    return np.mean(group_scores)
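# Illustrative sketch (not part of the original file): how `em` and `agg_em`
# compose for MultiRC. The helper name `_demo_multirc_em` is hypothetical.
def _demo_multirc_em():
    items = [
        em(['True'], ['q1_True']),    # -> ('q1', 1, 1)
        em(['False'], ['q1_False']),  # -> ('q1', 0, 0)
        em(['True'], ['q2_False']),   # -> ('q2', 1, 0), wrong answer
    ]
    # A question scores 1.0 only if every answer in its group matches,
    # so q1 -> 1.0 and q2 -> 0.0, giving a mean of 0.5.
    return agg_em(items)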
# File: lm-evaluation-harness-main/lm_eval/tasks/super_glue/record/t5_utils.py
import collections
import re
import string
import numpy as np
from datasets import Dataset
from lm_eval.api.metrics import metric_max_over_ground_truths
def doc_to_text(doc):
    passage = doc['passage']
    passage = re.sub('(\\.|\\?|\\!|\\"|\\\')\\n@highlight\\n', '\\1 ', passage)
    passage = re.sub('\\n@highlight\\n', '. ', passage)
    return ' '.join(['record query:', doc['query'], 'entities:', ', '.join(doc['entities']), 'passage:', passage])
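# Illustrative sketch (not part of the original file): the two re.sub calls
# above fold ReCoRD's '@highlight' separators back into ordinary sentence
# punctuation. The helper name `_demo_record_doc_to_text` is hypothetical.
def _demo_record_doc_to_text():
    doc = {
        'passage': 'A fox was seen.\n@highlight\nFox jumps over the dog',
        'query': 'What does the @placeholder jump over?',
        'entities': ['fox', 'dog'],
    }
    # Returns: 'record query: What does the @placeholder jump over? entities: fox, dog passage: A fox was seen. Fox jumps over the dog'
    return doc_to_text(doc)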
def process_docs(dataset):
    def split_answers(doc):
        split_doc = {**{k: [] for k in doc.keys()}}
        answers = doc.pop('answers')
        for idx, answer in enumerate(answers):
            for key in split_doc.keys():
                if key in doc:
                    split_doc[key].append(doc[key])
            split_doc['answers'].append(answer)
        return split_doc
    dataset = dataset.map(split_answers)
    new_dataset = {}
    # Flatten: map() wrapped every column value in a per-answer list, so each
    # original doc expands into one row per gold answer.
    for key in dataset.features.keys():
        new_dataset[key] = [x for row in dataset[key] for x in row]
    return Dataset.from_dict(new_dataset)
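# Illustrative sketch (not part of the original file): a ReCoRD doc with two
# gold answers expands into two rows, duplicating every other column.
# The helper name `_demo_record_process_docs` is hypothetical.
def _demo_record_process_docs():
    ds = Dataset.from_dict({
        'passage': ['p'],
        'query': ['q'],
        'entities': [['A', 'B']],
        'answers': [['A', 'B']],
    })
    return process_docs(ds)  # -> 2 rows, with answers 'A' and 'B'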
def normalize_squad(answer):
    def _normalize_answer(text, punc_chars, punc_repl):
        def remove_articles(s):
            return re.sub('\\b(a|an|the)\\b', ' ', s)
        def replace_punctuation(s):
            to_replace = set(punc_chars)
            return ''.join((punc_repl if ch in to_replace else ch for ch in s))
        def white_space_fix(s):
            return ' '.join(s.split())
        text = text.lower()
        text = replace_punctuation(text)
        text = remove_articles(text)
        text = white_space_fix(text)
        return text
    return _normalize_answer(answer, punc_chars=string.punctuation, punc_repl='')
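# Illustrative sketch (not part of the original file): normalization lowercases,
# strips punctuation and English articles, and collapses whitespace.
# The helper name `_demo_normalize_squad` is hypothetical.
def _demo_normalize_squad():
    return normalize_squad('The  Eiffel Tower!')  # -> 'eiffel tower'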
def em(predictions, references):
    # Passthrough: per-example scoring is deferred to squad_em_agg below.
    return (predictions[0], references[0])
def f1(predictions, references):
    # Passthrough: per-example scoring is deferred to squad_f1_agg below.
    return (predictions[0], references[0])
def squad_em_agg(items):
    def _exact_match_score(prediction, target):
        return target == prediction
    grouped_values = collections.defaultdict(lambda: ([], []))
    for prediction, reference in items:
        # the reference string encodes '<group id>_<gold answer>'
        group, reference = reference.split('_')
        grouped_values[group][0].append(normalize_squad(prediction))
        grouped_values[group][1].append(normalize_squad(reference))
    em = []
    for group in grouped_values.keys():
        predictions, targets = grouped_values[group]
        for p in predictions:
            em.append(metric_max_over_ground_truths(_exact_match_score, p, targets))
    return np.mean(em)
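# Illustrative sketch (not part of the original file): each prediction is paired
# with one gold answer for the same question; within a question, a prediction
# scores 1 if it exactly matches any normalized gold answer.
# The helper name `_demo_squad_em_agg` is hypothetical.
def _demo_squad_em_agg():
    items = [
        ('The Eiffel Tower', '0_Eiffel Tower'),
        ('The Eiffel Tower', '0_the eiffel tower'),
    ]
    return squad_em_agg(items)  # -> 1.0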
def squad_f1_agg(items):
    def _f1_score(prediction, target):
        prediction_tokens = prediction.split()