        target_tokens = target.split()
        common = collections.Counter(prediction_tokens) & collections.Counter(target_tokens)
        num_same = sum(common.values())
        if num_same == 0:
            return 0
        precision = 1.0 * num_same / len(prediction_tokens)
        recall = 1.0 * num_same / len(target_tokens)
        f1 = 2 * precision * recall / (precision + recall)
        return f1

    grouped_values = collections.defaultdict(lambda: ([], []))
    for prediction, reference in items:
        group, reference = reference.split('_')
        if group not in grouped_values:
            grouped_values[group][0].append(normalize_squad(prediction))
            grouped_values[group][1].append(normalize_squad(reference))

    f1 = []
    for group in grouped_values.keys():
        p, t = grouped_values[group]
        f1.append(metric_max_over_ground_truths(_f1_score, p[0], t))

    return np.mean(f1)
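# Illustrative sketch (not part of the harness): the aggregation above expects
# `items` as (prediction, reference) pairs in which each reference string is
# prefixed with a group id, e.g. "42_Mount Everest", so that all gold answers
# for the same ReCoRD query are regrouped before taking the max token F1.
# The values below are invented.
_example_items = [
    ('Mount Everest', '42_Mount Everest'),
    ('Mount Everest', '42_Everest'),  # same group, alternative gold answer
    ('Paris', '7_London'),            # different group, wrong prediction
]
for _prediction, _reference in _example_items:
    _group, _gold = _reference.split('_')
    print(_group, _prediction, _gold)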
# File: lm-evaluation-harness-main/lm_eval/tasks/super_glue/record/util.py
import datasets
import numpy as np
import transformers.data.metrics.squad_metrics as squad_metrics
from lm_eval.api.metrics import metric_max_over_ground_truths
def doc_to_text(doc):
    initial_text, *highlights = doc['passage'].strip().split('\n@highlight\n')
    text = initial_text + '\n\n'
    for highlight in highlights:
        text += f' - {highlight}.\n'
    return text
def format_answer(query, entity):
    return f' - {query}'.replace('@placeholder', entity)

def doc_to_target(doc):
    return format_answer(query=doc['query'], entity=doc['answers'][0])

def doc_to_choice(doc):
    return [format_answer(query=doc['query'], entity=ans) for ans in doc['entities']]
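# Illustrative sketch: a minimal ReCoRD-style doc (field values invented)
# showing how the helpers above turn a passage with "@highlight" markers and an
# "@placeholder" query into the prompt, the gold continuation, and the choices.
example_doc = {
    'passage': 'The pebble was found by Mr. Farley.\n@highlight\nFarley keeps it in a safe',
    'query': '@placeholder kept the magic pebble.',
    'entities': ['Farley', 'pebble'],
    'answers': ['Farley'],
}
print(doc_to_text(example_doc))    # passage, then one " - ..." line per highlight
print(doc_to_target(example_doc))  # " - Farley kept the magic pebble."
print(doc_to_choice(example_doc))  # one formatted answer line per candidate entity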
def process_docs(dataset: datasets.Dataset):
    def _process_doc(doc):
        return {
            'passage': doc['passage'],
            'query': doc['query'],
            'entities': sorted(list(set(doc['entities']))),
            'answers': sorted(list(set(doc['answers']))),
        }

    return dataset.map(_process_doc)
def process_results(doc, results):
    max_idx = np.argmax(np.array([result[0] for result in results]))
    prediction = doc['entities'][max_idx]
    gold_label_set = doc['answers']
    f1 = metric_max_over_ground_truths(squad_metrics.compute_f1, prediction, gold_label_set)
    em = metric_max_over_ground_truths(squad_metrics.compute_exact, prediction, gold_label_set)
    return {'f1': f1, 'em': em}
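# Illustrative sketch: `results` holds one entry per choice from doc_to_choice;
# the code above relies only on result[0] being the model's log-likelihood for
# that choice (the tuple shape shown here is an assumption, values invented).
toy_record_doc = {'entities': ['London', 'Paris'], 'answers': ['Paris']}
toy_results = [(-4.2, False), (-1.3, True)]
print(process_results(toy_record_doc, toy_results))  # {'f1': 1.0, 'em': 1}: 'Paris' matches the gold answer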
# File: lm-evaluation-harness-main/lm_eval/tasks/super_glue/wsc/preprocess_wsc.py
from lm_eval.utils import general_detokenize
def default_doc_to_text(x):
    raw_passage = x['text']
    pre = ' '.join(raw_passage.split()[:x['span2_index']])
    post = raw_passage[len(pre) + len(x['span2_text']) + 1:]
    passage = general_detokenize(pre + ' *{}*'.format(x['span2_text']) + post)
    noun = x['span1_text']
    pronoun = x['span2_text']
    text = (
        f'Passage: {passage}\n'
        + f'Question: In the passage above, does the pronoun "*{pronoun}*" refer to "*{noun}*"?\n'
        + 'Answer:'
    )
    return text
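# Illustrative sketch (invented WSC-style example): span2_index counts
# whitespace-separated tokens up to the pronoun, which gets wrapped in
# asterisks before detokenization.
toy_wsc = {
    'text': 'The trophy does not fit in the suitcase because it is too big .',
    'span1_text': 'the trophy',
    'span2_text': 'it',
    'span2_index': 9,
}
print(default_doc_to_text(toy_wsc))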
# File: lm-evaluation-harness-main/lm_eval/tasks/super_glue/wsc/t5_utils.py
import re
from typing import List
def doc_to_text(x):
    text = re.sub(' X ', ' *' + x['span2_text'] + '* ', _wsc_inputs(x))
    return 'wsc: ' + text
def _wsc_inputs(x):
    words = x['text'].split(' ')
    assert x['span2_index'] > 0
    assert x['span2_index'] < len(words)
    pronoun_index = x['span2_index']

    def create_input():
        assert words[pronoun_index] == x['span2_text']
        return ' '.join([
            ' '.join(words[:pronoun_index]),
            'X',
            ' '.join(words[pronoun_index + 1:]),
        ])

    # Two specific dataset examples are handled verbatim because the generic
    # splice above does not line up with their tokenization.
    if x['text'] == 'The boy continued to whip the pony , and eventually the pony threw him over. John laughed out quite loud. "Good for him," he said. ':
        return 'The boy continued to whip the pony , and eventually the pony threw him over. John laughed out quite loud. "Good for X ," he said.'
    if x['text'] == 'When they had eventually calmed down a bit , and had gotten home, Mr. Farley put the magic pebble in an iron safe . Some day they might want to use it , but really for now, what more could they wish for?':
        return 'When they had eventually calmed down a bit , and had gotten home, Mr. Farley put the magic pebble in an iron safe . Some day they might want to use X , but really for now, what more could they wish for?'
    return create_input()
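# Illustrative sketch (invented example): the pronoun token is replaced by "X"
# and then wrapped in asterisks, yielding the T5-style "wsc:" prompt.
toy_wsc_doc = {
    'text': 'The city councilmen refused the demonstrators a permit because they feared violence .',
    'span2_text': 'they',
    'span2_index': 9,
}
print(doc_to_text(toy_wsc_doc))
# -> wsc: The city councilmen refused the demonstrators a permit because *they* feared violence .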
DETERMINERS = {'a', 'an', 'few', 'her', 'his', 'each', 'every', 'many', 'much', 'my', 'our', 'some', 'that', 'the', 'their', 'these', 'this', 'those', 'which', 'whose', 'your'}
def clean(s: str) -> str:
    s = s.strip().lower()
    return ' '.join([w for w in s.split(' ') if w not in DETERMINERS])
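# Quick check of the normalization applied before comparing the model's
# free-form answer to the gold span (example values invented):
print(clean('The magic pebble'))  # -> 'magic pebble'
print(clean('his Pony'))          # -> 'pony'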
def process_results(docs: dict, resps: List):
    prediction = clean(resps[0])