def acc_all(items):
    question_scoring_dict = {}
    preds = list(zip(*items))[0]
    docs = list(zip(*items))[1]
    for doc, pred in zip(docs, preds):
        paragraph_id = doc['idx']['paragraph']
        question_id = doc['idx']['question']
        if (paragraph_id, question_id) not in question_scoring_dict:
            question_scoring_dict[(paragraph_id, question_id)] = []
        gold_label = doc['label'] == 1
        question_scoring_dict[(paragraph_id, question_id)].append(gold_label == pred)
    acc = np.mean([int(all(x)) for x in question_scoring_dict.values()])
    return acc
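# Illustrative usage sketch (hypothetical MultiRC-style items, not part of the original
# module): each item is a (pred, doc) pair, and a (paragraph, question) group only counts
# as correct if every one of its answers is predicted correctly.
#   >>> items = [
#   ...     (True,  {'idx': {'paragraph': 0, 'question': 0}, 'label': 1}),
#   ...     (False, {'idx': {'paragraph': 0, 'question': 0}, 'label': 0}),
#   ... ]
#   >>> acc_all(items)  # both answers in group (0, 0) are correct -> 1.0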
def acc_all_stderr(items):
    # Standard error counterpart of acc_all: score each question group as
    # all-or-nothing, then take the stderr over the per-group scores.
    question_scoring_dict = {}
    preds = list(zip(*items))[0]
    docs = list(zip(*items))[1]
    for doc, pred in zip(docs, preds):
        question_id = doc['idx']['question']
        if question_id not in question_scoring_dict:
            question_scoring_dict[question_id] = []
        gold_label = doc['label'] == 1
        question_scoring_dict[question_id].append(gold_label == pred)
    acc = mean_stderr([int(all(x)) for x in question_scoring_dict.values()])
    return acc
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    scores_for_ground_truths = []
    for ground_truth in ground_truths:
        score = metric_fn(prediction, ground_truth)
        scores_for_ground_truths.append(score)
    return max(scores_for_ground_truths)
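# Illustrative sketch (hypothetical metric_fn, not part of the original module): take the
# best score over all references, e.g. exact match against several gold answers.
#   >>> exact_match = lambda pred, gold: float(pred == gold)
#   >>> metric_max_over_ground_truths(exact_match, 'Paris', ['paris', 'Paris'])  # -> 1.0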
def weighted_mean(items):
    a, b = zip(*items)
    return sum(a) / sum(b)
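# Illustrative sketch: `items` holds (numerator, denominator) pairs, so the result is a
# ratio of sums rather than a mean of per-item ratios.
#   >>> weighted_mean([(3, 4), (1, 2)])  # (3 + 1) / (4 + 2) -> 0.666...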
def is_non_str_iterable(obj):
    return isinstance(obj, Iterable) and not isinstance(obj, str)
def _sacreformat(refs, preds):
    # Reshape refs and preds into the layout sacrebleu's corpus metrics expect:
    # preds as List[str] of length N, and refs transposed to one stream per
    # reference position, i.e. List[List[str]] of shape (M, N) for N preds and
    # up to M references each.
    if not is_non_str_iterable(refs):
        refs = list(refs)
    if not is_non_str_iterable(refs[0]):
        refs = [[ref] for ref in refs]
    refs = list(zip(*refs))
    if not is_non_str_iterable(preds):
        preds = list(preds)
    if is_non_str_iterable(preds[0]):
        assert len(preds[0]) == 1, f'Pred must be a str, was {preds[0]}'
        preds = [pred[0] for pred in preds]
    return refs, preds
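# Illustrative sketch (hypothetical strings): single-reference inputs become one
# reference stream, matching sacrebleu's (List[str] preds, List[List[str]] refs) layout.
#   >>> _sacreformat(['ref a', 'ref b'], ['pred a', 'pred b'])
#   # -> ([('ref a', 'ref b')], ['pred a', 'pred b'])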
class _bootstrap_internal:
    # Multiprocessing helper: draws `n` bootstrap resamples (with replacement) of the
    # scored items and applies the metric `f` to each, seeding the RNG per chunk index.
    def __init__(self, f, n) -> None:
        self.f = f
        self.n = n

    def __call__(self, v):
        i, xs = v
        rnd = random.Random()
        rnd.seed(i)
        res = []
        for _ in range(self.n):
            res.append(self.f(rnd.choices(xs, k=len(xs))))
        return res
def bootstrap_stderr(f, xs, iters):
    import multiprocessing as mp

    pool = mp.Pool(mp.cpu_count())
    res = []
    chunk_size = min(1000, iters)
    from tqdm import tqdm

    print('bootstrapping for stddev:', f.__name__)
    # Resample in chunks across worker processes, then take the stddev of the
    # bootstrap distribution as the standard error estimate.
    for bootstrap in tqdm(
        pool.imap(
            _bootstrap_internal(f, chunk_size),
            [(i, xs) for i in range(iters // chunk_size)],
        ),
        total=iters // chunk_size,
    ):
        res.extend(bootstrap)
    pool.close()
    return sample_stddev(res)
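# Illustrative sketch (hypothetical `per_item_scores`): estimate the standard error of a
# non-analytic aggregate such as the median by resampling with replacement. Since this
# spins up a multiprocessing pool, call it from under an `if __name__ == '__main__':`
# guard on spawn-based platforms.
#   >>> se = bootstrap_stderr(median, per_item_scores, iters=100000)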
def stderr_for_metric(metric, bootstrap_iters: int):
    # Return a stderr function for the given aggregation metric, or None to skip
    # stderr computation entirely (e.g. when bootstrap_iters <= 0).
    if bootstrap_iters <= 0:
        return None
    bootstrappable = [median, matthews_corrcoef, f1_score, perplexity, bleu, chrf, ter]
    if metric in bootstrappable:
        return lambda x: bootstrap_stderr(metric, x, iters=bootstrap_iters)
    stderr = {mean: mean_stderr, acc_all: acc_all_stderr}
    return stderr.get(metric, None)
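# Illustrative sketch: analytic stderrs are looked up directly, while bootstrappable
# metrics get a closure over bootstrap_stderr.
#   >>> stderr_for_metric(mean, bootstrap_iters=100000)  # -> mean_stderr
#   >>> stderr_for_metric(bleu, bootstrap_iters=100000)  # -> lambda x: bootstrap_stderr(bleu, x, ...)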
def pooled_sample_stderr(stderrs: List[float], sizes: List[int]):
    # Pool per-subtask standard errors into a single stderr for a size-weighted
    # group score, via the pooled sample variance.
    assert len(stderrs) == len(sizes)
    pooled_sample_var = sum(
        (size - 1) * stderr**2 * size for size, stderr in zip(sizes, stderrs)
    ) / (sum(sizes) - len(sizes))
    return np.sqrt(pooled_sample_var / sum(sizes))
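# Worked example (hypothetical numbers): two subtasks with 100 and 300 documents and
# per-subtask stderrs of 0.04 and 0.02 pool to
#   sqrt((99 * 0.04**2 * 100 + 299 * 0.02**2 * 300) / (400 - 2) / 400) ≈ 0.018
#   >>> pooled_sample_stderr([0.04, 0.02], [100, 300])  # -> ~0.018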
def combined_sample_stderr(stderrs: List[float], sizes: List[int], metrics=None):
    assert metrics is not None, "Need to pass a list of each subtask's metric for this stderr aggregation"
    assert len(stderrs) == len(sizes) and len(sizes) == len(metrics)
    # Fold subtasks in one at a time, combining within-subtask variances with a
    # between-subtask term. Assumes the group score is a size-weighted mean of the
    # per-subtask metrics.
    variance = stderrs[0] ** 2
    curr_size = sizes[0]
    curr_score = metrics[0]
    for stderr, size, score in zip(stderrs[1:], sizes[1:], metrics[1:]):
        curr_score = (curr_score * curr_size + score * size) / (curr_size + size)
        variance = ((curr_size - 1) * variance + (size - 1) * stderr**2) / (
            curr_size + size - 1
        ) + curr_size * size / ((curr_size + size) * (curr_size + size - 1)) * (
            curr_score - score
        ) ** 2
    return np.sqrt(variance)
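# Illustrative sketch (hypothetical values): combines per-subtask variances with a
# between-subtask correction term for the combined group score.
#   >>> combined_sample_stderr([0.04, 0.02], [100, 300], metrics=[0.61, 0.72])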
def aggregate_subtask_metrics(metrics, sizes, weight_by_size=True):