# File: lm-evaluation-harness-main/lm_eval/api/instance.py
from dataclasses import dataclass, field
from typing import Optional, Tuple


@dataclass
class Instance:
    doc: dict
    arguments: tuple
    idx: int
    metadata: Tuple[Optional[str], Optional[int], Optional[int]] = field(default_factory=lambda: (None, None, None))
    resps: list = field(default_factory=list)
    filtered_resps: dict = field(default_factory=dict)
    task_name: Optional[str] = None
    doc_id: Optional[int] = None
    repeats: Optional[int] = None

    def __post_init__(self) -> None:
        # unpack the metadata tuple into its named fields
        (self.task_name, self.doc_id, self.repeats) = self.metadata

    @property
    def args(self):
        # always expose the request arguments as a tuple
        return self.arguments if isinstance(self.arguments, tuple) else (self.arguments,)
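
# Illustrative sketch (not part of the library): how an Instance might be
# constructed and inspected. The document, arguments, and metadata values
# below are made up for demonstration.
def _instance_usage_example():
    inst = Instance(
        doc={"question": "2+2=?", "answer": "4"},
        arguments=("2+2=?", " 4"),   # e.g. (context, continuation) for a loglikelihood request
        idx=0,
        metadata=("my_task", 0, 1),  # hypothetical (task_name, doc_id, repeats)
    )
    # __post_init__ unpacks metadata into the named fields
    assert inst.task_name == "my_task" and inst.doc_id == 0 and inst.repeats == 1
    # .args always yields a tuple, even if a bare value was passed as `arguments`
    assert inst.args == ("2+2=?", " 4")
    return inst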

# File: lm-evaluation-harness-main/lm_eval/api/metrics.py
import logging
import math
import random
import re
import string
from collections.abc import Iterable
from typing import List

import numpy as np
import sacrebleu
import sklearn.metrics

from lm_eval.api.registry import register_aggregation, register_metric

eval_logger = logging.getLogger('lm-eval')


@register_aggregation('bypass')
def bypass_agg(arr):
    # placeholder aggregation: returns a fixed sentinel instead of aggregating
    return 999


@register_aggregation('mean')
def mean(arr):
    return sum(arr) / len(arr)


@register_aggregation('median')
def median(arr):
    # middle element by index; the input is not sorted here
    return arr[len(arr) // 2]


@register_aggregation('perplexity')
def perplexity(items):
    # items are per-document loglikelihoods; perplexity = exp(-mean loglikelihood)
    return math.exp(-mean(items))


@register_aggregation('weighted_perplexity')
def weighted_perplexity(items):
    # items are (loglikelihood, weight) pairs; weighted_mean is defined elsewhere in this module
    return math.exp(-weighted_mean(items))


@register_aggregation('bits_per_byte')
def bits_per_byte(items):
    return -weighted_mean(items) / math.log(2)
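
# Illustrative sketch (not part of the library): the perplexity-style
# aggregations above consume log-likelihoods. `perplexity` takes a flat list of
# per-document loglikelihoods; `weighted_perplexity` and `bits_per_byte` take
# (loglikelihood, weight) pairs and rely on the `weighted_mean` helper defined
# elsewhere in this module (assumed here to divide summed loglikelihood by
# summed weights). The numbers below are made up.
def _perplexity_example():
    loglikelihoods = [-2.0, -1.0, -3.0]
    ppl = perplexity(loglikelihoods)  # exp(-mean) = exp(2.0), roughly 7.39
    # (total loglikelihood, token or byte count) per document
    weighted_items = [(-20.0, 10), (-5.0, 5)]
    wppl = weighted_perplexity(weighted_items)  # exp(25 / 15) under the assumption above
    return ppl, wppl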

@register_aggregation('f1')
def f1_score(items):
    # items are (gold, prediction) pairs
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = sklearn.metrics.f1_score(golds, preds)
    return np.max(fscore)


@register_aggregation('matthews_corrcoef')
def matthews_corrcoef(items):
    # items are (gold, prediction) pairs
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    return sklearn.metrics.matthews_corrcoef(golds, preds)


@register_aggregation('bleu')
def bleu(items):
    # corpus-level BLEU over (reference, prediction) pairs via sacrebleu;
    # _sacreformat is a reshaping helper defined elsewhere in this module
    refs = list(zip(*items))[0]
    preds = list(zip(*items))[1]
    (refs, preds) = _sacreformat(refs, preds)
    return sacrebleu.corpus_bleu(preds, refs).score


@register_aggregation('chrf')
def chrf(items):
    # corpus-level chrF over (reference, prediction) pairs via sacrebleu
    refs = list(zip(*items))[0]
    preds = list(zip(*items))[1]
    (refs, preds) = _sacreformat(refs, preds)
    return sacrebleu.corpus_chrf(preds, refs).score


@register_aggregation('ter')
def ter(items):
    # corpus-level TER over (reference, prediction) pairs via sacrebleu
    refs = list(zip(*items))[0]
    preds = list(zip(*items))[1]
    (refs, preds) = _sacreformat(refs, preds)
    return sacrebleu.corpus_ter(preds, refs).score
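
# Illustrative sketch (not part of the library): the bleu/chrf/ter aggregations
# above receive (reference, prediction) pairs and hand them to sacrebleu after
# reshaping with the `_sacreformat` helper defined elsewhere in this module.
# The sentences below are made up.
def _translation_metrics_example():
    items = [
        ("the cat sat on the mat", "the cat sat on a mat"),
        ("hello world", "hello world"),
    ]
    return bleu(items), chrf(items), ter(items)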

@register_aggregation('brier_score')
def brier_score(items):
    # items are (gold_label_index, probability_vector) pairs
    (gold, predictions) = list(zip(*items))
    (bs, num_class) = np.array(predictions).shape
    gold = list(gold)
    # one-hot encode the gold labels, then average the per-example squared error
    gold_one_hot = np.eye(num_class)[gold]
    return np.mean(np.sum((predictions - gold_one_hot) ** 2, axis=1))
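
# Illustrative sketch (not part of the library): a worked Brier-score
# computation on made-up data. With gold labels [0, 1] and the probability
# vectors below, the per-example squared errors are
# (0.9-1)^2 + (0.1-0)^2 = 0.02 and (0.3-0)^2 + (0.7-1)^2 = 0.18,
# so the aggregated score is their mean, 0.10.
def _brier_score_example():
    items = [
        (0, [0.9, 0.1]),  # gold class 0, confident and correct
        (1, [0.3, 0.7]),  # gold class 1, correct but less confident
    ]
    return brier_score(items)  # about 0.10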

@register_metric(metric='brier_score', higher_is_better=False, output_type=['multiple_choice'], aggregation='brier_score')