"""ECE metric file."""
from __future__ import annotations
from typing import TYPE_CHECKING
import datasets
import evaluate
from torch import LongTensor, Tensor
from torchmetrics.functional.classification.calibration_error import (
binary_calibration_error,
multiclass_calibration_error,
)

if TYPE_CHECKING:
    from collections.abc import Iterable

_CITATION = """\
@InProceedings{huggingface:ece,
title = {Expected calibration error (ECE)},
authors={Nathan Fradet},
year={2023}
}
"""

_DESCRIPTION = """\
This metric computes the expected calibration error (ECE).
It directly calls the torchmetrics package:
https://torchmetrics.readthedocs.io/en/stable/classification/calibration_error.html
"""

_KWARGS_DESCRIPTION = """
Computes the expected calibration error (ECE) of predictions given references.
Args:
    predictions: predicted probabilities to score, of shape (N, C, ...) for
        multiclass inputs or (N, ...) for binary inputs.
    references: reference label for each prediction, of shape (N, ...).
Returns:
ece: expected calibration error
Examples:
    >>> import numpy as np
    >>> ece = evaluate.load("Natooz/ece")
    >>> results = ece.compute(
    ...     predictions=np.array([[0.25, 0.20, 0.55],
    ...                           [0.55, 0.05, 0.40],
    ...                           [0.10, 0.30, 0.60],
    ...                           [0.90, 0.05, 0.05]]),
    ...     references=np.array([0, 1, 2, 0]),
    ...     num_classes=3,
    ...     n_bins=3,
    ...     norm="l1",
    ... )
    >>> print(results)
    {'ece': 0.2000}
"""


@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ECE(evaluate.Metric):
    """
    Module for the calibration error (ECE) metrics of the torchmetrics package:
    https://torchmetrics.readthedocs.io/en/stable/classification/calibration_error.html
    """

    def _info(self) -> evaluate.MetricInfo:
"""
Return the module info.
:return: module info.
"""
return evaluate.MetricInfo(
# This is the description that will appear on the modules page.
module_type="metric",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
# This defines the format of each prediction and reference
features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("float32")),
"references": datasets.Value("int64"),
}
),
# Homepage of the module for documentation
homepage="https://huggingface.co/spaces/Natooz/ece",
# Additional links to the codebase or references
codebase_urls=[
"https://github.com/Lightning-AI/torchmetrics/blob/v0.11.4/src/torchmetrics/classification/calibration_error.py"
],
reference_urls=[
"https://torchmetrics.readthedocs.io/en/stable/classification/calibration_error.html"
],
)

    def _compute(
self,
predictions: Iterable[float] | None = None,
references: Iterable[int] | None = None,
**kwargs
) -> dict[str, float]:
"""
Return the Expected Calibration Error (ECE).
See the torchmetrics documentation for more information on the method.
https://torchmetrics.readthedocs.io/en/stable/classification/calibration_error.html
predictions: (N,C,...) if multiclass or (N,...) if binary
references: (N,...).
        If "num_classes" is not provided in a multiclass setting, it will be
        inferred from the second dimension of predictions (predictions.shape[1]).
"""
# Convert the input
predictions = Tensor(predictions)
references = LongTensor(references)
# Determine number of classes / binary or multiclass
error_msg = (
"Expected to have predictions with shape (N,C,...) for multiclass or "
"(N,...) for binary, and references with shape (N,...), but got "
f"{predictions.shape} and {references.shape}"
)
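        # predictions with exactly one more dimension than references carry
        # per-class probabilities (multiclass); matching dimensions mean
        # predictions are positive-class probabilities (binary).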
binary = True
if predictions.dim() == references.dim() + 1: # multiclass
binary = False
if "num_classes" not in kwargs:
kwargs["num_classes"] = int(predictions.shape[1])
        elif predictions.dim() == references.dim() and "num_classes" in kwargs:
            raise ValueError(
                "The num_classes argument was provided, but predictions and "
                "references have the same number of dimensions (binary case). "
                + error_msg
            )
elif predictions.dim() != references.dim():
raise ValueError("Bad input shape. " + error_msg)
# Compute the calibration
if binary:
ece = binary_calibration_error(predictions, references, **kwargs)
else:
ece = multiclass_calibration_error(predictions, references, **kwargs)
return {
"ece": float(ece),
}
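

if __name__ == "__main__":
    # Minimal smoke test, as a sketch only: it calls _compute directly and so
    # bypasses evaluate's loading and feature-casting machinery. All input
    # values below are illustrative, not reference data.
    metric = ECE()

    # Multiclass case: predictions of shape (N, C), references of shape (N,).
    # With n_bins=3 and norm="l1" these toy inputs yield an ECE of 0.2 (up to
    # float precision), matching the docstring example.
    print(metric._compute(
        predictions=[[0.25, 0.20, 0.55],
                     [0.55, 0.05, 0.40],
                     [0.10, 0.30, 0.60],
                     [0.90, 0.05, 0.05]],
        references=[0, 1, 2, 0],
        n_bins=3,
        norm="l1",
    ))

    # Binary case: predictions are positive-class probabilities of shape (N,),
    # references are 0/1 labels of shape (N,).
    print(metric._compute(
        predictions=[0.2, 0.9, 0.6, 0.3],
        references=[0, 1, 1, 0],
        n_bins=3,
        norm="l1",
    ))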