# ccc/ccc.py
# Copyright (C) 2024 Aaron Keesing
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# “Software”), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Concordance correlation coefficient"""
import datasets
import evaluate
import numpy as np
# BibTeX entry for Lin (1989), the paper that introduced the concordance
# correlation coefficient; surfaced by `evaluate` as the metric's citation.
_CITATION = """\
@article{linConcordanceCorrelationCoefficient1989,
title = {A {{Concordance Correlation Coefficient}} to {{Evaluate Reproducibility}}},
author = {Lin, Lawrence I-Kuei},
year = {1989},
journal = {Biometrics},
volume = {45},
number = {1},
pages = {255--268},
publisher = {{International Biometric Society}},
issn = {0006-341X},
url = {https://www.jstor.org/stable/2532051},
doi = {10.2307/2532051}
}
"""
_DESCRIPTION = """\
A metric to measure the degree of agreement between continuous-values evaluations from two raters.
"""
# Usage documentation (arguments, return value, doctest examples) that
# `evaluate` prepends to the metric class docstring via add_start_docstrings.
_KWARGS_DESCRIPTION = """
Calculates the CCC between predictions and references
Args:
predictions: list of predictions to score. Each prediction
should be a floating point value.
references: list of references, one for each prediction. Each
reference should be a floating point value.
Returns:
ccc: the concordance correlation coefficient, -1 <= ccc <= 1
Examples:
>>> ccc_metric = evaluate.load("agkphysics/ccc")
>>> results = ccc_metric.compute(references=[0.2, 0.1], predictions=[0.1, 0.2])
>>> print(results)
{'ccc': -1.0}
>>> results = ccc_metric.compute(references=[0.1, 0.2], predictions=[0.1, 0.2])
>>> print(results)
{'ccc': 1.0}
>>> results = ccc_metric.compute(references=[0.1, 0.3], predictions=[0.1, 0.2])
>>> print(results)
{'ccc': 0.666666641831399}
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CCC(evaluate.Metric):
    """Concordance correlation coefficient (Lin, 1989).

    Quantifies agreement between two continuous-valued rating sequences by
    combining their correlation with the closeness of their means and
    variances; values lie in [-1, 1], with 1 meaning perfect agreement.
    """

    def _info(self):
        """Describe the metric: expected input features, citation, and links."""
        feature_spec = datasets.Features(
            {
                "predictions": datasets.Value("float32"),
                "references": datasets.Value("float32"),
            }
        )
        return evaluate.MetricInfo(
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=feature_spec,
            homepage="https://en.wikipedia.org/wiki/Concordance_correlation_coefficient",
            reference_urls=[
                "https://www.jstor.org/stable/2532051",
                "https://en.wikipedia.org/wiki/Concordance_correlation_coefficient",
            ],
        )

    def _compute(self, predictions, references):
        """Return ``{"ccc": value}`` for the given prediction/reference pairs.

        Implements Lin's formula with population (ddof=0) statistics:
        ccc = 2*s_xy / (s_x^2 + s_y^2 + (mu_x - mu_y)^2).
        """
        mean_pred = np.mean(predictions)
        mean_ref = np.mean(references)
        # Off-diagonal entry of the 2x2 population covariance matrix.
        covariance = np.cov(predictions, references, ddof=0)[0, 1]
        var_pred = np.var(predictions, ddof=0)
        var_ref = np.var(references, ddof=0)
        denominator = var_pred + var_ref + (mean_pred - mean_ref) ** 2
        return {"ccc": 2 * covariance / denominator}