# Copyright 2020 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" classification_report metric. """
from typing import Optional
import sklearn
import evaluate
import datasets
class ClassificationReportModule(evaluate.Metric):
"""
Local metric used for classification task based on sklearn classiication_report().
a classification report is a simple tool to compute multiple metrics such as:
- accuracy
- precision/recall/f1-score by class.
- mean/weighted average.
"""
    def _info(self) -> evaluate.MetricInfo:
        return evaluate.MetricInfo(
            description="Metric based on the sklearn classification_report() method.",
            citation="",
            inputs_description="",
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html"
            ],
        )

    def _compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]:
        # classification_report returns a formatted string by default; forward
        # **kwargs so callers can pass output_dict=True to get a dict instead.
        return sklearn.metrics.classification_report(references, predictions, **kwargs)
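

# Usage sketch (illustrative; not part of the original module). It assumes this
# script is saved locally as "classification_report.py", since evaluate.load()
# also accepts a local path to a metric script.
if __name__ == "__main__":
    metric = evaluate.load("classification_report.py")
    # Extra keyword arguments to compute() are forwarded to _compute(); with
    # output_dict=True, sklearn returns a nested dict of per-class
    # precision/recall/F1 plus accuracy and macro/weighted averages.
    report = metric.compute(
        references=[0, 1, 2, 2, 1],
        predictions=[0, 1, 1, 2, 1],
        output_dict=True,
    )
    print(report)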