File size: 1,942 Bytes
04a9040
18e37bf
 
 
 
 
 
 
 
 
 
 
 
04a9040
 
18e37bf
04a9040
18e37bf
 
 
 
04a9040
 
 
 
 
 
 
 
 
18e37bf
04a9040
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18e37bf
 
04a9040
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
# Copyright 2020 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" classification_report metric. """
from typing import Optional

import sklearn
import evaluate
import datasets


class ClassificationReportModule(evaluate.Metric):
    """
    Local metric used for classification task based on sklearn classiication_report().
    a classification report is a simple tool to compute multiple metrics such as:
        - accuracy
        - precision/recall/f1-score by class.
        - mean/weighted average.
    """
    def _info(self) -> evaluate.MetricInfo:
        return evaluate.MetricInfo(
            description="Metric based on sklearn classification_report() method.",
            citation="",
            inputs_description="",
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[""],
        )

    def _compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]:
        return sklearn.metrics.classification_report(references, predictions)