murinj committed on
Commit d275ac7 · 1 Parent(s): d27d43d
Files changed (5)
  1. .gitignore +1 -0
  2. HTER.py +98 -0
  3. README.md +84 -6
  4. app.py +6 -0
  5. requirements.txt +0 -0
.gitignore ADDED
@@ -0,0 +1 @@
+ /.idea
HTER.py ADDED
@@ -0,0 +1,98 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """HTER metric."""
+
+ import datasets
+
+ import evaluate
+
+ _DESCRIPTION = """
+ HTER (Half Total Error Rate) combines the False Accept Rate (FAR) and the False Reject Rate (FRR) into a single balanced error score. It is computed as:
+     HTER = (FAR + FRR) / 2
+ where:
+     FAR (False Accept Rate) = FP / (FP + TN)
+     FRR (False Reject Rate) = FN / (FN + TP)
+     TP: true positives
+     TN: true negatives
+     FP: false positives
+     FN: false negatives
+ """
+
+ _KWARGS_DESCRIPTION = """
+ Args:
+     predictions (`list` of `int`): Predicted labels.
+     references (`list` of `int`): Ground truth labels.
+
+ Returns:
+     HTER (`float`): HTER score. Minimum possible value is 0. Maximum possible value is 1.0.
+
+ Examples:
+
+     Example 1: a simple example
+         >>> hter_metric = evaluate.load("hter")
+         >>> results = hter_metric.compute(references=[0, 0], predictions=[0, 1])
+         >>> print(results)
+         {'HTER': 0.25}
+ """
+
+ _CITATION = """
+ """
+
+
+ @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+ class HTER(evaluate.Metric):
+     def _info(self):
+         return evaluate.MetricInfo(
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             inputs_description=_KWARGS_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "predictions": datasets.Value("int32"),
+                     "references": datasets.Value("int32"),
+                 }
+             ),
+         )
+
+     def _compute(self, predictions, references):
+         TP = 0  # true positives
+         TN = 0  # true negatives
+         FP = 0  # false positives
+         FN = 0  # false negatives
+
+         # Tally confusion-matrix counts over (prediction, reference) pairs.
+         for pred, ref in zip(predictions, references):
+             if pred == 1 and ref == 1:
+                 TP += 1
+             elif pred == 0 and ref == 0:
+                 TN += 1
+             elif pred == 1 and ref == 0:
+                 FP += 1
+             elif pred == 0 and ref == 1:
+                 FN += 1
+
+         # Compute FAR and FRR, guarding against division by zero.
+         FAR = FP / (FP + TN) if (FP + TN) > 0 else 0
+         FRR = FN / (FN + TP) if (FN + TP) > 0 else 0
+
+         # HTER is the mean of FAR and FRR.
+         HTER = (FAR + FRR) / 2.0
+         return {"HTER": HTER}
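
Worth noting: the expected output in the docstring follows from the zero-division guards in `_compute`. With `references=[0, 0]` and `predictions=[0, 1]` there is one TN and one FP and no positive references, so FAR = 1/2, FRR falls back to 0, and HTER = (0.5 + 0) / 2 = 0.25. A minimal standalone sketch of the same computation (no `evaluate` dependency; the `hter` helper name is just for illustration):

```python
# Re-implements the _compute logic above for binary 0/1 labels,
# to sanity-check the docstring example by hand.
def hter(predictions, references):
    TP = sum(p == 1 and r == 1 for p, r in zip(predictions, references))
    TN = sum(p == 0 and r == 0 for p, r in zip(predictions, references))
    FP = sum(p == 1 and r == 0 for p, r in zip(predictions, references))
    FN = sum(p == 0 and r == 1 for p, r in zip(predictions, references))
    FAR = FP / (FP + TN) if (FP + TN) > 0 else 0  # false accept rate
    FRR = FN / (FN + TP) if (FN + TP) > 0 else 0  # false reject rate
    return (FAR + FRR) / 2.0

print(hter(predictions=[0, 1], references=[0, 0]))  # 0.25 (FAR = 0.5, FRR = 0)
```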
README.md CHANGED
@@ -1,12 +1,90 @@
  ---
- title: Hter
- emoji: 👁
- colorFrom: yellow
- colorTo: gray
+ title: HTER
+ emoji: 🤗
+ colorFrom: blue
+ colorTo: red
  sdk: gradio
- sdk_version: 5.20.0
+ sdk_version: 3.19.1
  app_file: app.py
  pinned: false
+ tags:
+ - evaluate
+ - metric
+ description: >-
+   HTER (Half Total Error Rate) combines the False Accept Rate (FAR) and the
+   False Reject Rate (FRR) into a single balanced error score: HTER = (FAR + FRR) / 2,
+   where FAR = FP / (FP + TN), FRR = FN / (FN + TP), and TP, TN, FP, FN are the
+   true positive, true negative, false positive, and false negative counts.
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Metric Card for HTER
+
+ ## Metric Description
+
+ HTER (Half Total Error Rate) combines the False Accept Rate (FAR) and the False Reject Rate (FRR) into a single balanced error score. It is computed as:
+
+ HTER = (FAR + FRR) / 2
+
+ where:
+
+ - FAR (False Accept Rate) = FP / (FP + TN)
+ - FRR (False Reject Rate) = FN / (FN + TP)
+ - TP: true positives
+ - TN: true negatives
+ - FP: false positives
+ - FN: false negatives
+
+ ## How to Use
+
+ At minimum, this metric requires predictions and references as inputs.
+
+ ```python
+ >>> hter_metric = evaluate.load("hter")
+ >>> results = hter_metric.compute(references=[0, 0], predictions=[0, 1])
+ >>> print(results)
+ {'HTER': 0.25}
+ ```
+
+ ### Inputs
+
+ - **predictions** (`list` of `int`): Predicted labels.
+ - **references** (`list` of `int`): Ground truth labels.
+
+ ### Output Values
+
+ - **HTER** (`float`): HTER score. Minimum possible value is 0. Maximum possible value is 1.0.
+
+ Output example:
+
+ ```python
+ {'HTER': 1.0}
+ ```
+
+ This metric outputs a dictionary containing the HTER score.
+
+ ## Further References
app.py ADDED
@@ -0,0 +1,6 @@
+ import evaluate
+ from evaluate.utils import launch_gradio_widget
+
+
+ module = evaluate.load("hter")
+ launch_gradio_widget(module)
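
`app.py` only wraps the metric in the stock Gradio widget. For local testing before the Space is deployed, `evaluate.load` also accepts a path to a metric script; a small sketch (running from the repository root is an assumption):

```python
import evaluate

# Load the metric from the local script instead of the Hub Space.
hter_metric = evaluate.load("./HTER.py")

# One of each confusion-matrix outcome (TP, TN, FP, FN all equal 1),
# so FAR = FRR = 0.5 and HTER = 0.5.
results = hter_metric.compute(references=[1, 0, 0, 1], predictions=[1, 0, 1, 0])
print(results)  # {'HTER': 0.5}
```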
requirements.txt ADDED
File without changes
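
The committed `requirements.txt` is empty. For the widget to run, the environment needs the `evaluate` stack (`app.py` imports `evaluate`, and the metric script imports `datasets`); a plausible minimal set to declare, as an assumption rather than part of this commit:

```
evaluate
datasets
```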