Commit 9342485
Parent: 4912e21

Fixed sklearn import and added test skeleton.

Files changed:
- balanced_accuracy.py (+0, -34)
- pytest.ini (+3, -0)
- test_balanced_accuracy.py (+60, -0)
- tests.py (+0, -17)
balanced_accuracy.py CHANGED

@@ -15,11 +15,9 @@
 
 import evaluate
 import datasets
-from sklearn.base import accuracy_score
 from sklearn.metrics import balanced_accuracy_score
 
 
-
 _DESCRIPTION = """
 Balanced Accuracy is the average of recall obtained on each class. It can be computed with:
 Balanced Accuracy = (TPR + TNR) / N
@@ -29,38 +27,6 @@ TNR: True negative rate
 N: Number of classes
 """
 
-
-_KWARGS_DESCRIPTION = """
-Args:
-    predictions (`list` of `int`): Predicted labels.
-    references (`list` of `int`): Ground truth labels.
-    normalize (`boolean`): If set to False, returns the number of correctly classified samples. Otherwise, returns the fraction of correctly classified samples. Defaults to True.
-    sample_weight (`list` of `float`): Sample weights Defaults to None.
-
-Returns:
-    accuracy (`float` or `int`): Accuracy score. Minimum possible value is 0. Maximum possible value is 1.0, or the number of examples input, if `normalize` is set to `True`. A higher score means higher accuracy.
-
-Examples:
-
-    Example 1-A simple example
-        >>> accuracy_metric = evaluate.load("accuracy")
-        >>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0])
-        >>> print(results)
-        {'accuracy': 0.5}
-
-    Example 2-The same as Example 1, except with `normalize` set to `False`.
-        >>> accuracy_metric = evaluate.load("accuracy")
-        >>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0], normalize=False)
-        >>> print(results)
-        {'accuracy': 3.0}
-
-    Example 3-The same as Example 1, except with `sample_weight` set.
-        >>> accuracy_metric = evaluate.load("accuracy")
-        >>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0], sample_weight=[0.5, 2, 0.7, 0.5, 9, 0.4])
-        >>> print(results)
-        {'accuracy': 0.8778625954198473}
-"""
-
 _KWARGS_DESCRIPTION = """
 Args:
     predictions (`list` of `int`): Predicted labels.
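For context on the docstring's formula: balanced accuracy is the unweighted mean of per-class recall, so the binary case reduces to (TPR + TNR) / 2 and the general case divides the summed recalls by the number of classes N. A minimal standalone sketch (not part of this commit) that checks the hand-rolled average against sklearn's balanced_accuracy_score, reusing the prediction/reference pair from the third test case below:

import numpy as np
from sklearn.metrics import balanced_accuracy_score

y_true = [0, 1, 2, 0, 1, 2]
y_pred = [0, 1, 1, 0, 1, 2]

# Recall per class: the fraction of samples of that class predicted correctly.
recalls = [
    np.mean([p == c for p, t in zip(y_pred, y_true) if t == c])
    for c in sorted(set(y_true))
]
manual = sum(recalls) / len(recalls)  # (1.0 + 1.0 + 0.5) / 3
assert np.isclose(manual, balanced_accuracy_score(y_true, y_pred))  # ~0.8333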
pytest.ini ADDED

@@ -0,0 +1,3 @@
+[pytest]
+testpaths = ./
+python_files = test_*.py
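With this configuration, running pytest from the repository root collects any file matching test_*.py, so the new test_balanced_accuracy.py is picked up automatically. Note that the old tests.py (deleted below) would not have matched this pattern, which fits the move to the new test file.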
test_balanced_accuracy.py ADDED

@@ -0,0 +1,60 @@
+test_cases = [
+    {
+        "predictions": [0, 1, 0, 1, 0, 1],
+        "references": [0, 1, 0, 1, 0, 1],
+        "sample_weight": None,
+        "adjusted": False,
+        "result": {"balanced_accuracy": 0}
+    },
+    {
+        "predictions": [0, 0, 1, 1, 1, 1],
+        "references": [0, 0, 0, 0, 1, 1],
+        "sample_weight": None,
+        "adjusted": False,
+        "result": {"balanced_accuracy": 0}
+    },
+    {
+        "predictions": [0, 1, 1, 0, 1, 2],
+        "references": [0, 1, 2, 0, 1, 2],
+        "sample_weight": None,
+        "adjusted": False,
+        "result": {"balanced_accuracy": 0}
+    },
+    {
+        "predictions": [0, 0, 1, 2, 1, 2],
+        "references": [0, 0, 0, 0, 1, 2],
+        "sample_weight": None,
+        "adjusted": False,
+        "result": {"balanced_accuracy": 0}
+    },
+    {
+        "predictions": [0, 1, 1, 0, 0, 1],
+        "references": [0, 1, 0, 1, 0, 1],
+        "sample_weight": [0.5, 0.7, 0.8, 0.9, 1.0, 0.6],
+        "adjusted": False,
+        "result": {"balanced_accuracy": 0}
+    },
+    {
+        "predictions": [0, 1, 1, 0, 0, 1],
+        "references": [0, 1, 0, 1, 0, 1],
+        "sample_weight": None,
+        "adjusted": True,
+        "result": {"balanced_accuracy": 0}
+    },
+]
+
+import pytest
+from evaluate import load
+from sklearn.metrics import balanced_accuracy_score
+
+@pytest.mark.parametrize("test_case", test_cases)
+def test_balanced_accuracy(test_case):
+    metric = load("hyperml/balanced_accuracy")
+    result = metric.compute(
+        predictions=test_case["predictions"],
+        references=test_case["references"],
+        sample_weight=test_case["sample_weight"],
+        adjusted=test_case["adjusted"]
+    )
+    assert result["balanced_accuracy"] == balanced_accuracy_score(y_pred=test_case["predictions"], y_true=test_case["references"], sample_weight=test_case["sample_weight"], adjusted=test_case["adjusted"])
+    assert result == test_case["result"]
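Every "result" placeholder above is {"balanced_accuracy": 0}, consistent with the commit message calling this a skeleton: the first case has perfect predictions and should score 1.0, so the final assertion will fail until the placeholders are filled in. One way to generate the expected values is to replay each case through sklearn; a standalone sketch (not part of the commit, assuming test_cases from the new file is in scope):

from sklearn.metrics import balanced_accuracy_score

# Print the sklearn reference score for each case in the skeleton,
# to be copied into the "result" placeholders.
for i, case in enumerate(test_cases):
    score = balanced_accuracy_score(
        y_true=case["references"],
        y_pred=case["predictions"],
        sample_weight=case["sample_weight"],
        adjusted=case["adjusted"],
    )
    print(i, round(score, 4))  # e.g. case 0 prints 1.0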
tests.py DELETED

@@ -1,17 +0,0 @@
-test_cases = [
-    {
-        "predictions": [0, 0],
-        "references": [1, 1],
-        "result": {"metric_score": 0}
-    },
-    {
-        "predictions": [1, 1],
-        "references": [1, 1],
-        "result": {"metric_score": 1}
-    },
-    {
-        "predictions": [1, 0],
-        "references": [1, 1],
-        "result": {"metric_score": 0.5}
-    }
-]