pip
aucpr.py
CHANGED
@@ -15,7 +15,7 @@
 
 import evaluate
 import datasets
-from sklearn.metrics import precision_recall_curve, auc
+from sklearn.metrics import precision_recall_curve, auc, average_precision_score
 
 
 # TODO: Add BibTeX citation
@@ -93,9 +93,11 @@ class AUCPR(evaluate.Metric):
 
     def _compute(self, references, prediction_scores):
         """Returns the scores"""
-        #
+        # following: https://github.com/jertubiana/ScanNet/blob/7685549c8ed2159a0f441a977dec767343256292/baselines/train_handcrafted_features_PPBS.py#L47
+        # instead of using average_precision_score
         precision, recall, _ = precision_recall_curve(references, prediction_scores)
         aucpr = auc(recall, precision)
+        # aucpr = average_precision_score(references, prediction_scores)
         return {
             "aucpr": aucpr,
         }
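
For context on the change: the metric keeps the trapezoidal area under the precision-recall curve (the approach used in the ScanNet baseline linked in the comment) rather than switching to average_precision_score, which is now imported but left commented out. Below is a minimal standalone sketch of both computations using scikit-learn directly, with made-up labels and scores for illustration only.

import numpy as np
from sklearn.metrics import precision_recall_curve, auc, average_precision_score

# Illustrative binary labels and prediction scores (not from the repository).
references = np.array([0, 0, 1, 1, 0, 1])
prediction_scores = np.array([0.1, 0.4, 0.35, 0.8, 0.2, 0.7])

# What _compute does after this commit: trapezoidal area under the precision-recall curve.
precision, recall, _ = precision_recall_curve(references, prediction_scores)
aucpr = auc(recall, precision)

# The commented-out alternative: average precision, a step-wise summary
# of the same curve that generally gives a slightly different value.
ap = average_precision_score(references, prediction_scores)

print({"aucpr": aucpr, "average_precision": ap})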