fix
jer.py CHANGED
@@ -14,7 +14,7 @@
 """TODO: Add a description here."""
 
 from operator import eq
-from typing import Iterable
+from typing import Callable, Iterable, Union
 
 import evaluate
 import datasets
@@ -56,6 +56,8 @@ Examples:
     {'recall': 0.5, 'precision': 1.0, 'f1': 0.6666666666666666}
 """
 
+Triplet = Union[str, tuple, int]
+
 @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 class jer(evaluate.Metric):
     """TODO: Short description of my evaluation module."""
@@ -82,18 +84,23 @@ class jer(evaluate.Metric):
 
     def _download_and_prepare(self, dl_manager):
         """Optional: download external resources useful to compute the scores"""
-        # TODO: Download external resources if needed
         pass
 
     def _compute(self, predictions, references, eq_fn=eq):
         """Returns the scores"""
         score_dicts = [
-            self._compute_single(prediction=prediction, reference=reference)
+            self._compute_single(prediction=prediction, reference=reference, eq_fn=eq_fn)
             for prediction, reference in zip(predictions, references)
         ]
         return {('mean_' + key): np.mean([scores[key] for scores in score_dicts]) for key in score_dicts[0].keys()}
 
-    def _compute_single(
+    def _compute_single(
+        self,
+        *,
+        prediction: Iterable[Triplet],
+        reference: Iterable[Triplet],
+        eq_fn: Callable[[Triplet, Triplet], bool],
+    ):
         reference_set = set(reference)
         assert len(reference) == len(reference_set), f"Duplicates found in the reference list {reference}"
         prediction_set = set(prediction)
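For context, below is a minimal standalone sketch of the per-example scoring that this change parameterizes with eq_fn, written against the visible lines only. The hit-counting rule (a predicted triplet counts once eq_fn matches it to some reference triplet) is an assumption made for illustration, and the helper names compute_single / compute are hypothetical rather than part of jer.py; only the first three lines of _compute_single appear in this diff.

# Illustrative sketch only: mirrors the visible start of _compute_single and the
# aggregation in _compute; the matching rule below is an assumption, not jer.py's code.
from operator import eq
from typing import Callable, Iterable, Union

import numpy as np

Triplet = Union[str, tuple, int]


def compute_single(
    *,
    prediction: Iterable[Triplet],
    reference: Iterable[Triplet],
    eq_fn: Callable[[Triplet, Triplet], bool] = eq,
) -> dict:
    """Per-example precision/recall/F1 over triplet sets."""
    reference = list(reference)
    reference_set = set(reference)
    assert len(reference) == len(reference_set), f"Duplicates found in the reference list {reference}"
    prediction_set = set(prediction)
    # Assumption: a predicted triplet is a hit if eq_fn matches it to any reference triplet.
    hits = sum(any(eq_fn(p, r) for r in reference_set) for p in prediction_set)
    precision = hits / len(prediction_set) if prediction_set else 0.0
    recall = hits / len(reference_set) if reference_set else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return {"precision": precision, "recall": recall, "f1": f1}


def compute(predictions, references, eq_fn: Callable[[Triplet, Triplet], bool] = eq) -> dict:
    """Mean of per-example scores with 'mean_'-prefixed keys, as in _compute."""
    score_dicts = [
        compute_single(prediction=p, reference=r, eq_fn=eq_fn)
        for p, r in zip(predictions, references)
    ]
    return {"mean_" + key: float(np.mean([d[key] for d in score_dicts])) for key in score_dicts[0]}


# Matches the docstring example: one of two reference triplets recovered.
print(compute(predictions=[["a"]], references=[["a", "b"]]))
# {'mean_precision': 1.0, 'mean_recall': 0.5, 'mean_f1': 0.6666666666666666}

Since evaluate forwards extra keyword arguments from compute() through to _compute(), the practical effect of threading eq_fn into _compute_single is that a custom equality supplied to the metric's compute() call actually reaches the per-example comparison.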