lvwerra committed
Commit aa6b1d6 · 1 Parent(s): 49c3678

Update Space (evaluate main: c447fc8e)
Files changed (2):
  1. precision.py (+11 -26)
  2. requirements.txt (+1 -1)
precision.py CHANGED
@@ -13,9 +13,6 @@
 # limitations under the License.
 """Precision metric."""
 
-from dataclasses import dataclass
-from typing import List, Optional, Union
-
 import datasets
 from sklearn.metrics import precision_score
 
@@ -105,30 +102,13 @@ _CITATION = """
 """
 
 
-@dataclass
-class PrecisionConfig(evaluate.info.Config):
-
-    name: str = "default"
-
-    pos_label: Union[str, int] = 1
-    average: str = "binary"
-    labels: Optional[List[str]] = None
-    sample_weight: Optional[List[float]] = None
-    zero_division: str = "warn"
-
-
 @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 class Precision(evaluate.Metric):
-
-    CONFIG_CLASS = PrecisionConfig
-    ALLOWED_CONFIG_NAMES = ["default", "multilabel"]
-
-    def _info(self, config):
+    def _info(self):
         return evaluate.MetricInfo(
             description=_DESCRIPTION,
             citation=_CITATION,
             inputs_description=_KWARGS_DESCRIPTION,
-            config=config,
             features=datasets.Features(
                 {
                     "predictions": datasets.Sequence(datasets.Value("int32")),
@@ -147,14 +127,19 @@ class Precision(evaluate.Metric):
         self,
         predictions,
         references,
+        labels=None,
+        pos_label=1,
+        average="binary",
+        sample_weight=None,
+        zero_division="warn",
     ):
         score = precision_score(
             references,
             predictions,
-            labels=self.config.labels,
-            pos_label=self.config.pos_label,
-            average=self.config.average,
-            sample_weight=self.config.sample_weight,
-            zero_division=self.config.zero_division,
+            labels=labels,
+            pos_label=pos_label,
+            average=average,
+            sample_weight=sample_weight,
+            zero_division=zero_division,
         )
         return {"precision": float(score) if score.size == 1 else score}
requirements.txt CHANGED
@@ -1,2 +1,2 @@
-git+https://github.com/huggingface/evaluate@e4a2724377909fe2aeb4357e3971e5a569673b39
+git+https://github.com/huggingface/evaluate@c447fc8eda9c62af501bfdc6988919571050d950
 sklearn
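The pin now tracks the evaluate revision named in the commit message (c447fc8e). Recreating the Space's environment from the updated requirements would look roughly like this (a sketch, not part of the commit; the sklearn entry resolves to the PyPI shim for scikit-learn):

pip install "git+https://github.com/huggingface/evaluate@c447fc8eda9c62af501bfdc6988919571050d950" sklearn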