maksymdolgikh committed
Commit 94ed10f · 1 Parent(s): 473b250

name fixes

Files changed (2)
  1. app.py +1 -1
  2. seqeval_with_fbeta.py +179 -0
app.py CHANGED
@@ -5,7 +5,7 @@ from evaluate.utils import launch_gradio_widget
 
 
 sys.path = [p for p in sys.path if p != "/home/user/app"]
-module = evaluate.load("seqeval_with_fbetal")
+module = evaluate.load("maksymdolgikh/seqeval_with_fbeta")
 sys.path = ["/home/user/app"] + sys.path
 
 launch_gradio_widget(module)
seqeval_with_fbeta.py ADDED
@@ -0,0 +1,179 @@
+# Copyright 2020 The HuggingFace Evaluate Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" seqeval metric. """
+
+import importlib
+from typing import List, Optional, Union
+
+import datasets
+from seqeval.metrics import accuracy_score, classification_report
+
+import evaluate
+
+
+_CITATION = """\
+@inproceedings{ramshaw-marcus-1995-text,
+    title = "Text Chunking using Transformation-Based Learning",
+    author = "Ramshaw, Lance and
+      Marcus, Mitch",
+    booktitle = "Third Workshop on Very Large Corpora",
+    year = "1995",
+    url = "https://www.aclweb.org/anthology/W95-0107",
+}
+@misc{seqeval,
+  title={{seqeval}: A Python framework for sequence labeling evaluation},
+  url={https://github.com/chakki-works/seqeval},
+  note={Software available from https://github.com/chakki-works/seqeval},
+  author={Hiroki Nakayama},
+  year={2018},
+}
+"""
+
+_DESCRIPTION = """\
+seqeval is a Python framework for sequence labeling evaluation.
+seqeval can evaluate the performance of chunking tasks such as named-entity recognition, part-of-speech tagging, semantic role labeling and so on.
+
+This is well-tested by using the Perl script conlleval, which can be used for
+measuring the performance of a system that has processed the CoNLL-2000 shared task data.
+
+seqeval supports the following formats:
+IOB1
+IOB2
+IOE1
+IOE2
+IOBES
+
+See the [README.md] file at https://github.com/chakki-works/seqeval for more information.
+"""
+
+_KWARGS_DESCRIPTION = """
+Produces labelling scores along with their sufficient statistics
+from a source against one or more references.
+
+Args:
+    predictions: List of List of predicted labels (Estimated targets as returned by a tagger)
+    references: List of List of reference labels (Ground truth (correct) target values)
+    beta: Weight of recall relative to precision in the F-beta score. default: 1.0
+    suffix: True if the IOB prefix is after type, False otherwise. default: False
+    scheme: Specify target tagging scheme. Should be one of ["IOB1", "IOB2", "IOE1", "IOE2", "IOBES", "BILOU"].
+        default: None
+    mode: Whether to count correct entity labels with incorrect I/B tags as true positives or not.
+        If you want to only count exact matches, pass mode="strict". default: None.
+    sample_weight: Array-like of shape (n_samples,), weights for individual samples. default: None
+    zero_division: Which value to substitute as a metric value when encountering zero division. Should be one of 0, 1,
+        "warn". "warn" acts as 0, but a warning is also raised.
+
+Returns:
+    'scores': dict. Summary of the scores for overall and per type
+        Overall:
+            'accuracy': accuracy,
+            'precision': precision,
+            'recall': recall,
+            'f1': F1 score, also known as balanced F-score or F-measure,
+            'fbeta': F-score with weight beta
+        Per type:
+            'precision': precision,
+            'recall': recall,
+            'f1': F1 score, also known as balanced F-score or F-measure,
+            'fbeta': F-score with weight beta
+Examples:
+
+    >>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
+    >>> references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
+    >>> seqeval = evaluate.load("maksymdolgikh/seqeval_with_fbeta")
+    >>> results = seqeval.compute(predictions=predictions, references=references, beta=1.0)
+    >>> print(list(results.keys()))
+    ['MISC', 'PER', 'overall_precision', 'overall_recall', 'overall_f1', 'overall_f1.0', 'overall_accuracy']
+    >>> print(results["overall_f1"])
+    0.5
+    >>> print(results["PER"]["f1"])
+    1.0
+"""
+
+
+@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+class Seqeval(evaluate.Metric):
+    def _info(self):
+        return evaluate.MetricInfo(
+            description=_DESCRIPTION,
+            citation=_CITATION,
+            homepage="https://github.com/chakki-works/seqeval",
+            inputs_description=_KWARGS_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "predictions": datasets.Sequence(datasets.Value("string", id="label"), id="sequence"),
+                    "references": datasets.Sequence(datasets.Value("string", id="label"), id="sequence"),
+                }
+            ),
+            codebase_urls=["https://github.com/chakki-works/seqeval"],
+            reference_urls=["https://github.com/chakki-works/seqeval"],
+        )
+
+    def _compute(
+        self,
+        predictions,
+        references,
+        beta: float = 1.0,
+        suffix: bool = False,
+        scheme: Optional[str] = None,
+        mode: Optional[str] = None,
+        sample_weight: Optional[List[int]] = None,
+        zero_division: Union[str, int] = "warn",
+    ):
+        if scheme is not None:
+            try:
+                scheme_module = importlib.import_module("seqeval.scheme")
+                scheme = getattr(scheme_module, scheme)
+            except AttributeError:
+                raise ValueError(f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {scheme}")
+        report = classification_report(
+            y_true=references,
+            y_pred=predictions,
+            suffix=suffix,
+            output_dict=True,
+            scheme=scheme,
+            mode=mode,
+            sample_weight=sample_weight,
+            zero_division=zero_division,
+        )
+        report.pop("macro avg")
+        report.pop("weighted avg")
+
+        # compute the F-beta score unconditionally so that the f"f{beta}-score" lookups below also work for beta == 1.0
+        beta2 = beta ** 2
+        for k, v in report.items():
+            denom = beta2 * v["precision"] + v["recall"]
+            if denom == 0:
+                denom += 1
+            v[f"f{beta}-score"] = (1 + beta2) * v["precision"] * v["recall"] / denom
+
+        overall_score = report.pop("micro avg")
+
+        scores = {
+            type_name: {
+                "precision": score["precision"],
+                "recall": score["recall"],
+                "f1": score["f1-score"],
+                f"f{beta}": score[f"f{beta}-score"],
+                "number": score["support"],
+            }
+            for type_name, score in report.items()
+        }
+        scores["overall_precision"] = overall_score["precision"]
+        scores["overall_recall"] = overall_score["recall"]
+        scores["overall_f1"] = overall_score["f1-score"]
+        scores[f"overall_f{beta}"] = overall_score[f"f{beta}-score"]
+        scores["overall_accuracy"] = accuracy_score(y_true=references, y_pred=predictions)
+
+        return scores
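
Usage sketch (assumptions: the evaluate and seqeval packages are installed locally, and the metric resolves under the maksymdolgikh/seqeval_with_fbeta id that app.py loads). With beta=0.5 the overall F-beta lands under the overall_f0.5 key, following the f"overall_f{beta}" naming in _compute; since predictions and references match exactly here, every reported score should come out as 1.0.

import evaluate

# hypothetical smoke test; the repo id is taken from app.py above
metric = evaluate.load("maksymdolgikh/seqeval_with_fbeta")
predictions = [["O", "B-PER", "I-PER", "O"], ["B-LOC", "O"]]
references = [["O", "B-PER", "I-PER", "O"], ["B-LOC", "O"]]
# beta < 1 weights precision more than recall; beta > 1 favours recall
results = metric.compute(predictions=predictions, references=references, beta=0.5)
print(results["overall_precision"], results["overall_recall"], results["overall_f0.5"])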