davanstrien HF staff committed on
Commit
f1ac821
·
1 Parent(s): 64047a8

Upload hipe2020.py

Browse files
Files changed (1) hide show
  1. hipe2020.py +217 -0
hipe2020.py ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """TODO"""
18
+
19
+ import datasets
20
+
21
+
22
+ _CITATION = """\
23
+ TODO
24
+ """
25
+
26
+ _DESCRIPTION = """\
27
+ TODO
28
+ """
29
+
30
+ _BASE_URL_TRAIN_DEV = "https://raw.githubusercontent.com/impresso/CLEF-HIPE-2020/master/data/training-v1.2/"
31
+
32
+
33
+ _URLs = {
34
+ "EN": {
35
+ "dev": _BASE_URL_TRAIN_DEV + "en/HIPE-data-v1.2-dev-en.tsv?raw=true"
36
+ }, # English only has dev
37
+ "DE": {
38
+ "dev": _BASE_URL_TRAIN_DEV + "de/HIPE-data-v1.2-dev-de.tsv?raw=true",
39
+ "train": _BASE_URL_TRAIN_DEV + "de/HIPE-data-v1.2-train-de.tsv?raw=true",
40
+ },
41
+ "FR": {
42
+ "dev": _BASE_URL_TRAIN_DEV + "fr/HIPE-data-v1.2-dev-fr.tsv?raw=true",
43
+ "train": _BASE_URL_TRAIN_DEV + "fr/HIPE-data-v1.2-train-fr.tsv?raw=true",
44
+ },
45
+ }
46
+
47
+
48
class HIPE2020Config(datasets.BuilderConfig):
    """Configuration for one language of the HIPE-2020 dataset.

    Attributes:
        data_urls: mapping of split name ("train"/"dev") to download URL.
    """

    def __init__(self, data_urls, **kwargs):
        """Create a HIPE-2020 builder config.

        Args:
            data_urls: split-name -> URL mapping for this language.
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
        self.data_urls = data_urls
59
+
60
+
61
class Conll2003(datasets.GeneratorBasedBuilder):
    """Builder for the HIPE-2020 historical-newspaper NER dataset.

    NOTE(review): the class name ``Conll2003`` looks like a leftover from the
    CoNLL-2003 script this loader was adapted from; it is kept unchanged so
    external references keep working, but it builds HIPE-2020 data.
    """

    BUILDER_CONFIGS = [
        HIPE2020Config(
            name="en",
            data_urls=_URLs["EN"],
            version=datasets.Version("1.0.0"),
            description="HIPE dataset covering English",
        ),
        HIPE2020Config(
            name="de",
            data_urls=_URLs["DE"],
            version=datasets.Version("1.0.0"),
            description="HIPE dataset covering German",
        ),
        HIPE2020Config(
            name="fr",
            data_urls=_URLs["FR"],
            version=datasets.Version("1.0.0"),
            description="HIPE dataset covering French",
        ),
    ]

    def _info(self):
        """Return the dataset metadata (features, description, citation)."""
        # The coarse literal and metonymic NE columns share one IOB2 label set.
        ne_coarse_labels = [
            "O",
            "B-loc",
            "B-org",
            "B-pers",
            "B-prod",
            "B-time",
            "I-loc",
            "I-org",
            "I-pers",
            "I-prod",
            "I-time",
        ]
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "NE_COARSE_LIT": datasets.Sequence(
                        datasets.features.ClassLabel(names=list(ne_coarse_labels))
                    ),
                    "NE_COARSE_METO_tags": datasets.Sequence(
                        datasets.features.ClassLabel(names=list(ne_coarse_labels))
                    ),
                    # True when the MISC column flags "NoSpaceAfter" for the token.
                    "no_space_after": datasets.Sequence(datasets.Value("bool")),
                    # True when the MISC column flags "EndOfLine" for the token.
                    "end_of_line": datasets.Sequence(datasets.Value("bool")),
                }
            ),
            supervised_keys=None,
            homepage="TODO",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-split TSV files and declare the splits.

        English only provides a dev file, so it yields a single validation
        split; German and French additionally get a train split.
        """
        downloaded_files = dl_manager.download_and_extract(self.config.data_urls)
        split_generators = [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"]},
            ),
            # TODO: add a TEST SplitGenerator once test data is available.
        ]
        if self.config.name != "en":
            # Train split comes first, matching the original ordering.
            split_generators.insert(
                0,
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": downloaded_files["train"]},
                ),
            )
        return split_generators

    def _generate_examples(self, filepath):
        """Parse one HIPE-2020 TSV file into examples.

        Examples are runs of token rows delimited by comment lines (starting
        with ``#``) or blank lines.

        Args:
            filepath: local path of the downloaded TSV file.

        Yields:
            ``(guid, example_dict)`` pairs matching the features in ``_info``.
        """
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            NE_COARSE_LIT_tags = []
            NE_COARSE_METO_tags = []
            no_space_after = []
            end_of_line = []
            for line in f:
                # Skip the column-header row. The original check only matched
                # a space-separated header; since the file is a TSV the header
                # may be tab-separated, so match both forms here.
                if line.startswith("TOKEN") and "NE-COARSE-LIT" in line:
                    continue
                if line.startswith("#") or line == "\n":
                    # Separator: flush the accumulated example, if any.
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "NE_COARSE_LIT": NE_COARSE_LIT_tags,
                            "NE_COARSE_METO_tags": NE_COARSE_METO_tags,
                            "no_space_after": no_space_after,
                            "end_of_line": end_of_line,
                        }
                        guid += 1
                        tokens = []
                        NE_COARSE_LIT_tags = []
                        NE_COARSE_METO_tags = []
                        no_space_after = []
                        end_of_line = []
                else:
                    # HIPE 2020 rows are tab separated:
                    # TOKEN NE-COARSE-LIT NE-COARSE-METO NE-FINE-LIT
                    # NE-FINE-METO NE-FINE-COMP NE-NESTED NEL-LIT NEL-METO MISC
                    splits = line.split("\t")
                    tokens.append(splits[0])
                    NE_COARSE_LIT_tags.append(splits[1])
                    NE_COARSE_METO_tags.append(splits[2])
                    misc = splits[-1]
                    no_space_after.append("NoSpaceAfter" in misc)
                    end_of_line.append("EndOfLine" in misc)

            # Last example. Only emit it when the file did not end on a
            # separator line (the original unconditionally yielded here,
            # producing a spurious empty example in that case).
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "NE_COARSE_LIT": NE_COARSE_LIT_tags,
                    "NE_COARSE_METO_tags": NE_COARSE_METO_tags,
                    "no_space_after": no_space_after,
                    "end_of_line": end_of_line,
                }