Modalities: Text
Languages: English
Libraries: Datasets

urbija committed · Commit bb2457a · 1 parent: 0f32130

Create few-nerd.py

Files changed (1):
  1. few-nerd.py +315 -0
few-nerd.py ADDED (new file, 315 lines):
import os
import json
import datasets
from tqdm import tqdm


_CITATION = """
@inproceedings{ding2021few,
  title={Few-NERD: A Few-Shot Named Entity Recognition Dataset},
  author={Ding, Ning and Xu, Guangwei and Chen, Yulin and Wang, Xiaobin and Han, Xu and Xie,
          Pengjun and Zheng, Hai-Tao and Liu, Zhiyuan},
  booktitle={ACL-IJCNLP},
  year={2021}
}
"""

_DESCRIPTION = """
Few-NERD is a large-scale, fine-grained, manually annotated named entity recognition dataset
which contains 8 coarse-grained types, 66 fine-grained types, 188,200 sentences, 491,711 entities
and 4,601,223 tokens. Three benchmark tasks are built: one supervised, Few-NERD (SUP), and
two few-shot, Few-NERD (INTRA) and Few-NERD (INTER).
"""

# the original data files (zip of .txt) can be downloaded from Tsinghua Cloud
_URLs = {
    "supervised": "https://cloud.tsinghua.edu.cn/f/09265750ae6340429827/?dl=1",
    "intra": "https://cloud.tsinghua.edu.cn/f/a0d3efdebddd4412b07c/?dl=1",
    "inter": "https://cloud.tsinghua.edu.cn/f/165693d5e68b43558f9b/?dl=1",
}

# label ids for coarse-grained (NER_TAGS_DICT) and fine-grained (FINE_NER_TAGS_DICT) tags
NER_TAGS_DICT = {
    "O": 0,
    "art": 1,
    "building": 2,
    "event": 3,
    "location": 4,
    "organization": 5,
    "other": 6,
    "person": 7,
    "product": 8,
}

FINE_NER_TAGS_DICT = {
    "O": 0,
    "art-broadcastprogram": 1,
    "art-film": 2,
    "art-music": 3,
    "art-other": 4,
    "art-painting": 5,
    "art-writtenart": 6,
    "building-airport": 7,
    "building-hospital": 8,
    "building-hotel": 9,
    "building-library": 10,
    "building-other": 11,
    "building-restaurant": 12,
    "building-sportsfacility": 13,
    "building-theater": 14,
    "event-attack/battle/war/militaryconflict": 15,
    "event-disaster": 16,
    "event-election": 17,
    "event-other": 18,
    "event-protest": 19,
    "event-sportsevent": 20,
    "location-GPE": 21,
    "location-bodiesofwater": 22,
    "location-island": 23,
    "location-mountain": 24,
    "location-other": 25,
    "location-park": 26,
    "location-road/railway/highway/transit": 27,
    "organization-company": 28,
    "organization-education": 29,
    "organization-government/governmentagency": 30,
    "organization-media/newspaper": 31,
    "organization-other": 32,
    "organization-politicalparty": 33,
    "organization-religion": 34,
    "organization-showorganization": 35,
    "organization-sportsleague": 36,
    "organization-sportsteam": 37,
    "other-astronomything": 38,
    "other-award": 39,
    "other-biologything": 40,
    "other-chemicalthing": 41,
    "other-currency": 42,
    "other-disease": 43,
    "other-educationaldegree": 44,
    "other-god": 45,
    "other-language": 46,
    "other-law": 47,
    "other-livingthing": 48,
    "other-medical": 49,
    "person-actor": 50,
    "person-artist/author": 51,
    "person-athlete": 52,
    "person-director": 53,
    "person-other": 54,
    "person-politician": 55,
    "person-scholar": 56,
    "person-soldier": 57,
    "product-airplane": 58,
    "product-car": 59,
    "product-food": 60,
    "product-game": 61,
    "product-other": 62,
    "product-ship": 63,
    "product-software": 64,
    "product-train": 65,
    "product-weapon": 66,
}


class FewNERDConfig(datasets.BuilderConfig):
    """BuilderConfig for FewNERD"""

    def __init__(self, **kwargs):
        """BuilderConfig for FewNERD.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(FewNERDConfig, self).__init__(**kwargs)


class FewNERD(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        FewNERDConfig(name="supervised", description="Fully supervised setting."),
        FewNERDConfig(
            name="inter",
            description="Few-shot setting. Each file contains all 8 coarse "
            "types but different fine-grained types.",
        ),
        FewNERDConfig(
            name="intra", description="Few-shot setting. Randomly split by coarse type."
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.features.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.features.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "art",
                                "building",
                                "event",
                                "location",
                                "organization",
                                "other",
                                "person",
                                "product",
                            ]
                        )
                    ),
                    "fine_ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "art-broadcastprogram",
                                "art-film",
                                "art-music",
                                "art-other",
                                "art-painting",
                                "art-writtenart",
                                "building-airport",
                                "building-hospital",
                                "building-hotel",
                                "building-library",
                                "building-other",
                                "building-restaurant",
                                "building-sportsfacility",
                                "building-theater",
                                "event-attack/battle/war/militaryconflict",
                                "event-disaster",
                                "event-election",
                                "event-other",
                                "event-protest",
                                "event-sportsevent",
                                "location-GPE",
                                "location-bodiesofwater",
                                "location-island",
                                "location-mountain",
                                "location-other",
                                "location-park",
                                "location-road/railway/highway/transit",
                                "organization-company",
                                "organization-education",
                                "organization-government/governmentagency",
                                "organization-media/newspaper",
                                "organization-other",
                                "organization-politicalparty",
                                "organization-religion",
                                "organization-showorganization",
                                "organization-sportsleague",
                                "organization-sportsteam",
                                "other-astronomything",
                                "other-award",
                                "other-biologything",
                                "other-chemicalthing",
                                "other-currency",
                                "other-disease",
                                "other-educationaldegree",
                                "other-god",
                                "other-language",
                                "other-law",
                                "other-livingthing",
                                "other-medical",
                                "person-actor",
                                "person-artist/author",
                                "person-athlete",
                                "person-director",
                                "person-other",
                                "person-politician",
                                "person-scholar",
                                "person-soldier",
                                "product-airplane",
                                "product-car",
                                "product-food",
                                "product-game",
                                "product-other",
                                "product-ship",
                                "product-software",
                                "product-train",
                                "product-weapon",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://ningding97.github.io/fewnerd/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(
                        urls_to_download[self.config.name],
                        self.config.name,
                        "train.txt",
                    )
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(
                        urls_to_download[self.config.name], self.config.name, "dev.txt"
                    )
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(
                        urls_to_download[self.config.name], self.config.name, "test.txt"
                    )
                },
            ),
        ]

    def _generate_examples(self, filepath=None):
        # only plain-text .txt data files are expected
        assert filepath[-4:] == ".txt"

        # each non-empty line is "<token> <fine-grained tag>" (whitespace-separated);
        # sentences are separated by blank lines, e.g.:
        #   London    location-GPE
        #   is        O
        num_lines = sum(1 for _ in open(filepath, encoding="utf-8"))
        id = 0

        with open(filepath, "r", encoding="utf-8") as f:
            tokens, ner_tags, fine_ner_tags = [], [], []
            for line in tqdm(f, total=num_lines):
                line = line.strip().split()

                if line:
                    assert len(line) == 2
                    token, fine_ner_tag = line
                    # the coarse-grained tag is the prefix of the fine-grained tag
                    ner_tag = fine_ner_tag.split("-")[0]

                    tokens.append(token)
                    ner_tags.append(NER_TAGS_DICT[ner_tag])
                    fine_ner_tags.append(FINE_NER_TAGS_DICT[fine_ner_tag])

                elif tokens:
                    # a blank line ends the current sentence: emit the accumulated record
                    record = {
                        "tokens": tokens,
                        "id": str(id),
                        "ner_tags": ner_tags,
                        "fine_ner_tags": fine_ner_tags,
                    }
                    tokens, ner_tags, fine_ner_tags = [], [], []
                    id += 1
                    yield record["id"], record

            # emit the last sentence if the file does not end with a blank line
            if tokens:
                record = {
                    "tokens": tokens,
                    "id": str(id),
                    "ner_tags": ner_tags,
                    "fine_ner_tags": fine_ner_tags,
                }
                yield record["id"], record
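
Once this loading script is in place, the dataset can be consumed through the standard datasets API. The following is a minimal usage sketch, not part of the commit: the script path is a placeholder for wherever few-nerd.py ends up, any of the three config names ("supervised", "inter", "intra") can be passed, and recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets.

from datasets import load_dataset

# placeholder path: point this at few-nerd.py (or at the Hub repo that hosts it)
dataset = load_dataset("few-nerd.py", name="supervised")

print(dataset)  # DatasetDict with train / validation / test splits

example = dataset["train"][0]
print(example["tokens"])         # list of token strings for one sentence
print(example["ner_tags"])       # coarse-grained label ids (0 = "O", 1 = "art", ...)
print(example["fine_ner_tags"])  # fine-grained label ids (0 = "O", 1 = "art-broadcastprogram", ...)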