gabrielaltay committed
Commit 17f3151 · 1 Parent(s): 119f6eb

upload hubscripts/genetag_hub.py to hub from bigbio repo

Files changed (1)
  1. genetag.py +381 -0
genetag.py ADDED
@@ -0,0 +1,381 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ Named entity recognition (NER) is an important first step for text mining the biomedical literature.
+ Evaluating the performance of biomedical NER systems is impossible without a standardized test corpus.
+ The annotation of such a corpus for gene/protein name NER is a difficult process due to the complexity
+ of gene/protein names. We describe the construction and annotation of GENETAG, a corpus of 20K MEDLINE®
+ sentences for gene/protein NER. 15K GENETAG sentences were used for the BioCreAtIvE Task 1A Competition.
+ """
+
+
+ import re
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @article{Tanabe2005,
+   author = {Lorraine Tanabe and Natalie Xie and Lynne H Thom and Wayne Matten and W John Wilbur},
+   title = {{GENETAG}: a tagged corpus for gene/protein named entity recognition},
+   journal = {{BMC} Bioinformatics},
+   volume = {6},
+   year = {2005},
+   url = {https://doi.org/10.1186/1471-2105-6-S1-S3},
+   doi = {10.1186/1471-2105-6-s1-s3},
+   biburl = {},
+   bibsource = {}
+ }
+ """
+
+ _DATASETNAME = "genetag"
+ _DISPLAYNAME = "GENETAG"
+
+ _DESCRIPTION = """\
+ Named entity recognition (NER) is an important first step for text mining the biomedical literature.
+ Evaluating the performance of biomedical NER systems is impossible without a standardized test corpus.
+ The annotation of such a corpus for gene/protein name NER is a difficult process due to the complexity
+ of gene/protein names. We describe the construction and annotation of GENETAG, a corpus of 20K MEDLINE®
+ sentences for gene/protein NER. 15K GENETAG sentences were used for the BioCreAtIvE Task 1A Competition.
+ """
+
+ _HOMEPAGE = "https://github.com/openbiocorpora/genetag"
+
+ _LICENSE = 'National Center for Biotechnology Information PUBLIC DOMAIN NOTICE'
+
+ _BASE_URL = (
+     "https://raw.githubusercontent.com/openbiocorpora/genetag/master/original-data/"
+ )
+
+ _URLS = {
+     "test": {
+         "correct": f"{_BASE_URL}test/Correct.Data",
+         "gold": f"{_BASE_URL}test/Gold.format",
+         "text": f"{_BASE_URL}test/TOKENIZED_CORPUS",
+         "postagspath": f"{_BASE_URL}test/TAGGED_GENE_CORPUS",
+     },
+     "train": {
+         "correct": f"{_BASE_URL}train/Correct.Data",
+         "gold": f"{_BASE_URL}train/Gold.format",
+         "text": f"{_BASE_URL}train/TOKENIZED_CORPUS",
+         "postagspath": f"{_BASE_URL}train/TAGGED_GENE_CORPUS",
+     },
+     "round1": {
+         "correct": f"{_BASE_URL}round1/Correct.Data",
+         "gold": f"{_BASE_URL}round1/Gold.format",
+         "text": f"{_BASE_URL}round1/TOKENIZED_CORPUS",
+         "postagspath": f"{_BASE_URL}round1/TAGGED_GENE_CORPUS",
+     },
+ }
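+
+ # Each split directory in the openbiocorpora mirror ships four files; judging by
+ # how they are used below: Correct.Data ("correct" annotations), Gold.format
+ # ("gold" annotations), TOKENIZED_CORPUS (space-tokenized sentence text) and
+ # TAGGED_GENE_CORPUS (the tagged text the POS tags are read from).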
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class GenetagDataset(datasets.GeneratorBasedBuilder):
+     """GENETAG is a corpus of 15K MEDLINE sentences with annotations for gene/protein NER"""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = []
+     for annot_type in ["gold", "correct"]:
+         BUILDER_CONFIGS.append(
+             BigBioConfig(
+                 name=f"genetag{annot_type}_source",
+                 version=SOURCE_VERSION,
+                 description=f"GENETAG {annot_type} annotation source schema",
+                 schema="source",
+                 subset_id=f"genetag{annot_type}",
+             )
+         )
+
+         BUILDER_CONFIGS.append(
+             BigBioConfig(
+                 name=f"genetag{annot_type}_bigbio_kb",
+                 version=BIGBIO_VERSION,
+                 description=f"GENETAG {annot_type} annotation bigbio schema",
+                 schema="bigbio_kb",
+                 subset_id=f"genetag{annot_type}",
+             )
+         )
+
+     DEFAULT_CONFIG_NAME = "genetaggold_source"
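+     # The loop above registers four configs: genetaggold_source,
+     # genetaggold_bigbio_kb, genetagcorrect_source and genetagcorrect_bigbio_kb.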
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "doc_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "tokenized_text": datasets.Sequence(datasets.Value("string")),
+                     "pos_tags": datasets.Sequence(datasets.Value("string")),
+                     "entities": [
+                         {
+                             "token_offsets": datasets.Sequence(
+                                 [datasets.Value("int32")]
+                             ),
+                             "text": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "entity_id": datasets.Value("string"),
+                         }
+                     ],
+                 }
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS
+         data_dir = dl_manager.download_and_extract(urls)
+         # "correct" or "gold" annotations
+         annotation_type = self.config.subset_id.split("genetag")[-1]
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # Whatever you put in gen_kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir["train"]["text"],
+                     "annotationpath": data_dir["train"][annotation_type],
+                     "postagspath": data_dir["train"]["postagspath"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": data_dir["test"]["text"],
+                     "annotationpath": data_dir["test"][annotation_type],
+                     "postagspath": data_dir["test"]["postagspath"],
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": data_dir["round1"]["text"],
+                     "annotationpath": data_dir["round1"][annotation_type],
+                     "postagspath": data_dir["round1"]["postagspath"],
+                     "split": "dev",
+                 },
+             ),
+         ]
+
+     def _generate_examples(
+         self, filepath, annotationpath, postagspath, split: str
+     ) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         corpus, annotations = self._read_files(filepath, annotationpath, postagspath)
+
+         if self.config.schema == "source":
+             source_examples = self._parse_annotations_source(corpus, annotations, split)
+             for uid, doc_id in enumerate(source_examples):
+                 yield uid, source_examples[doc_id]
+
+         elif self.config.schema == "bigbio_kb":
+             bb_kb_examples = self._parse_annotations_bb(corpus, annotations, split)
+             for uid, doc_id in enumerate(bb_kb_examples):
+                 yield uid, bb_kb_examples[doc_id]
+
+     def _read_files(self, filepath, annotation_path, postagspath):
+         """
+         Reads text corpus and annotations
+         """
+         corpus, annotations = dict(), dict()
+
+         # read corpus
+         with open(filepath, "r") as texts:
+             for line in texts:
+                 # "@@95229799480" from "@@95229799480 Cervicovaginal ..."
+                 sentence_id = re.search(r"@@\d+", line).group(0)
+                 # remove "/TAG" suffix and "./"
+                 text = re.sub(r"(/TAG|\/\.)", "", line).split(sentence_id)[-1].strip()
+                 corpus[sentence_id] = {
+                     "text": text,
+                     "tokenized_text": text.split(),  # every token is space separated at source
+                 }
+
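+         # read POS tags: per the regex below, each token in TAGGED_GENE_CORPUS
+         # carries a "/<TAG>" suffix (upper-case tag or punctuation), collected
+         # here in sentence order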
+         with open(postagspath, "r") as texts:
+             for line in texts:
+                 sentence_id = re.search(r"@@\d+", line).group(0)
+                 _tags = re.findall(r"(\/[A-Z]+|\/[.,:()\"]+)", line)
+                 pos_tags = [i.replace("/", "") for i in _tags]
+                 corpus[sentence_id]["pos_tags"] = pos_tags
+
+         # read annotations
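+         # each line has the shape "<sentence_id>|<token_start> <token_end>|<entity text>";
+         # lines that do not split into exactly three "|"-separated fields are skipped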
+         with open(annotation_path, "r") as annots:
+             for line in annots:
+                 row = line.split("|")
+                 if len(row) == 3:
+                     sentence_id = row[0].strip()
+                     annot = row[2].strip()
+                     start = int(row[1].split()[0])
+                     end = int(row[1].split()[1])
+                     if sentence_id in annotations:
+                         annotations[sentence_id].append(
+                             {"text": annot, "token_start": start, "token_end": end}
+                         )
+                     else:
+                         annotations[sentence_id] = [
+                             {"text": annot, "token_start": start, "token_end": end}
+                         ]
+
+         return corpus, annotations
+
+     def _parse_annotations_source(self, corpus, annotations, split) -> Dict:
+         """
+         Reads source annotations
+         """
+         # Convert to source schema
+         source_examples = {}
+         for sent_id in corpus:
+
+             text = corpus[sent_id]["text"]
+             source_examples[sent_id] = {
+                 "doc_id": sent_id,
+                 "text": text,
+                 "tokenized_text": corpus[sent_id]["tokenized_text"],
+                 "pos_tags": corpus[sent_id]["pos_tags"],
+                 "entities": [],
+             }
+
+             if annotations.get(sent_id):
+                 for uid, entity in enumerate(annotations[sent_id]):
+                     source_examples[sent_id]["entities"].append(
+                         {
+                             "text": entity["text"],
+                             "type": "NEWGENE",
+                             "token_offsets": [
+                                 [entity["token_start"], entity["token_end"]]
+                             ],
+                             "entity_id": f"{sent_id}_{uid+1}",
+                         }
+                     )
+
+         return source_examples
+
+     def _parse_annotations_bb(self, corpus, annotations, split) -> Dict:
+         """
+         Convert source annotations to bigbio schema annotations
+         """
+         bb_examples = {}
+
+         for sent_id in corpus:
+             text = corpus[sent_id]["text"]
+             bb_examples[sent_id] = {
+                 "id": sent_id,
+                 "document_id": sent_id,
+                 "passages": [
+                     {
+                         "id": f"{sent_id}_text",
+                         "type": "sentence",
+                         "text": [text],
+                         "offsets": [[0, len(text)]],
+                     }
+                 ],
+                 "entities": self._add_entities_bb(sent_id, annotations[sent_id], text)
+                 if annotations.get(sent_id)
+                 else [],
+                 "events": [],
+                 "coreferences": [],
+                 "relations": [],
+             }
+
+         return bb_examples
+
+     def _add_entities_bb(self, doc_id, annotations, text) -> List:
+         """
+         Returns entities in bigbio schema when given annotations
+         (with token indices) for some text, e.g.:
+
+         doc_id: @@21234669976
+         annotations: [{'text': 'HLH', 'token_start': 9, 'token_end': 9},
+                       {'text': 'AP-4 HLH', 'token_start': 8, 'token_end': 9},
+                       {'text': 'AP-4 HLH motif', 'token_start': 8, 'token_end': 10}]
+         text: 'Like other members of this family , the AP-4 HLH motif and the adjacent
+                basic domain are necessary and sufficient to confer site-specific DNA binding .'
+
+         returns: [
+             {'offsets': [[45, 48]],
+              'text': ['HLH'],
+              'type': 'NEWGENE',
+              'normalized': [],
+              'id': '@@21234669976_1'},
+             {'offsets': [[40, 48]],
+              'text': ['AP-4 HLH'],
+              'type': 'NEWGENE',
+              'normalized': [],
+              'id': '@@21234669976_2'},
+             {'offsets': [[40, 54]],
+              'text': ['AP-4 HLH motif'],
+              'type': 'NEWGENE',
+              'normalized': [],
+              'id': '@@21234669976_3'}
+         ]
+
+         Uses the given token-level indices to pick the correct entity spans
+         and assign character offsets.
+         """
+
+         entities = []
+         for uid, entity in enumerate(annotations):
+             start = entity["token_start"]
+             end = entity["token_end"]
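+             # scan left to right for an occurrence of the entity string whose
+             # character position also lines up with the annotated token span
+             # (guarding against a match on an earlier identical substring)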
+             for i in range(len(text)):
+
+                 if text[i:].startswith(entity["text"]):
+                     # match substring using character and word index
+                     token_end = end + 1
+                     token_end_char = i + len(entity["text"])
+                     if (
+                         " ".join(text.split()[start:token_end])
+                         == text[i:token_end_char]
+                     ):
+                         annot = {
+                             "offsets": [[i, i + len(entity["text"])]],
+                             "text": [entity["text"]],
+                             "type": "NEWGENE",
+                             "normalized": [],
+                         }
+                         # compare against id-stripped copies: already appended
+                         # entities carry an "id" key and would otherwise never
+                         # match, making this dedup check a no-op
+                         if all(
+                             {k: v for k, v in e.items() if k != "id"} != annot
+                             for e in entities
+                         ):
+                             annot["id"] = f"{doc_id}_{uid+1}"
+                             entities.append(annot)
+                             break
+         return entities
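+
+
+ # Rough usage sketch (illustrative, not part of the loader): assuming this file is
+ # saved as genetag.py next to the bigbiohub.py helper from the bigbio repo,
+ #
+ #     import datasets
+ #     dset = datasets.load_dataset("genetag.py", name="genetaggold_bigbio_kb")
+ #     print(dset["train"][0])
+ #
+ # would materialize each sentence with its NEWGENE entity spans in the bigbio_kb
+ # schema.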