gabrielaltay committed on
Commit
333b52e
1 Parent(s): fa3920f

upload hubscripts/nlm_gene_hub.py to hub from bigbio repo

Browse files
Files changed (1) hide show
  1. nlm_gene.py +265 -0
nlm_gene.py ADDED
@@ -0,0 +1,265 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hugging Face loader script for the NLM-Gene corpus (BioC XML, 550 PubMed articles)."""
import collections  # NOTE(review): appears unused in this file — confirm before removing
import itertools  # NOTE(review): appears unused in this file — confirm before removing
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
from bioc import biocxml

from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks
# FIX: NLMGeneDataset._get_bioc_entity() calls this helper, but the original
# file never imported it, raising NameError during example generation.
from .bigbiohub import get_texts_and_offsets_from_bioc_ann
from .bigbiohub import kb_features

# Corpus metadata consumed by the BigBio harness.
_LANGUAGES = ['English']
_PUBMED = True
_LOCAL = False
_CITATION = """\
@article{islamaj2021nlm,
    title = {
        NLM-Gene, a richly annotated gold standard dataset for gene entities that
        addresses ambiguity and multi-species gene recognition
    },
    author = {
        Islamaj, Rezarta and Wei, Chih-Hsuan and Cissel, David and Miliaras,
        Nicholas and Printseva, Olga and Rodionov, Oleg and Sekiya, Keiko and Ward,
        Janice and Lu, Zhiyong
    },
    year = 2021,
    journal = {Journal of Biomedical Informatics},
    publisher = {Elsevier},
    volume = 118,
    pages = 103779
}
"""

_DATASETNAME = "nlm_gene"
_DISPLAYNAME = "NLM-Gene"

_DESCRIPTION = """\
NLM-Gene consists of 550 PubMed articles, from 156 journals, and contains more \
than 15 thousand unique gene names, corresponding to more than five thousand \
gene identifiers (NCBI Gene taxonomy). This corpus contains gene annotation data \
from 28 organisms. The annotated articles contain on average 29 gene names, and \
10 gene identifiers per article. These characteristics demonstrate that this \
article set is an important benchmark dataset to test the accuracy of gene \
recognition algorithms both on multi-species and ambiguous data. The NLM-Gene \
corpus will be invaluable for advancing text-mining techniques for gene \
identification tasks in biomedical text.
"""

_HOMEPAGE = "https://zenodo.org/record/5089049"

_LICENSE = 'Creative Commons Zero v1.0 Universal'

# Both configs download the same Zenodo archive; only the parsing schema differs.
_URLS = {
    "source": "https://zenodo.org/record/5089049/files/NLM-Gene-Corpus.zip",
    "bigbio_kb": "https://zenodo.org/record/5089049/files/NLM-Gene-Corpus.zip",
}

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION]

_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"
class NLMGeneDataset(datasets.GeneratorBasedBuilder):
    """NLM-Gene Dataset for gene entities.

    Exposes two configurations: ``nlm_gene_source`` (a variation on the BioC
    format) and ``nlm_gene_bigbio_kb`` (the harmonized BigBio KB schema).
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="nlm_gene_source",
            version=SOURCE_VERSION,
            description="NlM Gene source schema",
            schema="source",
            subset_id="nlm_gene",
        ),
        BigBioConfig(
            name="nlm_gene_bigbio_kb",
            version=BIGBIO_VERSION,
            description="NlM Gene BigBio schema",
            schema="bigbio_kb",
            subset_id="nlm_gene",
        ),
    ]

    DEFAULT_CONFIG_NAME = "nlm_gene_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with schema-specific features.

        FIX: the original body repeated the ``if self.config.schema ==
        "source":`` test twice in a row; the redundant check is removed.
        An unknown schema now raises ``ValueError`` instead of falling
        through to an unbound ``features`` (NameError).
        """
        if self.config.schema == "source":
            # this is a variation on the BioC format
            features = datasets.Features(
                {
                    "passages": [
                        {
                            "document_id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "text": datasets.Value("string"),
                            "entities": [
                                {
                                    "id": datasets.Value("string"),
                                    "offsets": [[datasets.Value("int32")]],
                                    "text": [datasets.Value("string")],
                                    "type": datasets.Value("string"),
                                    "normalized": [
                                        {
                                            "db_name": datasets.Value("string"),
                                            "db_id": datasets.Value("string"),
                                        }
                                    ],
                                }
                            ],
                        }
                    ]
                }
            )
        elif self.config.schema == "bigbio_kb":
            features = kb_features
        else:
            raise ValueError(f"Unsupported schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators.

        Downloads/extracts the Zenodo archive once; train and test splits are
        selected by PMID list files inside the extracted ``Corpus`` directory.
        """
        urls = _URLS[self.config.schema]
        data_dir = Path(dl_manager.download_and_extract(urls))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir / "Corpus",
                    "file_name": "Pmidlist.Train.txt",
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir / "Corpus",
                    "file_name": "Pmidlist.Test.txt",
                    "split": "test",
                },
            ),
        ]

    @staticmethod
    def _get_bioc_entity(
        span, db_id_key="NCBI Gene identifier", splitters=",;|-"
    ) -> dict:
        """Parse a BioC entity annotation into a flat dict.

        ``db_ids`` may contain several identifiers joined by one of
        ``splitters``; defaults to "-1" when the key is absent.
        """
        offsets, texts = get_texts_and_offsets_from_bioc_ann(span)
        db_ids = span.infons.get(db_id_key, "-1")
        # Find connector between db_ids for the normalization, if not found, use default.
        # NOTE: when several splitters occur, the last one in `splitters` wins
        # (original behavior, preserved).
        connector = "|"
        for splitter in list(splitters):
            if splitter in db_ids:
                connector = splitter
        normalized = [
            {"db_name": db_id_key, "db_id": db_id} for db_id in db_ids.split(connector)
        ]

        return {
            "id": span.id,
            "offsets": offsets,
            "text": texts,
            "type": span.infons["type"],
            "normalized": normalized,
        }

    def _generate_examples(
        self, filepath: Path, file_name: str, split: str
    ) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples.

        ``file_name`` lists one PMID per line; each PMID maps to a BioC XML
        file under ``FINAL/``.
        """

        if self.config.schema == "source":
            with open(filepath / file_name, encoding="utf-8") as f:
                contents = f.readlines()
            # FIX: key on a running document counter rather than the file
            # index — the original reused the file index for every document
            # in a file, which would raise DuplicatedKeysError if a BioC
            # file ever held more than one document. Keys are identical in
            # the usual one-document-per-file case.
            uid = 0
            for content in contents:
                file_id = content.replace("\n", "")
                file_path = filepath / "FINAL" / f"{file_id}.BioC.XML"
                reader = biocxml.BioCXMLDocumentReader(file_path.as_posix())
                for xdoc in reader:
                    yield uid, {
                        "passages": [
                            {
                                "document_id": xdoc.id,
                                "type": passage.infons["type"],
                                "text": passage.text,
                                "entities": [
                                    self._get_bioc_entity(span)
                                    for span in passage.annotations
                                ],
                            }
                            for passage in xdoc.passages
                        ]
                    }
                    uid += 1
        elif self.config.schema == "bigbio_kb":
            with open(filepath / file_name, encoding="utf-8") as f:
                contents = f.readlines()
            uid = 0  # global unique id for documents, passages and entities
            for i, content in enumerate(contents):
                file_id = content.replace("\n", "")
                file_path = filepath / "FINAL" / f"{file_id}.BioC.XML"
                reader = biocxml.BioCXMLDocumentReader(file_path.as_posix())
                for xdoc in reader:
                    data = {
                        "id": uid,
                        "document_id": xdoc.id,
                        "passages": [],
                        "entities": [],
                        "relations": [],
                        "events": [],
                        "coreferences": [],
                    }
                    uid += 1

                    char_start = 0
                    # passages must not overlap and spans must cover the entire document
                    for passage in xdoc.passages:
                        offsets = [[char_start, char_start + len(passage.text)]]
                        # +1 accounts for the newline joining passages
                        char_start = char_start + len(passage.text) + 1
                        data["passages"].append(
                            {
                                "id": uid,
                                "type": passage.infons["type"],
                                "text": [passage.text],
                                "offsets": offsets,
                            }
                        )
                        uid += 1
                    # entities
                    for passage in xdoc.passages:
                        for span in passage.annotations:
                            ent = self._get_bioc_entity(
                                span, db_id_key="NCBI Gene identifier"
                            )
                            ent["id"] = uid  # override BioC default id
                            data["entities"].append(ent)
                            uid += 1

                    yield i, data