gabrielaltay committed
Commit d685881 · 1 Parent(s): 4c9912b

upload hubscripts/ncbi_disease_hub.py to hub from bigbio repo

Files changed (1): ncbi_disease.py (+249, -0)
ncbi_disease.py ADDED
@@ -0,0 +1,249 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+The NCBI disease corpus is fully annotated at the mention and concept level to serve as a research
+resource for the biomedical natural language processing community.
+"""
+
+import os
+from typing import Dict, Iterator, List, Tuple
+
+import datasets
+from bioc import pubtator
+
+from .bigbiohub import kb_features
+from .bigbiohub import BigBioConfig
+from .bigbiohub import Tasks
+
+_LANGUAGES = ["English"]
+_PUBMED = True
+_LOCAL = False
+_CITATION = """\
+@article{Dogan2014NCBIDC,
+  title = {NCBI disease corpus: A resource for disease name recognition and concept normalization},
+  author = {Rezarta Islamaj Dogan and Robert Leaman and Zhiyong Lu},
+  year = 2014,
+  journal = {Journal of biomedical informatics},
+  volume = 47,
+  pages = {1--10}
+}
+"""
+
+_DATASETNAME = "ncbi_disease"
+_DISPLAYNAME = "NCBI Disease"
+
+_DESCRIPTION = """\
+The NCBI disease corpus is fully annotated at the mention and concept level to serve as a research
+resource for the biomedical natural language processing community.
+"""
+
+_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/CBBresearch/Dogan/DISEASE/"
+_LICENSE = "Creative Commons Zero v1.0 Universal"
+
+_URLS = {
+    _DATASETNAME: {
+        datasets.Split.TRAIN: "https://www.ncbi.nlm.nih.gov/CBBresearch/Dogan/DISEASE/NCBItrainset_corpus.zip",
+        datasets.Split.TEST: "https://www.ncbi.nlm.nih.gov/CBBresearch/Dogan/DISEASE/NCBItestset_corpus.zip",
+        datasets.Split.VALIDATION: "https://www.ncbi.nlm.nih.gov/CBBresearch/Dogan/DISEASE/NCBIdevelopset_corpus.zip",
+    }
+}
+
+_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION]
+
+_SOURCE_VERSION = "1.0.0"
+_BIGBIO_VERSION = "1.0.0"
+
+
+class NCBIDiseaseDataset(datasets.GeneratorBasedBuilder):
+    """NCBI Disease"""
+
+    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+    BUILDER_CONFIGS = [
+        BigBioConfig(
+            name="ncbi_disease_source",
+            version=SOURCE_VERSION,
+            description="NCBI Disease source schema",
+            schema="source",
+            subset_id="ncbi_disease",
+        ),
+        BigBioConfig(
+            name="ncbi_disease_bigbio_kb",
+            version=BIGBIO_VERSION,
+            description="NCBI Disease BigBio schema",
+            schema="bigbio_kb",
+            subset_id="ncbi_disease",
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = "ncbi_disease_source"
+
+    def _info(self) -> datasets.DatasetInfo:
+        if self.config.schema == "source":
+            features = datasets.Features(
+                {
+                    "pmid": datasets.Value("string"),
+                    "title": datasets.Value("string"),
+                    "abstract": datasets.Value("string"),
+                    "mentions": [
+                        {
+                            "concept_id": datasets.Value("string"),
+                            "type": datasets.Value("string"),
+                            "text": datasets.Value("string"),
+                            "offsets": datasets.Sequence(datasets.Value("int32")),
+                        }
+                    ],
+                }
+            )
+        elif self.config.schema == "bigbio_kb":
+            features = kb_features
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=str(_LICENSE),
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+        urls = _URLS[_DATASETNAME]
+        data_dir = dl_manager.download_and_extract(urls)
+
+        train_filename = "NCBItrainset_corpus.txt"
+        test_filename = "NCBItestset_corpus.txt"
+        dev_filename = "NCBIdevelopset_corpus.txt"
+
+        train_filepath = os.path.join(data_dir[datasets.Split.TRAIN], train_filename)
+        test_filepath = os.path.join(data_dir[datasets.Split.TEST], test_filename)
+        dev_filepath = os.path.join(data_dir[datasets.Split.VALIDATION], dev_filename)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepath": train_filepath,
+                    "split": "train",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "filepath": test_filepath,
+                    "split": "test",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "filepath": dev_filepath,
+                    "split": "dev",
+                },
+            ),
+        ]
+
+    def _generate_examples(
+        self, filepath: str, split: str
+    ) -> Iterator[Tuple[str, Dict]]:
+        if self.config.schema == "source":
+            for i, source_example in enumerate(self._pubtator_to_source(filepath)):
+                # Some examples are duplicated in NCBI Disease. We have to make them
+                # unique to avoid an error from datasets.
+                yield str(i) + "_" + source_example["pmid"], source_example
+
+        elif self.config.schema == "bigbio_kb":
+            seen = set()
+            for kb_example in self._pubtator_to_bigbio_kb(filepath):
+                # Some examples are duplicated in NCBI Disease. Avoid yielding more than once.
+                if kb_example["id"] in seen:
+                    continue
+                yield kb_example["id"], kb_example
+                seen.add(kb_example["id"])
+
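+    # For reference, each PubTator document block is shaped roughly like this
+    # (an illustrative sketch, not a record taken from the corpus itself):
+    #
+    #   9876543|t|An example article title
+    #   9876543|a|An example abstract sentence ...
+    #   9876543 <TAB> 0 <TAB> 10 <TAB> An example <TAB> Disease <TAB> D012345
+    #
+    # Pipe-delimited lines hold the title and abstract; each tab-delimited line
+    # holds one annotation: pmid, start offset, end offset, mention text, type,
+    # and concept id. Both converters below walk these blocks via
+    # pubtator.iterparse.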
+    @staticmethod
+    def _pubtator_to_source(filepath: str) -> Iterator[Dict]:
+        with open(filepath, "r") as f:
+            for doc in pubtator.iterparse(f):
+                source_example = {
+                    "pmid": doc.pmid,
+                    "title": doc.title,
+                    "abstract": doc.abstract,
+                    "mentions": [
+                        {
+                            "concept_id": mention.id,
+                            "type": mention.type,
+                            "text": mention.text,
+                            "offsets": [mention.start, mention.end],
+                        }
+                        for mention in doc.annotations
+                    ],
+                }
+                yield source_example
+
+    @staticmethod
+    def _pubtator_to_bigbio_kb(filepath: str) -> Iterator[Dict]:
+        with open(filepath, "r") as f:
+            for doc in pubtator.iterparse(f):
+                # Build a fresh dict per document so each yielded example is
+                # an independent object.
+                unified_example = {}
+                unified_example["id"] = doc.pmid
+                unified_example["document_id"] = doc.pmid
+
+                unified_example["passages"] = [
+                    {
+                        "id": doc.pmid + "_title",
+                        "type": "title",
+                        "text": [doc.title],
+                        "offsets": [[0, len(doc.title)]],
+                    },
+                    {
+                        "id": doc.pmid + "_abstract",
+                        "type": "abstract",
+                        "text": [doc.abstract],
+                        "offsets": [
+                            [
+                                # +1 assumes the title and abstract will be joined by a space.
+                                len(doc.title) + 1,
+                                len(doc.title) + 1 + len(doc.abstract),
+                            ]
+                        ],
+                    },
+                ]
+
+                unified_entities = []
+                for i, entity in enumerate(doc.annotations):
+                    # We need a unique identifier for this entity, so build it from
+                    # the document id and entity id.
+                    unified_entity_id = "_".join([doc.pmid, entity.id, str(i)])
+                    # Infer the normalization database from the concept identifier:
+                    # OMIM ids carry an "OMIM" prefix; everything else is MeSH.
+                    db_name = "omim" if "OMIM" in entity.id else "mesh"
+                    unified_entities.append(
+                        {
+                            "id": unified_entity_id,
+                            "type": entity.type,
+                            "text": [entity.text],
+                            "offsets": [[entity.start, entity.end]],
+                            "normalized": [{"db_name": db_name, "db_id": entity.id}],
+                        }
+                    )
+
+                unified_example["entities"] = unified_entities
+                unified_example["relations"] = []
+                unified_example["events"] = []
+                unified_example["coreferences"] = []
+
+                yield unified_example
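
For quick verification, a minimal usage sketch. The bigbio/ncbi_disease Hub path is an assumption based on this commit's destination, and newer datasets releases may additionally require trust_remote_code=True; the config names come from BUILDER_CONFIGS above:

    from datasets import load_dataset

    # Source schema: one record per PubMed abstract with its raw disease mentions.
    source = load_dataset("bigbio/ncbi_disease", name="ncbi_disease_source")

    # BigBio KB schema: the unified passages/entities layout shared across BigBio datasets.
    kb = load_dataset("bigbio/ncbi_disease", name="ncbi_disease_bigbio_kb")

    print(source["train"][0]["mentions"][0])
    print(kb["train"][0]["entities"][0])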