Languages: English
License: UMLS - Metathesaurus License Agreement
gabrielaltay committed
Commit 91edb34 · 1 Parent(s): da1e13d

upload hubscripts/msh_wsd_hub.py to hub from bigbio repo

Files changed (1):
  1. msh_wsd.py +268 -0
msh_wsd.py ADDED
@@ -0,0 +1,268 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ Evaluation of Word Sense Disambiguation (WSD) methods in the biomedical domain is difficult because the available
+ resources are either too small or too focused on specific types of entities (e.g. diseases or genes). We have
+ developed a method that can be used to automatically develop a WSD test collection using the Unified Medical Language
+ System (UMLS) Metathesaurus and the manual MeSH indexing of MEDLINE. The resulting dataset is called MSH WSD and
+ consists of 106 ambiguous abbreviations, 88 ambiguous terms and 9 that are a combination of both, for a total of 203
+ ambiguous words. Each instance containing the ambiguous word was assigned a CUI from the 2009AB version of the UMLS.
+ For each ambiguous term/abbreviation, the data set contains a maximum of 100 instances per sense obtained from
+ MEDLINE, totaling 37,888 ambiguity cases in 37,090 MEDLINE citations.
+
+ Note from the author on how to load the dataset:
+ 1) Download the file MSHCorpus.zip (link "MSHWSD Data Set") from
+    https://lhncbc.nlm.nih.gov/ii/areas/WSD/collaboration.html
+ 2) Set the kwarg data_dir to the directory containing MSHCorpus.zip
+ """
+
+ import itertools as it
+ import os
+ import re
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = True
+ _CITATION = """\
+ @article{jimeno2011exploiting,
+   title={Exploiting MeSH indexing in MEDLINE to generate a data set for word sense disambiguation},
+   author={Jimeno-Yepes, Antonio J and McInnes, Bridget T and Aronson, Alan R},
+   journal={BMC bioinformatics},
+   volume={12},
+   number={1},
+   pages={1--14},
+   year={2011},
+   publisher={BioMed Central}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Evaluation of Word Sense Disambiguation (WSD) methods in the biomedical domain is difficult because the available
+ resources are either too small or too focused on specific types of entities (e.g. diseases or genes). We have
+ developed a method that can be used to automatically develop a WSD test collection using the Unified Medical Language
+ System (UMLS) Metathesaurus and the manual MeSH indexing of MEDLINE. The resulting dataset is called MSH WSD and
+ consists of 106 ambiguous abbreviations, 88 ambiguous terms and 9 that are a combination of both, for a total of 203
+ ambiguous words. Each instance containing the ambiguous word was assigned a CUI from the 2009AB version of the UMLS.
+ For each ambiguous term/abbreviation, the data set contains a maximum of 100 instances per sense obtained from
+ MEDLINE, totaling 37,888 ambiguity cases in 37,090 MEDLINE citations.
+ """
+
+ _DATASETNAME = "msh_wsd"
+ _DISPLAYNAME = "MSH WSD"
+
+ _HOMEPAGE = "https://lhncbc.nlm.nih.gov/ii/areas/WSD/collaboration.html"
+
+ _LICENSE = 'UMLS - Metathesaurus License Agreement'
+
+ _URLS = {_DATASETNAME: ""}
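+ # there is no public download URL: the corpus must be fetched manually
+ # (see the note in the module docstring), so the URL entry is left empty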
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_DISAMBIGUATION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ @dataclass
+ class MshWsdBigBioConfig(BigBioConfig):
+     schema: str = "source"
+     name: str = "msh_wsd_source"
+     version: datasets.Version = datasets.Version(_SOURCE_VERSION)
+     description: str = "MSH-WSD source schema"
+     subset_id: str = "msh_wsd"
+
+
+ class MshWsdDataset(datasets.GeneratorBasedBuilder):
+     """Biomedical Word Sense Disambiguation (WSD)."""
+
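+     # single running counter used to assign unique ids to documents,
+     # passages, and entities in the bigbio_kb schema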
+     uid = it.count(0)
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         MshWsdBigBioConfig(
+             name="msh_wsd_source",
+             version=SOURCE_VERSION,
+             description="MSH-WSD source schema",
+             schema="source",
+             subset_id="msh_wsd",
+         ),
+         MshWsdBigBioConfig(
+             name="msh_wsd_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="MSH-WSD BigBio schema",
+             schema="bigbio_kb",
+             subset_id="msh_wsd",
+         ),
+     ]
+
+     BUILDER_CONFIG_CLASS = MshWsdBigBioConfig
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
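+             # source schema: one record per ambiguous word, holding all of its
+             # tagged citations (pmid, text, Mn label) and the candidate concepts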
+             features = datasets.Features(
+                 {
+                     "ambiguous_word": datasets.Value("string"),
+                     "sentences": [
+                         {
+                             "pmid": datasets.Value("string"),
+                             "text": datasets.Value("string"),
+                             "label": datasets.Value("string"),
+                         }
+                     ],
+                     "choices": [
+                         {
+                             "label": datasets.Value("string"),
+                             "concept": datasets.Value("string"),
+                         }
+                     ],
+                 }
+             )
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         if self.config.data_dir is None:
+             raise ValueError(
+                 "This is a local dataset. Please pass the data_dir kwarg to load_dataset."
+             )
+         else:
+             data_dir = dl_manager.download_and_extract(
+                 os.path.join(self.config.data_dir, "MSHCorpus.zip")
+             )
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_dir": Path(data_dir),
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, data_dir: Path) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         data_dir = data_dir / "MSHCorpus"
+         concepts = data_dir / "benchmark_mesh.txt"
+         with concepts.open() as f:
+             concepts = f.readlines()
+         concepts = [x.strip().split("\t") for x in concepts]
+
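+         # each line of benchmark_mesh.txt holds an ambiguous word followed by its
+         # candidate CUIs; map each word to {"M1": cui_1, "M2": cui_2, ...} so the
+         # Mn labels used in the *.arff files can be resolved to concepts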
+         concept_map = {
+             cuis[0]: {f"M{num}": cui for num, cui in enumerate(cuis[1:], 1)}
+             for cuis in concepts
+         }
+
+         files = list(data_dir.glob("*arff"))
+         for guid, file in enumerate(files):
+             if self.config.schema == "source":
+                 for example in self._parse_document(concept_map, file):
+                     yield guid, example
+
+             elif self.config.schema == "bigbio_kb":
+                 for document in self._parse_document(concept_map, file):
+                     for example in self._source_to_kb(document):
+                         yield example["id"], example
+
+     def _parse_document(self, concept_map, file: Path):
+         with file.open(mode="r", encoding="iso-8859-1") as f:
+             content = f.readlines()
+         content = [x.strip() for x in content]
+
+         # find the line number of the @DATA marker; its position varies between files (sometimes 6, sometimes 7)
+         start_l = None
+         for number, line in enumerate(content):
+             if line.startswith("@DATA"):
+                 start_l = number + 1
+                 break
+         assert start_l is not None
+
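+         # corpus files are named <ambiguous_word>_pmids_tagged.arff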
+         amb_word = file.with_suffix("").name[: -len("_pmids_tagged")]
+
+         sentences = []
+         for line in content[start_l:]:
+             # the citation text itself can contain commas, so neither "," nor ", "
+             # works as a separator; locate the pmid and label with regexes instead
+             m_pmid = re.search("[0-9]+(?=(,))", line)
+             pmid = m_pmid.group()
+             m_label = re.search("(?<=(,))M[0-9]+", line)
+             label = m_label.group()
+
+             citation = line[m_pmid.span()[1] + 1 : m_label.span()[0] - 1].strip('"')
+
+             sentences.append({"pmid": pmid, "text": citation, "label": label})
+
+         yield {
+             "ambiguous_word": amb_word,
+             "sentences": sentences,
+             "choices": [
+                 {"label": key, "concept": value}
+                 for key, value in concept_map[amb_word].items()
+             ],
+         }
+
+     def _source_to_kb(self, document):
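+         # emit one bigbio_kb document per tagged citation: the citation text
+         # becomes a single passage, and the ambiguous word becomes one entity
+         # normalized to the concept selected by the sentence's Mn label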
+         choices = {x["label"]: x["concept"] for x in document["choices"]}
+         for sentence in document["sentences"]:
+             document_ = {}
+             document_["events"] = []
+             document_["relations"] = []
+             document_["coreferences"] = []
+             document_["id"] = next(self.uid)
+             document_["document_id"] = sentence["pmid"]
+             document_["passages"] = [
+                 {
+                     "id": next(self.uid),
+                     "type": "",
+                     "text": [sentence["text"]],
+                     "offsets": [[0, len(sentence["text"])]],
+                 }
+             ]
+             document_["entities"] = [
+                 {
+                     "id": next(self.uid),
+                     "type": "ambiguous_word",
+                     "text": [document["ambiguous_word"]],
+                     "offsets": [self._parse_offset(sentence["text"])],
+                     "normalized": [
+                         {"db_name": "MeSH", "db_id": choices[sentence["label"]]}
+                     ],
+                 }
+             ]
+             yield document_
+
+     def _parse_offset(self, sentence):
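+         # the ambiguous word is wrapped in <e>...</e> tags within the citation
+         # text; return the character span of the text between the tags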
+         m = re.search("(?<=(<e>)).+(?=(</e>))", sentence)
+         return m.span()
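
A minimal loading sketch (illustrative, not part of the committed script; the
data_dir path is a placeholder for the directory where MSHCorpus.zip was saved):

    from datasets import load_dataset

    # source schema: one record per ambiguous word
    ds = load_dataset("msh_wsd.py", name="msh_wsd_source", data_dir="/path/to/dir")

    # bigbio_kb schema: one record per tagged citation
    ds_kb = load_dataset("msh_wsd.py", name="msh_wsd_bigbio_kb", data_dir="/path/to/dir")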