gabrielaltay committed
Commit e85b514
1 Parent(s): 3b9f718

upload hubscripts/biomrc_hub.py to hub from bigbio repo

Files changed (1)
  1. biomrc.py +267 -0
biomrc.py ADDED
@@ -0,0 +1,267 @@
+# coding=utf-8
+# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+We introduce BIOMRC, a large-scale cloze-style biomedical MRC dataset. Care was taken to reduce noise, compared to the
+previous BIOREAD dataset of Pappas et al. (2018). Experiments show that simple heuristics do not perform well on the
+new dataset and that two neural MRC models that had been tested on BIOREAD perform much better on BIOMRC, indicating
+that the new dataset is indeed less noisy or at least that its task is more feasible. Non-expert human performance is
+also higher on the new dataset compared to BIOREAD, and biomedical experts perform even better. We also introduce a new
+BERT-based MRC model, the best version of which substantially outperforms all other methods tested, reaching or
+surpassing the accuracy of biomedical experts in some experiments. We make the new dataset available in three different
+sizes, also releasing our code, and providing a leaderboard.
+"""
+
+import itertools as it
+import json
+
+import datasets
+
+from .bigbiohub import qa_features
+from .bigbiohub import BigBioConfig
+from .bigbiohub import Tasks
+
+_LANGUAGES = ['English']
+_PUBMED = True
+_LOCAL = False
+_CITATION = """\
+@inproceedings{pappas-etal-2020-biomrc,
+    title = "{B}io{MRC}: A Dataset for Biomedical Machine Reading Comprehension",
+    author = "Pappas, Dimitris and
+      Stavropoulos, Petros and
+      Androutsopoulos, Ion and
+      McDonald, Ryan",
+    booktitle = "Proceedings of the 19th SIGBioMed Workshop on Biomedical Language Processing",
+    month = jul,
+    year = "2020",
+    address = "Online",
+    publisher = "Association for Computational Linguistics",
+    url = "https://www.aclweb.org/anthology/2020.bionlp-1.15",
+    pages = "140--149",
+}
+"""
+
+_DATASETNAME = "biomrc"
+_DISPLAYNAME = "BIOMRC"
+
+_DESCRIPTION = """\
+We introduce BIOMRC, a large-scale cloze-style biomedical MRC dataset. Care was taken to reduce noise, compared to the
+previous BIOREAD dataset of Pappas et al. (2018). Experiments show that simple heuristics do not perform well on the
+new dataset and that two neural MRC models that had been tested on BIOREAD perform much better on BIOMRC, indicating
+that the new dataset is indeed less noisy or at least that its task is more feasible. Non-expert human performance is
+also higher on the new dataset compared to BIOREAD, and biomedical experts perform even better. We also introduce a new
+BERT-based MRC model, the best version of which substantially outperforms all other methods tested, reaching or
+surpassing the accuracy of biomedical experts in some experiments. We make the new dataset available in three different
+sizes, also releasing our code, and providing a leaderboard.
+"""
+
+_HOMEPAGE = "https://github.com/PetrosStav/BioMRC_code"
+
+_LICENSE = 'License information unavailable'
+
+_URLS = {
+    "large": {
+        "A": {
+            "train": "https://archive.org/download/biomrc_dataset/biomrc_large/dataset_train.json.gz",
+            "val": "https://archive.org/download/biomrc_dataset/biomrc_large/dataset_val.json.gz",
+            "test": "https://archive.org/download/biomrc_dataset/biomrc_large/dataset_test.json.gz",
+        },
+        "B": {
+            "train": "https://archive.org/download/biomrc_dataset/biomrc_large/dataset_train_B.json.gz",
+            "val": "https://archive.org/download/biomrc_dataset/biomrc_large/dataset_val_B.json.gz",
+            "test": "https://archive.org/download/biomrc_dataset/biomrc_large/dataset_test_B.json.gz",
+        },
+    },
+    "small": {
+        "A": {
+            "train": "https://archive.org/download/biomrc_dataset/biomrc_small/dataset_train_small.json.gz",
+            "val": "https://archive.org/download/biomrc_dataset/biomrc_small/dataset_val_small.json.gz",
+            "test": "https://archive.org/download/biomrc_dataset/biomrc_small/dataset_test_small.json.gz",
+        },
+        "B": {
+            "train": "https://archive.org/download/biomrc_dataset/biomrc_small/dataset_train_small_B.json.gz",
+            "val": "https://archive.org/download/biomrc_dataset/biomrc_small/dataset_val_small_B.json.gz",
+            "test": "https://archive.org/download/biomrc_dataset/biomrc_small/dataset_test_small_B.json.gz",
+        },
+    },
+    "tiny": {
+        "A": {
+            "test": "https://archive.org/download/biomrc_dataset/biomrc_tiny/dataset_tiny.json.gz"
+        },
+        "B": {
+            "test": "https://archive.org/download/biomrc_dataset/biomrc_tiny/dataset_tiny_B.json.gz"
+        },
+    },
+}
+
+_SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]
+
+_SOURCE_VERSION = "1.0.0"
+
+_BIGBIO_VERSION = "1.0.0"
+
+
+class BiomrcDataset(datasets.GeneratorBasedBuilder):
+    """BioMRC: A Dataset for Biomedical Machine Reading Comprehension"""
+
+    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+    BUILDER_CONFIGS = []
+
+    for biomrc_setting in ["A", "B"]:
+        for biomrc_version in ["large", "small", "tiny"]:
+            subset_id = f"biomrc_{biomrc_version}_{biomrc_setting}"
+            BUILDER_CONFIGS.append(
+                BigBioConfig(
+                    name=f"{subset_id}_source",
+                    version=SOURCE_VERSION,
+                    description=f"BioMRC Version {biomrc_version} Setting {biomrc_setting} source schema",
+                    schema="source",
+                    subset_id=subset_id,
+                )
+            )
+            BUILDER_CONFIGS.append(
+                BigBioConfig(
+                    name=f"{subset_id}_bigbio_qa",
+                    version=BIGBIO_VERSION,
+                    description=f"BioMRC Version {biomrc_version} Setting {biomrc_setting} BigBio schema",
+                    schema="bigbio_qa",
+                    subset_id=subset_id,
+                )
+            )
+
+    DEFAULT_CONFIG_NAME = "biomrc_large_B_source"
+
+    def _info(self):
+        if self.config.schema == "source":
+            features = datasets.Features(
+                {
+                    "abstract": datasets.Value("string"),
+                    "title": datasets.Value("string"),
+                    "entities_list": datasets.features.Sequence(
+                        {
+                            "pseudoidentifier": datasets.Value("string"),
+                            "identifier": datasets.Value("string"),
+                            "synonyms": datasets.Value("string"),
+                        }
+                    ),
+                    "answer": {
+                        "pseudoidentifier": datasets.Value("string"),
+                        "identifier": datasets.Value("string"),
+                        "synonyms": datasets.Value("string"),
+                    },
+                }
+            )
+        elif self.config.schema == "bigbio_qa":
+            features = qa_features
+        else:
+            raise NotImplementedError()
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=str(_LICENSE),
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+
+        _, version, setting = self.config.subset_id.split("_")
+        downloaded_files = dl_manager.download_and_extract(_URLS[version][setting])
+
+        if version == "tiny":
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={"filepath": downloaded_files["test"]},
+                ),
+            ]
+        else:
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={"filepath": downloaded_files["train"]},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={"filepath": downloaded_files["val"]},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={"filepath": downloaded_files["test"]},
+                ),
+            ]
+
+    def _generate_examples(self, filepath):
+        """Yields examples as (key, example) tuples."""
+
+        if self.config.schema == "source":
+            with open(filepath, encoding="utf-8") as fp:
+                biomrc = json.load(fp)
+                for _id, (ab, ti, el, an) in enumerate(
+                    zip(
+                        biomrc["abstracts"],
+                        biomrc["titles"],
+                        biomrc["entities_list"],
+                        biomrc["answers"],
+                    )
+                ):
+                    el = [self._parse_dict_from_entity(entity) for entity in el]
+                    an = self._parse_dict_from_entity(an)
+                    yield _id, {
+                        "abstract": ab,
+                        "title": ti,
+                        "entities_list": el,
+                        "answer": an,
+                    }
+        elif self.config.schema == "bigbio_qa":
+            with open(filepath, encoding="utf-8") as fp:
+                uid = it.count(0)
+                biomrc = json.load(fp)
+                for _id, (ab, ti, el, an) in enumerate(
+                    zip(
+                        biomrc["abstracts"],
+                        biomrc["titles"],
+                        biomrc["entities_list"],
+                        biomrc["answers"],
+                    )
+                ):
+                    # remove info such as code, label, synonyms from answer and choices
+                    # e.g. "@entity1 :: ('9606', 'Species') :: ['patients', 'patient']"
+                    example = {
+                        "id": next(uid),
+                        "question_id": next(uid),
+                        "document_id": next(uid),
+                        "question": ti,
+                        "type": "multiple_choice",
+                        "choices": [x.split(" :: ")[0] for x in el],
+                        "context": ab,
+                        "answer": [an.split(" :: ")[0]],
+                    }
+                    yield _id, example
+
+    def _parse_dict_from_entity(self, entity):
+        if "::" in entity:
+            pseudoidentifier, identifier, synonyms = entity.split(" :: ")
+            return {
+                "pseudoidentifier": pseudoidentifier,
+                "identifier": identifier,
+                "synonyms": synonyms,
+            }
+        else:
+            return {"pseudoidentifier": entity, "identifier": "", "synonyms": ""}
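
For context, a minimal usage sketch of the uploaded script. The hub repository id "bigbio/biomrc" is an assumption based on the commit message; the config names follow from the BUILDER_CONFIGS loop above. Note that the tiny subsets ship a single file, which the script exposes as the train split.

    # Minimal usage sketch (assumption: the script is published as the
    # "bigbio/biomrc" dataset repo, alongside its bigbiohub.py helper).
    from datasets import load_dataset

    # Source schema: entity strings such as
    #   "@entity1 :: ('9606', 'Species') :: ['patients', 'patient']"
    # are split into pseudoidentifier / identifier / synonyms fields.
    source = load_dataset("bigbio/biomrc", "biomrc_tiny_A_source", split="train")
    print(source[0]["answer"])

    # BigBio QA schema: the title is the cloze question, the abstract the
    # context, and the entity pseudoidentifiers the multiple-choice options.
    qa = load_dataset("bigbio/biomrc", "biomrc_tiny_A_bigbio_qa", split="train")
    print(qa[0]["question"])
    print(qa[0]["choices"], "->", qa[0]["answer"])

Recent versions of the datasets library may additionally require passing trust_remote_code=True to load_dataset for script-based datasets like this one.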