Languages: English

Commit a4a0452 by gabrielaltay (1 parent: 0d00c73)

upload hubscripts/verspoor_2013_hub.py to hub from bigbio repo

Files changed (1):
  1. verspoor_2013.py +266 -0
verspoor_2013.py ADDED
@@ -0,0 +1,266 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ This dataset contains annotations for a small corpus of full text journal
+ publications on the subject of inherited colorectal cancer. It is suitable for
+ Named Entity Recognition and Relation Extraction tasks. It uses the Variome
+ Annotation Schema, a schema that aims to capture the core concepts and
+ relations relevant to cataloguing and interpreting human genetic variation and
+ its relationship to disease, as described in the published literature. The
+ schema was inspired by the needs of the database curators of the International
+ Society for Gastrointestinal Hereditary Tumours (InSiGHT) database, but is
+ intended to have application to genetic variation information in a range of
+ diseases.
+ """
+
+ from pathlib import Path
+ from shutil import rmtree
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+ # brat parsing helpers used in _generate_examples below; assumed to be
+ # provided by the bigbiohub.py module shipped alongside this script
+ from .bigbiohub import parse_brat_file
+ from .bigbiohub import brat_parse_to_bigbio_kb
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @article{verspoor2013annotating,
+     title = {Annotating the biomedical literature for the human variome},
+     author = {
+         Verspoor, Karin and Jimeno Yepes, Antonio and Cavedon, Lawrence and
+         McIntosh, Tara and Herten-Crabb, Asha and Thomas, Zo{\"e} and Plazzer,
+         John-Paul
+     },
+     year = 2013,
+     journal = {Database},
+     publisher = {Oxford Academic},
+     volume = 2013
+ }
+ """
+
+ _DATASETNAME = "verspoor_2013"
+ _DISPLAYNAME = "Verspoor 2013"
+
+ _DESCRIPTION = """\
+ This dataset contains annotations for a small corpus of full text journal \
+ publications on the subject of inherited colorectal cancer. It is suitable for \
+ Named Entity Recognition and Relation Extraction tasks. It uses the Variome \
+ Annotation Schema, a schema that aims to capture the core concepts and \
+ relations relevant to cataloguing and interpreting human genetic variation and \
+ its relationship to disease, as described in the published literature. The \
+ schema was inspired by the needs of the database curators of the International \
+ Society for Gastrointestinal Hereditary Tumours (InSiGHT) database, but is \
+ intended to have application to genetic variation information in a range of \
+ diseases.
+ """
+
+
+ _HOMEPAGE = "NA"
+
+ _LICENSE = 'License information unavailable'
+
+ _URLS = ["http://github.com/rockt/SETH/zipball/master/"]
+
+ _SUPPORTED_TASKS = [
+     Tasks.NAMED_ENTITY_RECOGNITION,
+     Tasks.RELATION_EXTRACTION,
+ ]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class Verspoor2013Dataset(datasets.GeneratorBasedBuilder):
+     """\
+     This dataset contains annotations for a small corpus of full text journal publications
+     on the subject of inherited colorectal cancer. It is suitable for Named Entity Recognition and
+     Relation Extraction tasks. It uses the Variome Annotation Schema, a schema that aims to
+     capture the core concepts and relations relevant to cataloguing and interpreting human
+     genetic variation and its relationship to disease, as described in the published literature.
+     The schema was inspired by the needs of the database curators of the International Society
+     for Gastrointestinal Hereditary Tumours (InSiGHT) database, but is intended to have
+     application to genetic variation information in a range of diseases.
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="verspoor_2013_source",
+             version=SOURCE_VERSION,
+             description="verspoor_2013 source schema",
+             schema="source",
+             subset_id="verspoor_2013",
+         ),
+         BigBioConfig(
+             name="verspoor_2013_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="verspoor_2013 BigBio schema",
+             schema="bigbio_kb",
+             subset_id="verspoor_2013",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "verspoor_2013_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "document_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
+                         {
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                             "text": datasets.Sequence(datasets.Value("string")),
+                             "type": datasets.Value("string"),
+                             "id": datasets.Value("string"),
+                         }
+                     ],
+                     "events": [  # E line in brat
+                         {
+                             "trigger": datasets.Value(
+                                 "string"
+                             ),  # refers to the text_bound_annotation of the trigger,
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "arguments": datasets.Sequence(
+                                 {
+                                     "role": datasets.Value("string"),
+                                     "ref_id": datasets.Value("string"),
+                                 }
+                             ),
+                         }
+                     ],
+                     "relations": [  # R line in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "head": {
+                                 "ref_id": datasets.Value("string"),
+                                 "role": datasets.Value("string"),
+                             },
+                             "tail": {
+                                 "ref_id": datasets.Value("string"),
+                                 "role": datasets.Value("string"),
+                             },
+                             "type": datasets.Value("string"),
+                         }
+                     ],
+                     "equivalences": [  # Equiv line in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "ref_ids": datasets.Sequence(datasets.Value("string")),
+                         }
+                     ],
+                     "attributes": [  # M or A lines in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "ref_id": datasets.Value("string"),
+                             "value": datasets.Value("string"),
+                         }
+                     ],
+                     "normalizations": [  # N lines in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "ref_id": datasets.Value("string"),
+                             "resource_name": datasets.Value(
+                                 "string"
+                             ),  # Name of the resource, e.g. "Wikipedia"
+                             "cuid": datasets.Value(
+                                 "string"
+                             ),  # ID in the resource, e.g. 534366
+                             "text": datasets.Value(
+                                 "string"
+                             ),  # Human readable description/name of the entity, e.g. "Barack Obama"
+                         }
+                     ],
+                 },
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         # The download fetches the entire git repo, which contains unused data from other datasets
+         repo_dir = Path(dl_manager.download_and_extract(_URLS[0]))
+         data_dir = repo_dir / "data"
+         data_dir.mkdir(exist_ok=True)
+
+         # Find the relevant files from Verspoor2013 and move them to a new directory
+         verspoor_files = repo_dir.glob("*/*/*Verspoor2013/**/*")
+         for file in verspoor_files:
+             if file.is_file() and "readme" not in str(file):
+                 file.rename(data_dir / file.name)
+
+         # Delete all unused files and directories from the original download
+         for x in repo_dir.glob("[!data]*"):
+             if x.is_file():
+                 x.unlink()
+             elif x.is_dir():
+                 rmtree(x)
+
+         data_files = {"text_files": list(data_dir.glob("*.txt"))}
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # Whatever you put in gen_kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "data_files": data_files,
+                     "split": "train",
+                 },
+             )
+         ]
+
+     def _generate_examples(self, data_files, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         if self.config.schema == "source":
+             txt_files = data_files["text_files"]
+             for guid, txt_file in enumerate(txt_files):
+                 example = parse_brat_file(txt_file)
+                 example["id"] = str(guid)
+                 yield guid, example
+
+         elif self.config.schema == "bigbio_kb":
+             txt_files = data_files["text_files"]
+             for guid, txt_file in enumerate(txt_files):
+                 example = brat_parse_to_bigbio_kb(
+                     parse_brat_file(txt_file)
+                 )
+                 example["id"] = str(guid)
+                 yield guid, example
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
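
For reference, here is a minimal usage sketch (not part of this commit). It assumes verspoor_2013.py and its companion bigbiohub.py are saved side by side in a local directory; the "path/to/verspoor_2013.py" path and the printed fields are illustrative, and newer versions of the datasets library may also require trust_remote_code=True when loading a script.

from datasets import load_dataset

# Load the brat-style source schema and the harmonized BigBio KB schema.
# "path/to/verspoor_2013.py" is a placeholder for a local copy of this script;
# bigbiohub.py must sit next to it so the relative imports resolve.
source = load_dataset("path/to/verspoor_2013.py", name="verspoor_2013_source", split="train")
kb = load_dataset("path/to/verspoor_2013.py", name="verspoor_2013_bigbio_kb", split="train")

print(source[0]["document_id"])  # document identifier derived from the .txt file name
print(len(kb[0]["entities"]))    # number of annotated entities in the first document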