Modalities: Text · Languages: English · Size: < 1K · Libraries: Datasets
gabrielaltay committed · Commit 237995b · 1 Parent(s): acfb1a5

upload hubscripts/hprd50_hub.py to hub from bigbio repo

Files changed (1)
  1. hprd50.py +332 -0
hprd50.py ADDED
@@ -0,0 +1,332 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ HPRD50 is a dataset of randomly selected, hand-annotated abstracts of biomedical papers
+ referenced by the Human Protein Reference Database (HPRD). It is parsed in XML format,
+ splitting each abstract into sentences, and in each sentence there may be entities and
+ interactions between those entities. In this particular dataset, entities are all
+ proteins and interactions are thus protein-protein interactions.
+
+ Moreover, all entities are normalized to the HPRD database. These normalized terms are
+ stored in each entity's 'type' attribute in the source XML. This means the dataset can
+ determine e.g. that "Janus kinase 2" and "Jak2" are referencing the same normalized
+ entity.
+
+ Because the dataset contains entities and relations, it is suitable for Named Entity
+ Recognition and Relation Extraction.
+ """
+
+ import os
+ from glob import glob
+ from typing import Dict, List, Tuple
+ from xml.etree import ElementTree
+
+ import datasets
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @article{fundel2007relex,
+   title={RelEx—Relation extraction using dependency parse trees},
+   author={Fundel, Katrin and K{\"u}ffner, Robert and Zimmer, Ralf},
+   journal={Bioinformatics},
+   volume={23},
+   number={3},
+   pages={365--371},
+   year={2007},
+   publisher={Oxford University Press}
+ }
+ """
+
+ _DATASETNAME = "hprd50"
+ _DISPLAYNAME = "HPRD50"
+
+ _DESCRIPTION = """\
+ HPRD50 is a dataset of randomly selected, hand-annotated abstracts of biomedical papers
+ referenced by the Human Protein Reference Database (HPRD). It is parsed in XML format,
+ splitting each abstract into sentences, and in each sentence there may be entities and
+ interactions between those entities. In this particular dataset, entities are all
+ proteins and interactions are thus protein-protein interactions.
+
+ Moreover, all entities are normalized to the HPRD database. These normalized terms are
+ stored in each entity's 'type' attribute in the source XML. This means the dataset can
+ determine e.g. that "Janus kinase 2" and "Jak2" are referencing the same normalized
+ entity.
+
+ Because the dataset contains entities and relations, it is suitable for Named Entity
+ Recognition and Relation Extraction.
+ """
+
+ _HOMEPAGE = ""
+
+ _LICENSE = 'License information unavailable'
+
+ _URLS = {
+     _DATASETNAME: "https://github.com/metalrt/ppi-dataset/zipball/master",
+ }
+
+ _SUPPORTED_TASKS = [
+     Tasks.RELATION_EXTRACTION,
+     Tasks.NAMED_ENTITY_RECOGNITION,
+ ]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+
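+ # Both parsers below walk <document> elements whose rough shape is sketched here.
+ # The sketch is inferred from the parsing code itself; the ids and attribute
+ # values are made up, and the real corpus may carry additional attributes.
+ #
+ # <corpus>
+ #   <document id="HPRD50.d0" origId="...">
+ #     <sentence id="HPRD50.d0.s0" charOffset="0-110" text="...">
+ #       <entity id="HPRD50.d0.s0.e0" charOffset="12-15" text="Jak2" type="..."/>
+ #       <entity id="HPRD50.d0.s0.e1" charOffset="30-45" text="..." type="..."/>
+ #       <interaction id="HPRD50.d0.s0.i0" e1="HPRD50.d0.s0.e0" e2="HPRD50.d0.s0.e1" type="..."/>
+ #     </sentence>
+ #   </document>
+ # </corpus>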
+ def parse_xml_source(document_trees):
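+     """Convert <document> ElementTree nodes into nested dicts for the source schema.
+
+     Each document keeps its sentences, and each sentence keeps its entities and
+     interactions, with all values carried over as the raw string attributes from
+     the XML (e.g. charOffset stays a "start-end" string).
+     """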
+     entries = []
+     for doc in document_trees:
+         document = {
+             "id": doc.get("id"),
+             "origId": doc.get("origId"),
+             "set": doc.get("test"),
+             "sentences": [],
+         }
+         for s in doc.findall("sentence"):
+             sentence = {
+                 "id": s.get("id"),
+                 "origId": s.get("origId"),
+                 "charOffset": s.get("charOffset"),
+                 "text": s.get("text"),
+                 "entities": [],
+                 "interactions": [],
+             }
+
+             for e in s.findall("entity"):
+                 entity = {
+                     "id": e.get("id"),
+                     "origId": e.get("origId"),
+                     "charOffset": e.get("charOffset"),
+                     "text": e.get("text"),
+                     "type": e.get("type"),
+                 }
+
+                 sentence["entities"].append(entity)
+
+             for i in s.findall("interaction"):
+                 interaction = {
+                     "id": i.get("id"),
+                     "e1": i.get("e1"),
+                     "e2": i.get("e2"),
+                     "type": i.get("type"),
+                 }
+                 sentence["interactions"].append(interaction)
+
+             document["sentences"].append(sentence)
+
+         entries.append(document)
+     return entries
+
+
+ def parse_xml_bigbio_kb(document_trees):
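+     """Convert <document> ElementTree nodes into dicts following the BigBio KB schema.
+
+     Sentences become passages, entities become "protein" entities whose HPRD
+     identifier (the source 'type' attribute) is stored under "normalized", and
+     interactions become relations. charOffset strings are split into integer
+     [start, end] offset pairs.
+     """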
+     entries = []
+     for doc in document_trees:
+         document = {
+             "id": doc.get("id"),
+             "document_id": doc.get("origId"),
+             "passages": [],
+             "entities": [],
+             "relations": [],
+             "events": [],
+             "coreferences": [],
+         }
+         for s in doc.findall("sentence"):
+
+             offset = s.get("charOffset").split("-")
+             start = int(offset[0])
+             end = int(offset[1])
+
+             passage = {
+                 "id": s.get("id"),
+                 "type": "sentence",
+                 "text": [s.get("text")],
+                 "offsets": [[start, end]],
+             }
+
+             document["passages"].append(passage)
+
+             for e in s.findall("entity"):
+
+                 offset = e.get("charOffset").split("-")
+                 start = int(offset[0])
+                 end = int(offset[1])
+
+                 entity = {
+                     "id": e.get("id"),
+                     "text": [e.get("text")],
+                     "offsets": [[start, end]],
+                     "type": "protein",
+                     "normalized": [{"db_name": "HPRD", "db_id": e.get("type")}],
+                 }
+
+                 document["entities"].append(entity)
+
+             for i in s.findall("interaction"):
+                 relation = {
+                     "id": i.get("id"),
+                     "arg1_id": i.get("e1"),
+                     "arg2_id": i.get("e2"),
+                     "type": i.get("type"),
+                     "normalized": [],
+                 }
+                 document["relations"].append(relation)
+
+         entries.append(document)
+     return entries
+
+
+ class HPRD50Dataset(datasets.GeneratorBasedBuilder):
+     """
+     HPRD50 is a dataset of randomly selected, hand-annotated abstracts of biomedical papers
+     referenced by the Human Protein Reference Database (HPRD). It is parsed in XML format,
+     splitting each abstract into sentences, and in each sentence there may be entities and
+     interactions between those entities. In this particular dataset, entities are all
+     proteins and interactions are thus protein-protein interactions.
+
+     Moreover, all entities are normalized to the HPRD database. These normalized terms are
+     stored in each entity's 'type' attribute in the source XML. This means the dataset can
+     determine e.g. that "Janus kinase 2" and "Jak2" are referencing the same normalized
+     entity.
+
+     Because the dataset contains entities and relations, it is suitable for Named Entity
+     Recognition and Relation Extraction.
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="hprd50_source",
+             version=SOURCE_VERSION,
+             description="hprd50 source schema",
+             schema="source",
+             subset_id="hprd50",
+         ),
+         BigBioConfig(
+             name="hprd50_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="hprd50 BigBio schema",
+             schema="bigbio_kb",
+             subset_id="hprd50",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "hprd50_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "origId": datasets.Value("string"),
+                     "set": datasets.Value("string"),
+                     "sentences": [
+                         {
+                             "id": datasets.Value("string"),
+                             "origId": datasets.Value("string"),
+                             "charOffset": datasets.Value("string"),
+                             "text": datasets.Value("string"),
+                             "entities": [
+                                 {
+                                     "id": datasets.Value("string"),
+                                     "origId": datasets.Value("string"),
+                                     "charOffset": datasets.Value("string"),
+                                     "text": datasets.Value("string"),
+                                     "type": datasets.Value("string"),
+                                 }
+                             ],
+                             "interactions": [
+                                 {
+                                     "id": datasets.Value("string"),
+                                     "e1": datasets.Value("string"),
+                                     "e2": datasets.Value("string"),
+                                     "type": datasets.Value("string"),
+                                 }
+                             ],
+                         }
+                     ],
+                 }
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS[_DATASETNAME]
+         data_dir = dl_manager.download_and_extract(urls)
+         # Files are actually a few levels down, under this subdirectory, and
+         # intermediate directory names get hashed so this is the easiest way to find it.
+         data_dir = glob(f"{data_dir}/**/csv_output", recursive=True)[0]
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # Whatever you put in gen_kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "HPRD50-train.xml"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "HPRD50-test.xml"),
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         with open(filepath, "r") as f:
+             content = f.read()
+
+         tree = ElementTree.fromstring(content)
+         documents = tree.findall("document")
+
+         if self.config.schema == "source":
+             entries = parse_xml_source(documents)
+             for key, example in enumerate(entries):
+                 yield key, example
+
+         elif self.config.schema == "bigbio_kb":
+             entries = parse_xml_bigbio_kb(documents)
+             for key, example in enumerate(entries):
+                 yield key, example
+
+
+ # This script is based on the following template from the datasets package:
+ # https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
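
For reference, a loading script like this is consumed through the datasets library. A minimal sketch of loading both configs, assuming the script (and the bigbiohub.py helper it imports) is hosted in a Hub repo reachable as bigbio/hprd50 (the repo id is an assumption) and a datasets version that accepts trust_remote_code:

from datasets import load_dataset

# Source schema: one record per abstract, with nested sentences/entities/interactions
hprd50_source = load_dataset("bigbio/hprd50", name="hprd50_source", trust_remote_code=True)

# BigBio KB schema: passages, protein entities (with HPRD normalization), and relations
hprd50_kb = load_dataset("bigbio/hprd50", name="hprd50_bigbio_kb", trust_remote_code=True)

print(hprd50_source["train"][0]["sentences"][0]["text"])
print(hprd50_kb["test"][0]["relations"])

Both configs expose the train and test splits produced by _split_generators above.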