Datasets:
Languages: English
License:
gabrielaltay committed
Commit 4afeae3 · 1 Parent(s): abedd23

upload hubscripts/pcr_hub.py to hub from bigbio repo

Files changed (1):
  1. pcr.py +229 -0
pcr.py ADDED
@@ -0,0 +1,229 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ A corpus for plant and chemical entities and for the relationships between them.
+ The corpus contains 2218 plant and chemical entities and 600 plant-chemical
+ relationships which are drawn from 1109 sentences in 245 PubMed abstracts.
+ """
+ from pathlib import Path
+ from typing import Dict, Iterator, Tuple
+
+ import datasets
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+ # brat parsing helpers used in _generate_examples below
+ from .bigbiohub import parse_brat_file
+ from .bigbiohub import brat_parse_to_bigbio_kb
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @article{choi2016corpus,
+     title = {A corpus for plant-chemical relationships in the biomedical domain},
+     author = {
+         Choi, Wonjun and Kim, Baeksoo and Cho, Hyejin and Lee, Doheon and Lee,
+         Hyunju
+     },
+     year = 2016,
+     journal = {BMC bioinformatics},
+     publisher = {Springer},
+     volume = 17,
+     number = 1,
+     pages = {1--15}
+ }
+ """
+
+ _DATASETNAME = "pcr"
+ _DISPLAYNAME = "PCR"
+
+ _DESCRIPTION = """
+ A corpus for plant / herb and chemical entities and for the relationships \
+ between them. The corpus contains 2218 plant and chemical entities and 600 \
+ plant-chemical relationships which are drawn from 1109 sentences in 245 PubMed \
+ abstracts.
+ """
+
+ _HOMEPAGE = "http://210.107.182.73/plantchemcorpus.htm"
+ _LICENSE = 'License information unavailable'
+
+ _URLS = {_DATASETNAME: "http://210.107.182.73/1109_corpus_units_STformat.tar"}
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.EVENT_EXTRACTION]
+
+ _SOURCE_VERSION = "1.0.0"
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class PCRDataset(datasets.GeneratorBasedBuilder):
+     """
+     The corpus of plant-chemical relations consists of plant / herb and
+     chemical entities and the relations between them.
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="pcr_source",
+             version=SOURCE_VERSION,
+             description="PCR source schema",
+             schema="source",
+             subset_id="pcr",
+         ),
+         BigBioConfig(
+             name="pcr_fixed_source",
+             version=SOURCE_VERSION,
+             description="PCR (with fixed offsets) source schema",
+             schema="source",
+             subset_id="pcr_fixed",
+         ),
+         BigBioConfig(
+             name="pcr_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="PCR BigBio schema",
+             schema="bigbio_kb",
+             subset_id="pcr",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "pcr_source"
+
+     def _info(self):
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "document_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "entities": [
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                             "text": datasets.Sequence(datasets.Value("string")),
+                             "normalized": [
+                                 {
+                                     "db_name": datasets.Value("string"),
+                                     "db_id": datasets.Value("string"),
+                                 }
+                             ],
+                         }
+                     ],
+                     "events": [
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             # refers to the text_bound_annotation of the trigger
+                             "trigger": {
+                                 "text": datasets.Sequence(datasets.Value("string")),
+                                 "offsets": datasets.Sequence([datasets.Value("int32")]),
+                             },
+                             "arguments": [
+                                 {
+                                     "role": datasets.Value("string"),
+                                     "ref_id": datasets.Value("string"),
+                                 }
+                             ],
+                         }
+                     ],
+                 },
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         urls = _URLS[_DATASETNAME]
+         data_dir = Path(dl_manager.download_and_extract(urls))
+         data_dir = data_dir / "1109 corpus units"
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"data_dir": data_dir},
+             )
+         ]
+
+     def _generate_examples(self, data_dir: Path) -> Iterator[Tuple[str, Dict]]:
+         if self.config.schema == "source":
+             for file in data_dir.iterdir():
+                 if not str(file).endswith(".txt"):
+                     continue
+
+                 example = parse_brat_file(file)
+                 example = brat_parse_to_bigbio_kb(example)
+                 example = self._to_source_example(example)
+
+                 # Three documents have incorrect offsets - fix them for the pcr_fixed_source config
+                 if self.config.subset_id == "pcr_fixed" and example["document_id"] in [
+                     "463",
+                     "509",
+                     "566",
+                 ]:
+                     example = self._fix_example(example)
+
+                 yield example["document_id"], example
+
+         elif self.config.schema == "bigbio_kb":
+             for file in data_dir.iterdir():
+                 if not str(file).endswith(".txt"):
+                     continue
+
+                 example = parse_brat_file(file)
+                 example = brat_parse_to_bigbio_kb(example)
+
+                 document_id = example["document_id"]
+                 example["id"] = document_id
+
+                 # Three documents have incorrect offsets - fix them for the bigbio_kb schema
+                 if document_id in ["463", "509", "566"]:
+                     example = self._fix_example(example)
+
+                 yield example["id"], example
+
+     def _to_source_example(self, bigbio_example: Dict) -> Dict:
+         """
+         Converts an example in the BigBio-KB schema to an example in the source schema.
+         """
+         source_example = bigbio_example.copy()
+         source_example["text"] = bigbio_example["passages"][0]["text"][0]
+
+         source_example.pop("passages", None)
+         source_example.pop("relations", None)
+         source_example.pop("coreferences", None)
+
+         return source_example
+
+     def _fix_example(self, example: Dict) -> Dict:
+         """
+         Fixes the example by adapting the offsets of the trigger word of the first
+         event. In the official annotation data the end offset is incorrect (for 3 examples).
+         """
+         first_event = example["events"][0]
+         trigger_text = first_event["trigger"]["text"][0]
+         offsets = first_event["trigger"]["offsets"][0]
+
+         real_offsets = [offsets[0], offsets[0] + len(trigger_text)]
+         example["events"][0]["trigger"]["offsets"] = [real_offsets]
+
+         return example
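
Once this script sits on the Hub next to its bigbiohub.py companion, the dataset can be loaded by config name with the datasets library. The following is a minimal sketch, assuming the repository id is bigbio/pcr (not stated in this commit) and that the source archive is still reachable; the config names and the "train" split come from BUILDER_CONFIGS and _split_generators above, and trust_remote_code is only required on recent versions of datasets.

# Hypothetical repo id "bigbio/pcr"; adjust to wherever pcr.py is hosted.
from datasets import load_dataset

# Source schema: raw document text plus brat-derived entities and events.
source = load_dataset("bigbio/pcr", name="pcr_source", trust_remote_code=True)

# Source schema with corrected trigger offsets for documents 463, 509 and 566.
fixed = load_dataset("bigbio/pcr", name="pcr_fixed_source", trust_remote_code=True)

# Harmonized BigBio KB schema.
kb = load_dataset("bigbio/pcr", name="pcr_bigbio_kb", trust_remote_code=True)

# The loader defines a single "train" split (see _split_generators).
print(kb["train"][0]["events"][0]["trigger"])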