Datasets:

Modalities:
Text
Languages:
English
Libraries:
Datasets
License:
gabrielaltay commited on
Commit
5a19146
·
1 Parent(s): 66f99b7

upload hubscripts/bionlp_st_2013_cg_hub.py to hub from bigbio repo

Browse files
Files changed (1) hide show
  1. bionlp_st_2013_cg.py +268 -0
bionlp_st_2013_cg.py ADDED
@@ -0,0 +1,268 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
from pathlib import Path
from typing import Dict, List

import datasets

from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks
from .bigbiohub import brat_parse_to_bigbio_kb
from .bigbiohub import kb_features
from .bigbiohub import parse_brat_file
24
+
25
# Dataset identifiers used for config names, download lookup and display.
_DATASETNAME = "bionlp_st_2013_cg"
_DISPLAYNAME = "BioNLP 2013 CG"

# Name of the unified (canonical) schema view.
_UNIFIED_VIEW_NAME = "bigbio"

_LANGUAGES = ['English']
_PUBMED = True   # corpus texts originate from PubMed
_LOCAL = False   # data is fetched remotely, not supplied by the user

_CITATION = """\
@inproceedings{pyysalo-etal-2013-overview,
    title = "Overview of the Cancer Genetics ({CG}) task of {B}io{NLP} Shared Task 2013",
    author = "Pyysalo, Sampo and
      Ohta, Tomoko and
      Ananiadou, Sophia",
    booktitle = "Proceedings of the {B}io{NLP} Shared Task 2013 Workshop",
    month = aug,
    year = "2013",
    address = "Sofia, Bulgaria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W13-2008",
    pages = "58--66",
}
"""

_DESCRIPTION = """\
the Cancer Genetics (CG) is a event extraction task and a main task of the BioNLP Shared Task (ST) 2013.
The CG task is an information extraction task targeting the recognition of events in text,
represented as structured n-ary associations of given physical entities. In addition to
addressing the cancer domain, the CG task is differentiated from previous event extraction
tasks in the BioNLP ST series in addressing a wide range of pathological processes and multiple
levels of biological organization, ranging from the molecular through the cellular and organ
levels up to whole organisms. Final test set submissions were accepted from six teams
"""

_HOMEPAGE = "https://github.com/openbiocorpora/bionlp-st-2013-cg"

_LICENSE = 'GENIA Project License for Annotated Corpora'

# Download location of the openbiocorpora mirror (brat standoff files).
_URLs = {
    "bionlp_st_2013_cg": "https://github.com/openbiocorpora/bionlp-st-2013-cg/archive/refs/heads/master.zip",
}

_SUPPORTED_TASKS = [
    Tasks.EVENT_EXTRACTION,
    Tasks.NAMED_ENTITY_RECOGNITION,
    Tasks.COREFERENCE_RESOLUTION,
]

_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"
74
+
75
+
76
class bionlp_st_2013_cg(datasets.GeneratorBasedBuilder):
    """Loader for the Cancer Genetics (CG) corpus, an event extraction task
    and a main task of the BioNLP Shared Task (ST) 2013.

    The corpus is downloaded from the openbiocorpora mirror as brat standoff
    files and exposed either in the original ("source") schema or in the
    unified BigBio KB schema.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="bionlp_st_2013_cg_source",
            version=SOURCE_VERSION,
            # Fixed: description previously omitted the "_cg" suffix.
            description="bionlp_st_2013_cg source schema",
            schema="source",
            # Fixed: subset_id was "bionlp_st_2013_pc" — a copy-paste remnant
            # from the Pathway Curation (PC) task script; this is the CG task.
            subset_id="bionlp_st_2013_cg",
        ),
        BigBioConfig(
            name="bionlp_st_2013_cg_bigbio_kb",
            version=BIGBIO_VERSION,
            description="bionlp_st_2013_cg BigBio schema",
            schema="bigbio_kb",
            # Fixed: same "_pc" copy-paste remnant as above.
            subset_id="bionlp_st_2013_cg",
        ),
    ]

    DEFAULT_CONFIG_NAME = "bionlp_st_2013_cg_source"

    # brat numbers repeated argument roles (Theme2, Theme3, ...); map each
    # numbered variant back to its base role so downstream consumers see a
    # single canonical role name.
    _ROLE_MAPPING = {
        "Theme2": "Theme",
        "Theme3": "Theme",
        "Theme4": "Theme",
        "Theme5": "Theme",
        "Theme6": "Theme",
        "Instrument2": "Instrument",
        "Instrument3": "Instrument",
        "Participant2": "Participant",
        "Participant3": "Participant",
        "Participant4": "Participant",
        "Cause2": "Cause",
    }

    def _info(self):
        """Return the ``DatasetInfo`` for the selected config.

        - `features` defines the schema of the parsed data set. The schema
          depends on the chosen `config`: If it is `_SOURCE_VIEW_NAME` the
          schema is the schema of the original data. If `config` is
          `_UNIFIED_VIEW_NAME`, then the schema is the canonical KB-task
          schema defined in `biomedical/schemas/kb.py`.
        """
        if self.config.schema == "source":
            # Mirrors the brat standoff format: one feature group per brat
            # annotation line type (T/E/R/Equiv/A-M/N).
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
                        {
                            "offsets": datasets.Sequence([datasets.Value("int32")]),
                            "text": datasets.Sequence(datasets.Value("string")),
                            "type": datasets.Value("string"),
                            "id": datasets.Value("string"),
                        }
                    ],
                    "events": [  # E line in brat
                        {
                            # refers to the text_bound_annotation of the trigger
                            "trigger": datasets.Value("string"),
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "arguments": datasets.Sequence(
                                {
                                    "role": datasets.Value("string"),
                                    "ref_id": datasets.Value("string"),
                                }
                            ),
                        }
                    ],
                    "relations": [  # R line in brat
                        {
                            "id": datasets.Value("string"),
                            "head": {
                                "ref_id": datasets.Value("string"),
                                "role": datasets.Value("string"),
                            },
                            "tail": {
                                "ref_id": datasets.Value("string"),
                                "role": datasets.Value("string"),
                            },
                            "type": datasets.Value("string"),
                        }
                    ],
                    "equivalences": [  # Equiv line in brat
                        {
                            "id": datasets.Value("string"),
                            "ref_ids": datasets.Sequence(datasets.Value("string")),
                        }
                    ],
                    "attributes": [  # M or A lines in brat
                        {
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "ref_id": datasets.Value("string"),
                            "value": datasets.Value("string"),
                        }
                    ],
                    "normalizations": [  # N lines in brat
                        {
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "ref_id": datasets.Value("string"),
                            # Name of the resource, e.g. "Wikipedia"
                            "resource_name": datasets.Value("string"),
                            # ID in the resource, e.g. 534366
                            "cuid": datasets.Value("string"),
                            # Human readable description/name of the entity,
                            # e.g. "Barack Obama"
                            "text": datasets.Value("string"),
                        }
                    ],
                },
            )
        elif self.config.schema == "bigbio_kb":
            features = kb_features
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            features=features,
            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # This is not applicable for MLEE.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=str(_LICENSE),
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """Download/extract the corpus zip and map each split to its brat
        directory (train / devel / test)."""
        my_urls = _URLs[_DATASETNAME]
        data_dir = Path(dl_manager.download_and_extract(my_urls))
        # NOTE: pointless f-string prefixes removed — paths are constant.
        base_dir = data_dir / "bionlp-st-2013-cg-master" / "original-data"
        data_files = {
            "train": base_dir / "train",
            "dev": base_dir / "devel",
            "test": base_dir / "test",
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_files": data_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_files": data_files["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_files": data_files["test"]},
            ),
        ]

    def _standardize_arguments_roles(self, kb_example: Dict) -> Dict:
        """Rewrite numbered event-argument roles (e.g. ``Theme2``) to their
        base role using ``_ROLE_MAPPING``; mutates and returns the example."""
        for event in kb_example["events"]:
            for argument in event["arguments"]:
                role = argument["role"]
                # Unmapped roles pass through unchanged.
                argument["role"] = self._ROLE_MAPPING.get(role, role)

        return kb_example

    def _generate_examples(self, data_files: Path):
        """Yield ``(guid, example)`` pairs parsed from the brat files in
        *data_files*, in the schema selected by the active config.

        Fixed: the uploaded script called ``parsing.parse_brat_file`` /
        ``parsing.brat_parse_to_bigbio_kb`` but never imported ``parsing``,
        raising NameError; the helpers are imported from ``.bigbiohub``.
        """
        if self.config.schema == "source":
            txt_files = list(data_files.glob("*txt"))
            for guid, txt_file in enumerate(txt_files):
                example = parse_brat_file(txt_file)
                example["id"] = str(guid)
                yield guid, example
        elif self.config.schema == "bigbio_kb":
            txt_files = list(data_files.glob("*txt"))
            for guid, txt_file in enumerate(txt_files):
                example = brat_parse_to_bigbio_kb(parse_brat_file(txt_file))
                example = self._standardize_arguments_roles(example)
                example["id"] = str(guid)
                yield guid, example
        else:
            raise ValueError(f"Invalid config: {self.config.name}")