Languages: Spanish
License: Creative Commons Attribution 4.0 International
gabrielaltay committed
Commit ec55ea7 · 1 Parent(s): 782e39a

upload hubscripts/cantemist_hub.py to hub from bigbio repo

Files changed (1)
  1. cantemist.py +370 -0
cantemist.py ADDED
@@ -0,0 +1,370 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ A dataset loading script for the CANTEMIST corpus.
+
+ The CANTEMIST dataset is a collection of 1301 oncological clinical case reports
+ written in Spanish, with tumor morphology mentions manually annotated and
+ mapped by clinical experts to a controlled terminology. Every tumor morphology
+ mention is linked to an eCIE-O code (the Spanish equivalent of ICD-O).
+ """
+
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+ # text_features, parse_brat_file and brat_parse_to_bigbio_kb are referenced
+ # later in this script and are assumed to be provided by bigbiohub as well.
+ from .bigbiohub import text_features
+ from .bigbiohub import parse_brat_file
+ from .bigbiohub import brat_parse_to_bigbio_kb
+
+ _LANGUAGES = ['Spanish']
+ _PUBMED = False
+ _LOCAL = False
+ _CITATION = """\
+ @article{miranda2020named,
+   title={Named Entity Recognition, Concept Normalization and Clinical Coding: Overview of the Cantemist Track for Cancer Text Mining in Spanish, Corpus, Guidelines, Methods and Results.},
+   author={Miranda-Escalada, Antonio and Farr{\'e}, Eul{\`a}lia and Krallinger, Martin},
+   journal={IberLEF@ SEPLN},
+   pages={303--323},
+   year={2020}
+ }
+ """
+
+ _DATASETNAME = "cantemist"
+ _DISPLAYNAME = "CANTEMIST"
+
+ _DESCRIPTION = """\
+ Collection of 1301 oncological clinical case reports written in Spanish, with tumor morphology mentions \
+ manually annotated and mapped by clinical experts to a controlled terminology. Every tumor morphology \
+ mention is linked to an eCIE-O code (the Spanish equivalent of ICD-O).
+
+ The original dataset is distributed in Brat format and was randomly split into 3 subsets. \
+ The training, development and test sets contain 501, 500 and 300 documents, respectively.
+
+ This dataset was designed for the CANcer TExt Mining Shared Task, sponsored by Plan-TL. \
+ The task is divided into 3 subtasks: CANTEMIST-NER, CANTEMIST-NORM and CANTEMIST-CODING.
+
+ CANTEMIST-NER track: requires automatically finding tumor morphology mentions. All tumor morphology \
+ mentions are defined by their corresponding character offsets in UTF-8 plain text medical documents.
+
+ CANTEMIST-NORM track: a clinical concept normalization (named entity normalization) task that requires \
+ returning all tumor morphology entity mentions together with their corresponding eCIE-O-3.1 codes, \
+ i.e. finding and normalizing tumor morphology mentions.
+
+ CANTEMIST-CODING track: requires returning, for each document, a ranked list of its corresponding ICD-O-3 \
+ codes. This is essentially an indexing or multi-label classification task for oncology clinical coding.
+
+ For further information, please visit https://temu.bsc.es/cantemist or send an email to [email protected]
+ """
+
+ _HOMEPAGE = "https://temu.bsc.es/cantemist/?p=4338"
+
+ _LICENSE = 'Creative Commons Attribution 4.0 International'
+
+ _URLS = {
+     "cantemist": "https://zenodo.org/record/3978041/files/cantemist.zip?download=1",
+ }
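+ # The Zenodo archive is expected to unpack into train-set/, dev-set1/, dev-set2/
+ # and test-set/ directories, each containing cantemist-ner, cantemist-norm and
+ # cantemist-coding subfolders (see _split_generators below).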
+
+ _SUPPORTED_TASKS = [
+     Tasks.NAMED_ENTITY_RECOGNITION,
+     Tasks.NAMED_ENTITY_DISAMBIGUATION,
+     Tasks.TEXT_CLASSIFICATION,
+ ]
+
+ _SOURCE_VERSION = "1.6.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class CantemistDataset(datasets.GeneratorBasedBuilder):
+     """Manually annotated collection of oncological clinical case reports written in Spanish."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="cantemist_source",
+             version=SOURCE_VERSION,
+             description="CANTEMIST source schema",
+             schema="source",
+             subset_id="cantemist",
+         ),
+         BigBioConfig(
+             name="cantemist_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="CANTEMIST BigBio schema for the NER and NED tasks",
+             schema="bigbio_kb",
+             subset_id="subtracks_1_2",
+         ),
+         BigBioConfig(
+             name="cantemist_bigbio_text",
+             version=BIGBIO_VERSION,
+             description="CANTEMIST BigBio schema for the CODING task",
+             schema="bigbio_text",
+             subset_id="subtrack_3",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "cantemist_source"
+
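+     # The three configs map onto the shared-task subtracks: cantemist_bigbio_kb
+     # covers the NER and NORM subtracks (1-2), cantemist_bigbio_text covers the
+     # CODING subtrack (3), and cantemist_source keeps the full brat annotations.
+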
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "document_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "labels": [datasets.Value("string")],  # subtrack 3 codes
+                     "text_bound_annotations": [  # T lines in brat
+                         {
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                             "text": datasets.Sequence(datasets.Value("string")),
+                             "type": datasets.Value("string"),
+                             "id": datasets.Value("string"),
+                         }
+                     ],
+                     "events": [  # E lines in brat
+                         {
+                             "trigger": datasets.Value("string"),
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "arguments": datasets.Sequence(
+                                 {
+                                     "role": datasets.Value("string"),
+                                     "ref_id": datasets.Value("string"),
+                                 }
+                             ),
+                         }
+                     ],
+                     "relations": [  # R lines in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "head": {
+                                 "ref_id": datasets.Value("string"),
+                                 "role": datasets.Value("string"),
+                             },
+                             "tail": {
+                                 "ref_id": datasets.Value("string"),
+                                 "role": datasets.Value("string"),
+                             },
+                             "type": datasets.Value("string"),
+                         }
+                     ],
+                     "equivalences": [  # Equiv lines in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "ref_ids": datasets.Sequence(datasets.Value("string")),
+                         }
+                     ],
+                     "attributes": [  # M or A lines in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "ref_id": datasets.Value("string"),
+                             "value": datasets.Value("string"),
+                         }
+                     ],
+                     "normalizations": [  # N lines in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "ref_id": datasets.Value("string"),
+                             "resource_name": datasets.Value("string"),
+                             "cuid": datasets.Value("string"),
+                             "text": datasets.Value("string"),
+                         }
+                     ],
+                     "notes": [  # '#' lines in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "ref_id": datasets.Value("string"),
+                             "text": datasets.Value("string"),
+                         }
+                     ],
+                 },
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         elif self.config.schema == "bigbio_text":
+             features = text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """
+         Downloads/extracts the data to generate the train, validation and test splits.
+
+         Each split is created by instantiating a `datasets.SplitGenerator`, which will
+         call `self._generate_examples` with the keyword arguments in `gen_kwargs`.
+         """
+
+         data_dir = dl_manager.download_and_extract(_URLS["cantemist"])
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepaths": {
+                         "task1": Path(os.path.join(data_dir, "train-set/cantemist-ner")),
+                         "task2": Path(os.path.join(data_dir, "train-set/cantemist-norm")),
+                         "task3": Path(os.path.join(data_dir, "train-set/cantemist-coding")),
+                     },
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepaths": {
+                         "task1": Path(os.path.join(data_dir, "test-set/cantemist-ner")),
+                         "task2": Path(os.path.join(data_dir, "test-set/cantemist-norm")),
+                         "task3": Path(os.path.join(data_dir, "test-set/cantemist-coding")),
+                     },
+                     "split": "test",
+                 },
+             ),
+             # The validation split combines the two development sets released for the task.
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepaths": {
+                         "task1_set1": Path(os.path.join(data_dir, "dev-set1/cantemist-ner")),
+                         "task1_set2": Path(os.path.join(data_dir, "dev-set2/cantemist-ner")),
+                         "task2_set1": Path(os.path.join(data_dir, "dev-set1/cantemist-norm")),
+                         "task2_set2": Path(os.path.join(data_dir, "dev-set2/cantemist-norm")),
+                         "task3_set1": Path(os.path.join(data_dir, "dev-set1/cantemist-coding")),
+                         "task3_set2": Path(os.path.join(data_dir, "dev-set2/cantemist-coding")),
+                     },
+                     "split": "dev",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepaths, split: str) -> Tuple[int, Dict]:
+         """
+         This method handles input defined in `_split_generators` to yield (key, example) tuples from the dataset.
+         Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
+         """
+
+         if split != "dev":
+             txt_files_task1 = list(filepaths["task1"].glob("*txt"))
+             txt_files_task2 = list(filepaths["task2"].glob("*txt"))
+             tsv_file_task3 = Path(os.path.join(filepaths["task3"], f"{split}-coding.tsv"))
+             task3_df = pd.read_csv(tsv_file_task3, sep="\t", header=None)
+         else:
+             # The dev split is spread over dev-set1 and dev-set2; merge both.
+             txt_files_task1, txt_files_task2, dfs = [], [], []
+             for i in range(1, 3):
+                 txt_files_task1 += list(filepaths[f"task1_set{i}"].glob("*txt"))
+                 txt_files_task2 += list(filepaths[f"task2_set{i}"].glob("*txt"))
+                 tsv_file_task3 = Path(
+                     os.path.join(filepaths[f"task3_set{i}"], f"{split}{i}-coding.tsv")
+                 )
+                 df = pd.read_csv(tsv_file_task3, sep="\t", header=0)
+                 dfs.append(df)
+             task3_df = pd.concat(dfs)
+
+         if self.config.schema == "source" or self.config.schema == "bigbio_text":
+             # Map each document id to the list of its subtrack 3 codes.
+             task3_dict = {}
+             for idx, row in task3_df.iterrows():
+                 file, code = row.iloc[0], row.iloc[1]
+                 if file not in task3_dict:
+                     task3_dict[file] = [code]
+                 else:
+                     task3_dict[file] += [code]
+
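+         # Illustrative example (hypothetical document id and codes): tsv rows
+         # ("cc_onco101", "8041/3") and ("cc_onco101", "8000/6") produce
+         # task3_dict == {"cc_onco101": ["8041/3", "8000/6"]}.
+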
+         if self.config.schema == "source":
+             for guid, txt_file in enumerate(txt_files_task2):
+                 example = parse_brat_file(txt_file, parse_notes=True)
+                 if example["document_id"] in task3_dict:
+                     example["labels"] = task3_dict[example["document_id"]]
+                 else:
+                     # few cases where subtrack 3 has no codes for the current document
+                     example["labels"] = []
+                 example["id"] = str(guid)
+                 yield guid, example
+
+         elif self.config.schema == "bigbio_kb":
+             for guid, txt_file in enumerate(txt_files_task2):
+                 parsed_brat = parse_brat_file(txt_file, parse_notes=True)
+                 example = brat_parse_to_bigbio_kb(parsed_brat)
+                 example["id"] = str(guid)
+                 # Attach the eCIE-O-3.1 code carried by the i-th brat note line
+                 # to the i-th entity as a normalization entry.
+                 for i in range(0, len(example["entities"])):
+                     normalized_dict = {
+                         "db_id": parsed_brat["notes"][i]["text"],
+                         "db_name": "eCIE-O-3.1",
+                     }
+                     example["entities"][i]["normalized"].append(normalized_dict)
+                 yield guid, example
+
+         elif self.config.schema == "bigbio_text":
+             for guid, txt_file in enumerate(txt_files_task1):
+                 parsed_brat = parse_brat_file(txt_file, parse_notes=False)
+                 if parsed_brat["document_id"] in task3_dict:
+                     labels = task3_dict[parsed_brat["document_id"]]
+                 else:
+                     # few cases where subtrack 3 has no codes for the current document
+                     labels = []
+                 example = {
+                     "id": str(guid),
+                     "document_id": parsed_brat["document_id"],
+                     "text": parsed_brat["text"],
+                     "labels": labels,
+                 }
+                 yield guid, example
+
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
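
For reference, a minimal sketch of using this loader once the script and its bigbiohub.py helper are on the Hub. The repo id "bigbio/cantemist" is an assumption, and newer releases of datasets may additionally require trust_remote_code=True:

    from datasets import load_dataset

    # Hypothetical repo id; adjust to wherever this script actually lives.
    ner_norm = load_dataset("bigbio/cantemist", name="cantemist_bigbio_kb", split="train")
    coding = load_dataset("bigbio/cantemist", name="cantemist_bigbio_text", split="train")

    print(ner_norm[0]["entities"][0])  # first tumor morphology mention with its eCIE-O-3.1 code
    print(coding[0]["labels"])         # subtrack 3 codes for the first document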