leonweber committed on
Commit 16fe403
1 Parent(s): 9eabfc4

Update czi_drsm based on git version f5bf778

Files changed (4)
  1. README.md +66 -1
  2. __init__.py +0 -0
  3. bigbiohub.py +590 -0
  4. czi_drsm.py +410 -0
README.md CHANGED
@@ -1,3 +1,68 @@
  ---
- license: apache-2.0
+ language:
+ - en
+ bigbio_language:
+ - English
+ license: cc0-1.0
+ bigbio_license_shortname: cc0-1.0
+ multilinguality: monolingual
+ pretty_name: CZI DRSM
+ homepage: https://github.com/chanzuckerberg/DRSM-corpus
+ bigbio_pubmed: false
+ bigbio_public: true
+ bigbio_tasks:
+ - TXTCLASS
  ---
+
+ # Dataset Card for CZI DRSM
+
+ ## Dataset Description
+
+ - **Homepage:** https://github.com/chanzuckerberg/DRSM-corpus
+ - **Pubmed:** False
+ - **Public:** True
+ - **Tasks:** TXTCLASS
+
+ Research Article document classification dataset based on aspects of disease research. Currently, the dataset consists of three subsets:
+
+ (A) classifies titles/abstracts of papers into the most popular subtypes of clinical, basic, and translational papers (~20k papers);
+ - Clinical Characteristics, Disease Pathology, and Diagnosis -
+ Text that describes (A) symptoms, signs, or ‘phenotype’ of a disease;
+ (B) the effects of the disease on patient organs, tissues, or cells;
+ (C) the results of clinical tests that reveal pathology (including
+ biomarkers); (D) research that uses this information to figure out
+ a diagnosis.
+ - Therapeutics in the clinic -
+ Text describing how treatments work in the clinic (but not in a clinical trial).
+ - Disease mechanism -
+ Text that describes either (A) mechanistic involvement of specific genes in disease
+ (deletions, gain of function, etc.); (B) how molecular signalling or metabolism
+ (binding, activating, phosphorylation, concentration increase, etc.)
+ are involved in the mechanism of a disease; or (C) the physiological
+ mechanism of disease at the level of tissues, organs, and body systems.
+ - Patient-Based Therapeutics -
+ Text describing (A) Clinical trials (studies of therapeutic measures being
+ used on patients in a clinical trial); (B) Post Marketing Drug Surveillance
+ (effects of a drug after approval in the general population or as part of
+ ‘standard healthcare’); (C) Drug repurposing (how a drug that has been
+ approved for one use is being applied to a new disease).
+
+ (B) identifies whether a title/abstract of a paper describes substantive research into Quality of Life (~10k papers);
+ - -1 - the paper is not a primary experimental study in rare disease
+ - 0 - the study does not directly investigate quality of life
+ - 1 - the study investigates qol but not as its primary contribution
+ - 2 - the study's primary contribution centers on quality of life measures
+
+ (C) identifies if a paper is a natural history study (~10k papers).
+ - -1 - the paper is not a primary experimental study in rare disease
+ - 0 - the study is not directly investigating the natural history of a disease
+ - 1 - the study includes some elements a natural history but not as its primary contribution
+ - 2 - the study's primary contribution centers on observing the time course of a rare disease
+
+ These classifications are particularly relevant in rare disease research, a field that is generally understudied.
+
+ ## Citation Information
+
+ ```
+ # N/A
+ ```
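To make the card concrete, here is a minimal usage sketch with the Hugging Face `datasets` library. The config names come from the `czi_drsm.py` loader added below; the hub path `bigbio/czi_drsm` is an assumption about where this script is hosted, and the `trust_remote_code` flag may or may not be required depending on the installed `datasets` version.

```python
# Minimal sketch: loading two of the configurations defined in czi_drsm.py.
# The hub path "bigbio/czi_drsm" is an assumption; adjust it to wherever this
# loader script actually lives.
from datasets import load_dataset

# Default BigBio text schema for the base (disease-aspect) subset
ds_base = load_dataset("bigbio/czi_drsm", name="czi_drsm_bigbio_base_text", trust_remote_code=True)

# Source schema for the Quality of Life subset
ds_qol = load_dataset("bigbio/czi_drsm", name="czi_drsm_qol_source", trust_remote_code=True)

print(ds_base["train"][0]["labels"])
```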
__init__.py ADDED
File without changes
bigbiohub.py ADDED
@@ -0,0 +1,590 @@
1
+ from collections import defaultdict
2
+ from dataclasses import dataclass
3
+ from enum import Enum
4
+ import logging
5
+ from pathlib import Path
6
+ from types import SimpleNamespace
7
+ from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple
8
+
9
+ import datasets
10
+
11
+ if TYPE_CHECKING:
12
+ import bioc
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
+ BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")
18
+
19
+
20
+ @dataclass
21
+ class BigBioConfig(datasets.BuilderConfig):
22
+ """BuilderConfig for BigBio."""
23
+
24
+ name: str = None
25
+ version: datasets.Version = None
26
+ description: str = None
27
+ schema: str = None
28
+ subset_id: str = None
29
+
30
+
31
+ class Tasks(Enum):
32
+ NAMED_ENTITY_RECOGNITION = "NER"
33
+ NAMED_ENTITY_DISAMBIGUATION = "NED"
34
+ EVENT_EXTRACTION = "EE"
35
+ RELATION_EXTRACTION = "RE"
36
+ COREFERENCE_RESOLUTION = "COREF"
37
+ QUESTION_ANSWERING = "QA"
38
+ TEXTUAL_ENTAILMENT = "TE"
39
+ SEMANTIC_SIMILARITY = "STS"
40
+ TEXT_PAIRS_CLASSIFICATION = "TXT2CLASS"
41
+ PARAPHRASING = "PARA"
42
+ TRANSLATION = "TRANSL"
43
+ SUMMARIZATION = "SUM"
44
+ TEXT_CLASSIFICATION = "TXTCLASS"
45
+
46
+
47
+ entailment_features = datasets.Features(
48
+ {
49
+ "id": datasets.Value("string"),
50
+ "premise": datasets.Value("string"),
51
+ "hypothesis": datasets.Value("string"),
52
+ "label": datasets.Value("string"),
53
+ }
54
+ )
55
+
56
+ pairs_features = datasets.Features(
57
+ {
58
+ "id": datasets.Value("string"),
59
+ "document_id": datasets.Value("string"),
60
+ "text_1": datasets.Value("string"),
61
+ "text_2": datasets.Value("string"),
62
+ "label": datasets.Value("string"),
63
+ }
64
+ )
65
+
66
+ qa_features = datasets.Features(
67
+ {
68
+ "id": datasets.Value("string"),
69
+ "question_id": datasets.Value("string"),
70
+ "document_id": datasets.Value("string"),
71
+ "question": datasets.Value("string"),
72
+ "type": datasets.Value("string"),
73
+ "choices": [datasets.Value("string")],
74
+ "context": datasets.Value("string"),
75
+ "answer": datasets.Sequence(datasets.Value("string")),
76
+ }
77
+ )
78
+
79
+ text_features = datasets.Features(
80
+ {
81
+ "id": datasets.Value("string"),
82
+ "document_id": datasets.Value("string"),
83
+ "text": datasets.Value("string"),
84
+ "labels": [datasets.Value("string")],
85
+ }
86
+ )
87
+
88
+ text2text_features = datasets.Features(
89
+ {
90
+ "id": datasets.Value("string"),
91
+ "document_id": datasets.Value("string"),
92
+ "text_1": datasets.Value("string"),
93
+ "text_2": datasets.Value("string"),
94
+ "text_1_name": datasets.Value("string"),
95
+ "text_2_name": datasets.Value("string"),
96
+ }
97
+ )
98
+
99
+ kb_features = datasets.Features(
100
+ {
101
+ "id": datasets.Value("string"),
102
+ "document_id": datasets.Value("string"),
103
+ "passages": [
104
+ {
105
+ "id": datasets.Value("string"),
106
+ "type": datasets.Value("string"),
107
+ "text": datasets.Sequence(datasets.Value("string")),
108
+ "offsets": datasets.Sequence([datasets.Value("int32")]),
109
+ }
110
+ ],
111
+ "entities": [
112
+ {
113
+ "id": datasets.Value("string"),
114
+ "type": datasets.Value("string"),
115
+ "text": datasets.Sequence(datasets.Value("string")),
116
+ "offsets": datasets.Sequence([datasets.Value("int32")]),
117
+ "normalized": [
118
+ {
119
+ "db_name": datasets.Value("string"),
120
+ "db_id": datasets.Value("string"),
121
+ }
122
+ ],
123
+ }
124
+ ],
125
+ "events": [
126
+ {
127
+ "id": datasets.Value("string"),
128
+ "type": datasets.Value("string"),
129
+ # refers to the text_bound_annotation of the trigger
130
+ "trigger": {
131
+ "text": datasets.Sequence(datasets.Value("string")),
132
+ "offsets": datasets.Sequence([datasets.Value("int32")]),
133
+ },
134
+ "arguments": [
135
+ {
136
+ "role": datasets.Value("string"),
137
+ "ref_id": datasets.Value("string"),
138
+ }
139
+ ],
140
+ }
141
+ ],
142
+ "coreferences": [
143
+ {
144
+ "id": datasets.Value("string"),
145
+ "entity_ids": datasets.Sequence(datasets.Value("string")),
146
+ }
147
+ ],
148
+ "relations": [
149
+ {
150
+ "id": datasets.Value("string"),
151
+ "type": datasets.Value("string"),
152
+ "arg1_id": datasets.Value("string"),
153
+ "arg2_id": datasets.Value("string"),
154
+ "normalized": [
155
+ {
156
+ "db_name": datasets.Value("string"),
157
+ "db_id": datasets.Value("string"),
158
+ }
159
+ ],
160
+ }
161
+ ],
162
+ }
163
+ )
164
+
165
+
166
+ TASK_TO_SCHEMA = {
167
+ Tasks.NAMED_ENTITY_RECOGNITION.name: "KB",
168
+ Tasks.NAMED_ENTITY_DISAMBIGUATION.name: "KB",
169
+ Tasks.EVENT_EXTRACTION.name: "KB",
170
+ Tasks.RELATION_EXTRACTION.name: "KB",
171
+ Tasks.COREFERENCE_RESOLUTION.name: "KB",
172
+ Tasks.QUESTION_ANSWERING.name: "QA",
173
+ Tasks.TEXTUAL_ENTAILMENT.name: "TE",
174
+ Tasks.SEMANTIC_SIMILARITY.name: "PAIRS",
175
+ Tasks.TEXT_PAIRS_CLASSIFICATION.name: "PAIRS",
176
+ Tasks.PARAPHRASING.name: "T2T",
177
+ Tasks.TRANSLATION.name: "T2T",
178
+ Tasks.SUMMARIZATION.name: "T2T",
179
+ Tasks.TEXT_CLASSIFICATION.name: "TEXT",
180
+ }
181
+
182
+ SCHEMA_TO_TASKS = defaultdict(set)
183
+ for task, schema in TASK_TO_SCHEMA.items():
184
+ SCHEMA_TO_TASKS[schema].add(task)
185
+ SCHEMA_TO_TASKS = dict(SCHEMA_TO_TASKS)
186
+
187
+ VALID_TASKS = set(TASK_TO_SCHEMA.keys())
188
+ VALID_SCHEMAS = set(TASK_TO_SCHEMA.values())
189
+
190
+ SCHEMA_TO_FEATURES = {
191
+ "KB": kb_features,
192
+ "QA": qa_features,
193
+ "TE": entailment_features,
194
+ "T2T": text2text_features,
195
+ "TEXT": text_features,
196
+ "PAIRS": pairs_features,
197
+ }
198
+
199
+
200
+ def get_texts_and_offsets_from_bioc_ann(ann: "bioc.BioCAnnotation") -> Tuple:
201
+
202
+ offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]
203
+
204
+ text = ann.text
205
+
206
+ if len(offsets) > 1:
207
+ i = 0
208
+ texts = []
209
+ for start, end in offsets:
210
+ chunk_len = end - start
211
+ texts.append(text[i : chunk_len + i])
212
+ i += chunk_len
213
+ while i < len(text) and text[i] == " ":
214
+ i += 1
215
+ else:
216
+ texts = [text]
217
+
218
+ return offsets, texts
219
+
220
+
221
+ def remove_prefix(a: str, prefix: str) -> str:
222
+ if a.startswith(prefix):
223
+ a = a[len(prefix) :]
224
+ return a
225
+
226
+
227
+ def parse_brat_file(
228
+ txt_file: Path,
229
+ annotation_file_suffixes: List[str] = None,
230
+ parse_notes: bool = False,
231
+ ) -> Dict:
232
+ """
233
+ Parse a brat file into the schema defined below.
234
+ `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
235
+ Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
236
+ e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
237
+ Will include annotator notes, when `parse_notes == True`.
238
+ brat_features = datasets.Features(
239
+ {
240
+ "id": datasets.Value("string"),
241
+ "document_id": datasets.Value("string"),
242
+ "text": datasets.Value("string"),
243
+ "text_bound_annotations": [ # T line in brat, e.g. type or event trigger
244
+ {
245
+ "offsets": datasets.Sequence([datasets.Value("int32")]),
246
+ "text": datasets.Sequence(datasets.Value("string")),
247
+ "type": datasets.Value("string"),
248
+ "id": datasets.Value("string"),
249
+ }
250
+ ],
251
+ "events": [ # E line in brat
252
+ {
253
+ "trigger": datasets.Value(
254
+ "string"
255
+ ), # refers to the text_bound_annotation of the trigger,
256
+ "id": datasets.Value("string"),
257
+ "type": datasets.Value("string"),
258
+ "arguments": datasets.Sequence(
259
+ {
260
+ "role": datasets.Value("string"),
261
+ "ref_id": datasets.Value("string"),
262
+ }
263
+ ),
264
+ }
265
+ ],
266
+ "relations": [ # R line in brat
267
+ {
268
+ "id": datasets.Value("string"),
269
+ "head": {
270
+ "ref_id": datasets.Value("string"),
271
+ "role": datasets.Value("string"),
272
+ },
273
+ "tail": {
274
+ "ref_id": datasets.Value("string"),
275
+ "role": datasets.Value("string"),
276
+ },
277
+ "type": datasets.Value("string"),
278
+ }
279
+ ],
280
+ "equivalences": [ # Equiv line in brat
281
+ {
282
+ "id": datasets.Value("string"),
283
+ "ref_ids": datasets.Sequence(datasets.Value("string")),
284
+ }
285
+ ],
286
+ "attributes": [ # M or A lines in brat
287
+ {
288
+ "id": datasets.Value("string"),
289
+ "type": datasets.Value("string"),
290
+ "ref_id": datasets.Value("string"),
291
+ "value": datasets.Value("string"),
292
+ }
293
+ ],
294
+ "normalizations": [ # N lines in brat
295
+ {
296
+ "id": datasets.Value("string"),
297
+ "type": datasets.Value("string"),
298
+ "ref_id": datasets.Value("string"),
299
+ "resource_name": datasets.Value(
300
+ "string"
301
+ ), # Name of the resource, e.g. "Wikipedia"
302
+ "cuid": datasets.Value(
303
+ "string"
304
+ ), # ID in the resource, e.g. 534366
305
+ "text": datasets.Value(
306
+ "string"
307
+ ), # Human readable description/name of the entity, e.g. "Barack Obama"
308
+ }
309
+ ],
310
+ ### OPTIONAL: Only included when `parse_notes == True`
311
+ "notes": [ # # lines in brat
312
+ {
313
+ "id": datasets.Value("string"),
314
+ "type": datasets.Value("string"),
315
+ "ref_id": datasets.Value("string"),
316
+ "text": datasets.Value("string"),
317
+ }
318
+ ],
319
+ },
320
+ )
321
+ """
322
+
323
+ example = {}
324
+ example["document_id"] = txt_file.with_suffix("").name
325
+ with txt_file.open() as f:
326
+ example["text"] = f.read()
327
+
328
+ # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
329
+ # for event extraction
330
+ if annotation_file_suffixes is None:
331
+ annotation_file_suffixes = [".a1", ".a2", ".ann"]
332
+
333
+ if len(annotation_file_suffixes) == 0:
334
+ raise AssertionError(
335
+ "At least one suffix for the to-be-read annotation files should be given!"
336
+ )
337
+
338
+ ann_lines = []
339
+ for suffix in annotation_file_suffixes:
340
+ annotation_file = txt_file.with_suffix(suffix)
341
+ if annotation_file.exists():
342
+ with annotation_file.open() as f:
343
+ ann_lines.extend(f.readlines())
344
+
345
+ example["text_bound_annotations"] = []
346
+ example["events"] = []
347
+ example["relations"] = []
348
+ example["equivalences"] = []
349
+ example["attributes"] = []
350
+ example["normalizations"] = []
351
+
352
+ if parse_notes:
353
+ example["notes"] = []
354
+
355
+ for line in ann_lines:
356
+ line = line.strip()
357
+ if not line:
358
+ continue
359
+
360
+ if line.startswith("T"): # Text bound
361
+ ann = {}
362
+ fields = line.split("\t")
363
+
364
+ ann["id"] = fields[0]
365
+ ann["type"] = fields[1].split()[0]
366
+ ann["offsets"] = []
367
+ span_str = remove_prefix(fields[1], (ann["type"] + " "))
368
+ text = fields[2]
369
+ for span in span_str.split(";"):
370
+ start, end = span.split()
371
+ ann["offsets"].append([int(start), int(end)])
372
+
373
+ # Heuristically split text of discontiguous entities into chunks
374
+ ann["text"] = []
375
+ if len(ann["offsets"]) > 1:
376
+ i = 0
377
+ for start, end in ann["offsets"]:
378
+ chunk_len = end - start
379
+ ann["text"].append(text[i : chunk_len + i])
380
+ i += chunk_len
381
+ while i < len(text) and text[i] == " ":
382
+ i += 1
383
+ else:
384
+ ann["text"] = [text]
385
+
386
+ example["text_bound_annotations"].append(ann)
387
+
388
+ elif line.startswith("E"):
389
+ ann = {}
390
+ fields = line.split("\t")
391
+
392
+ ann["id"] = fields[0]
393
+
394
+ ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
395
+
396
+ ann["arguments"] = []
397
+ for role_ref_id in fields[1].split()[1:]:
398
+ argument = {
399
+ "role": (role_ref_id.split(":"))[0],
400
+ "ref_id": (role_ref_id.split(":"))[1],
401
+ }
402
+ ann["arguments"].append(argument)
403
+
404
+ example["events"].append(ann)
405
+
406
+ elif line.startswith("R"):
407
+ ann = {}
408
+ fields = line.split("\t")
409
+
410
+ ann["id"] = fields[0]
411
+ ann["type"] = fields[1].split()[0]
412
+
413
+ ann["head"] = {
414
+ "role": fields[1].split()[1].split(":")[0],
415
+ "ref_id": fields[1].split()[1].split(":")[1],
416
+ }
417
+ ann["tail"] = {
418
+ "role": fields[1].split()[2].split(":")[0],
419
+ "ref_id": fields[1].split()[2].split(":")[1],
420
+ }
421
+
422
+ example["relations"].append(ann)
423
+
424
+ # '*' seems to be the legacy way to mark equivalences,
425
+ # but I couldn't find any info on the current way
426
+ # this might have to be adapted dependent on the brat version
427
+ # of the annotation
428
+ elif line.startswith("*"):
429
+ ann = {}
430
+ fields = line.split("\t")
431
+
432
+ ann["id"] = fields[0]
433
+ ann["ref_ids"] = fields[1].split()[1:]
434
+
435
+ example["equivalences"].append(ann)
436
+
437
+ elif line.startswith("A") or line.startswith("M"):
438
+ ann = {}
439
+ fields = line.split("\t")
440
+
441
+ ann["id"] = fields[0]
442
+
443
+ info = fields[1].split()
444
+ ann["type"] = info[0]
445
+ ann["ref_id"] = info[1]
446
+
447
+ if len(info) > 2:
448
+ ann["value"] = info[2]
449
+ else:
450
+ ann["value"] = ""
451
+
452
+ example["attributes"].append(ann)
453
+
454
+ elif line.startswith("N"):
455
+ ann = {}
456
+ fields = line.split("\t")
457
+
458
+ ann["id"] = fields[0]
459
+ ann["text"] = fields[2]
460
+
461
+ info = fields[1].split()
462
+
463
+ ann["type"] = info[0]
464
+ ann["ref_id"] = info[1]
465
+ ann["resource_name"] = info[2].split(":")[0]
466
+ ann["cuid"] = info[2].split(":")[1]
467
+ example["normalizations"].append(ann)
468
+
469
+ elif parse_notes and line.startswith("#"):
470
+ ann = {}
471
+ fields = line.split("\t")
472
+
473
+ ann["id"] = fields[0]
474
+ ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL
475
+
476
+ info = fields[1].split()
477
+
478
+ ann["type"] = info[0]
479
+ ann["ref_id"] = info[1]
480
+ example["notes"].append(ann)
481
+
482
+ return example
483
+
484
+
485
+ def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
486
+ """
487
+ Transform a brat parse (conforming to the standard brat schema) obtained with
488
+ `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in ../schemas/kb.py)
489
+ :param brat_parse:
490
+ """
491
+
492
+ unified_example = {}
493
+
494
+ # Prefix all ids with document id to ensure global uniqueness,
495
+ # because brat ids are only unique within their document
496
+ id_prefix = brat_parse["document_id"] + "_"
497
+
498
+ # identical
499
+ unified_example["document_id"] = brat_parse["document_id"]
500
+ unified_example["passages"] = [
501
+ {
502
+ "id": id_prefix + "_text",
503
+ "type": "abstract",
504
+ "text": [brat_parse["text"]],
505
+ "offsets": [[0, len(brat_parse["text"])]],
506
+ }
507
+ ]
508
+
509
+ # get normalizations
510
+ ref_id_to_normalizations = defaultdict(list)
511
+ for normalization in brat_parse["normalizations"]:
512
+ ref_id_to_normalizations[normalization["ref_id"]].append(
513
+ {
514
+ "db_name": normalization["resource_name"],
515
+ "db_id": normalization["cuid"],
516
+ }
517
+ )
518
+
519
+ # separate entities and event triggers
520
+ unified_example["events"] = []
521
+ non_event_ann = brat_parse["text_bound_annotations"].copy()
522
+ for event in brat_parse["events"]:
523
+ event = event.copy()
524
+ event["id"] = id_prefix + event["id"]
525
+ trigger = next(
526
+ tr
527
+ for tr in brat_parse["text_bound_annotations"]
528
+ if tr["id"] == event["trigger"]
529
+ )
530
+ if trigger in non_event_ann:
531
+ non_event_ann.remove(trigger)
532
+ event["trigger"] = {
533
+ "text": trigger["text"].copy(),
534
+ "offsets": trigger["offsets"].copy(),
535
+ }
536
+ for argument in event["arguments"]:
537
+ argument["ref_id"] = id_prefix + argument["ref_id"]
538
+
539
+ unified_example["events"].append(event)
540
+
541
+ unified_example["entities"] = []
542
+ anno_ids = [ref_id["id"] for ref_id in non_event_ann]
543
+ for ann in non_event_ann:
544
+ entity_ann = ann.copy()
545
+ entity_ann["id"] = id_prefix + entity_ann["id"]
546
+ entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
547
+ unified_example["entities"].append(entity_ann)
548
+
549
+ # massage relations
550
+ unified_example["relations"] = []
551
+ skipped_relations = set()
552
+ for ann in brat_parse["relations"]:
553
+ if (
554
+ ann["head"]["ref_id"] not in anno_ids
555
+ or ann["tail"]["ref_id"] not in anno_ids
556
+ ):
557
+ skipped_relations.add(ann["id"])
558
+ continue
559
+ unified_example["relations"].append(
560
+ {
561
+ "arg1_id": id_prefix + ann["head"]["ref_id"],
562
+ "arg2_id": id_prefix + ann["tail"]["ref_id"],
563
+ "id": id_prefix + ann["id"],
564
+ "type": ann["type"],
565
+ "normalized": [],
566
+ }
567
+ )
568
+ if len(skipped_relations) > 0:
569
+ example_id = brat_parse["document_id"]
570
+ logger.info(
571
+ f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
572
+ f" Skip (for now): "
573
+ f"{list(skipped_relations)}"
574
+ )
575
+
576
+ # get coreferences
577
+ unified_example["coreferences"] = []
578
+ for i, ann in enumerate(brat_parse["equivalences"], start=1):
579
+ is_entity_cluster = True
580
+ for ref_id in ann["ref_ids"]:
581
+ if not ref_id.startswith("T"): # not textbound -> no entity
582
+ is_entity_cluster = False
583
+ elif ref_id not in anno_ids: # event trigger -> no entity
584
+ is_entity_cluster = False
585
+ if is_entity_cluster:
586
+ entity_ids = [id_prefix + i for i in ann["ref_ids"]]
587
+ unified_example["coreferences"].append(
588
+ {"id": id_prefix + str(i), "entity_ids": entity_ids}
589
+ )
590
+ return unified_example
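`bigbiohub.py` is the shared BigBio helper module; the `czi_drsm.py` loader added next only needs its schema features and config class, but the brat utilities at the end compose as sketched below. This is a minimal sketch assuming `bigbiohub.py` is importable from the working directory; `data/1234.txt` and its accompanying `.ann`/`.a1`/`.a2` file are hypothetical, not files in this repository.

```python
# Minimal sketch of the brat utilities above; "data/1234.txt" is a
# hypothetical brat document, not part of this dataset.
from pathlib import Path

from bigbiohub import brat_parse_to_bigbio_kb, parse_brat_file

brat_example = parse_brat_file(Path("data/1234.txt"), parse_notes=True)
kb_example = brat_parse_to_bigbio_kb(brat_example)

# kb_example now follows the bigbio_kb layout declared by kb_features above
print(kb_example["document_id"], len(kb_example["entities"]), len(kb_example["relations"]))
```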
czi_drsm.py ADDED
@@ -0,0 +1,410 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and Gully Burns.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ Research Article document classification dataset based on aspects of disease research. Currently, the dataset consists of three subsets:
18
+
19
+ (A) classifies title/abstracts of papers into most popular subtypes of clinical, basic, and translational papers (~20k papers);
20
+ - Clinical Characteristics, Disease Pathology, and Diagnosis -
21
+ Text that describes (A) symptoms, signs, or ‘phenotype’ of a disease;
22
+ (B) the effects of the disease on patient organs, tissues, or cells;
23
+ (C) the results of clinical tests that reveal pathology (including
24
+ biomarkers); (D) research that uses this information to figure out
25
+ a diagnosis.
26
+ - Therapeutics in the clinic -
27
+ Text describing how treatments work in the clinic (but not in a clinical trial).
28
+ - Disease mechanism -
29
+ Text that describes either (A) mechanistic involvement of specific genes in disease
30
+ (deletions, gain of function, etc); (B) how molecular signalling or metabolism
31
+ (binding, activating, phosphorylation, concentration increase, etc.)
32
+ are involved in the mechanism of a disease; or (C) the physiological
33
+ mechanism of disease at the level of tissues, organs, and body systems.
34
+ - Patient-Based Therapeutics -
35
+ Text describing (A) Clinical trials (studies of therapeutic measures being
36
+ used on patients in a clinical trial); (B) Post Marketing Drug Surveillance
37
+ (effects of a drug after approval in the general population or as part of
38
+ ‘standard healthcare’); (C) Drug repurposing (how a drug that has been
39
+ approved for one use is being applied to a new disease).
40
+
41
+ (B) identifies whether a title/abstract of a paper describes substantive research into Quality of Life (~10k papers);
42
+ - -1 - the paper is not a primary experimental study in rare disease
43
+ - 0 - the study does not directly investigate quality of life
44
+ - 1 - the study investigates qol but not as its primary contribution
45
+ - 2 - the study's primary contribution centers on quality of life measures
46
+
47
+ (C) identifies if a paper is a natural history study (~10k papers).
48
+ - -1 - the paper is not a primary experimental study in rare disease
49
+ - 0 - the study is not directly investigating the natural history of a disease
50
+ - 1 - the study includes some elements a natural history but not as its primary contribution
51
+ - 2 - the study's primary contribution centers on observing the time course of a rare disease
52
+
53
+ These classifications are particularly relevant in rare disease research, a field that is generally understudied.
54
+ """
55
+
56
+ import os
57
+ from typing import List, Tuple, Dict
58
+
59
+ import datasets
60
+ import pandas as pd
61
+ from pathlib import Path
62
+
63
+ import bigbio.utils.parsing as parse
64
+ from bigbio.utils import schemas
65
+ from bigbio.utils.configs import BigBioConfig
66
+ from bigbio.utils.constants import Lang, Tasks
67
+ from bigbio.utils.license import Licenses
68
+
69
+ #from .bigbiohub import BigBioConfig
70
+ #from .bigbiohub import Tasks
71
+
72
+ #from .bigbiohub import
73
+
74
+ _LOCAL = False
75
+
76
+ _CITATION = """\
77
+ @article{,
78
+ author = {},
79
+ title = {},
80
+ journal = {},
81
+ volume = {},
82
+ year = {},
83
+ url = {},
84
+ doi = {},
85
+ biburl = {},
86
+ bibsource = {}
87
+ }
88
+ """
89
+
90
+ _DATASETNAME = "czi_drsm"
91
+
92
+ _DESCRIPTION = """\
93
+ Research Article document classification dataset based on aspects of disease research. Currently, the dataset consists of three subsets:
94
+
95
+ (A) classifies title/abstracts of papers into most popular subtypes of clinical, basic, and translational papers (~20k papers);
96
+ - Clinical Characteristics, Disease Pathology, and Diagnosis -
97
+ Text that describes (A) symptoms, signs, or ‘phenotype’ of a disease;
98
+ (B) the effects of the disease on patient organs, tissues, or cells;
99
+ (C) the results of clinical tests that reveal pathology (including
100
+ biomarkers); (D) research that uses this information to figure out
101
+ a diagnosis.
102
+ - Therapeutics in the clinic -
103
+ Text describing how treatments work in the clinic (but not in a clinical trial).
104
+ - Disease mechanism -
105
+ Text that describes either (A) mechanistic involvement of specific genes in disease
106
+ (deletions, gain of function, etc); (B) how molecular signalling or metabolism
107
+ (binding, activating, phosphorylation, concentration increase, etc.)
108
+ are involved in the mechanism of a disease; or (C) the physiological
109
+ mechanism of disease at the level of tissues, organs, and body systems.
110
+ - Patient-Based Therapeutics -
111
+ Text describing (A) Clinical trials (studies of therapeutic measures being
112
+ used on patients in a clinical trial); (B) Post Marketing Drug Surveillance
113
+ (effects of a drug after approval in the general population or as part of
114
+ ‘standard healthcare’); (C) Drug repurposing (how a drug that has been
115
+ approved for one use is being applied to a new disease).
116
+
117
+ (B) identifies whether a title/abstract of a paper describes substantive research into Quality of Life (~10k papers);
118
+ - -1 - the paper is not a primary experimental study in rare disease
119
+ - 0 - the study does not directly investigate quality of life
120
+ - 1 - the study investigates qol but not as its primary contribution
121
+ - 2 - the study's primary contribution centers on quality of life measures
122
+
123
+ (C) identifies if a paper is a natural history study (~10k papers).
124
+ - -1 - the paper is not a primary experimental study in rare disease
125
+ - 0 - the study is not directly investigating the natural history of a disease
126
+ - 1 - the study includes some elements a natural history but not as its primary contribution
127
+ - 2 - the study's primary contribution centers on observing the time course of a rare disease
128
+
129
+ These classifications are particularly relevant in rare disease research, a field that is generally understudied.
130
+ """
131
+
132
+ _HOMEPAGE = "https://github.com/chanzuckerberg/DRSM-corpus/"
133
+ _LICENSE = "CC0_1p0"
134
+
135
+ _LANGUAGES = ['English']
136
+ _PUBMED = False
137
+ _LOCAL = False
138
+ _DISPLAYNAME = "DRSM Corpus"
139
+
140
+ # For publicly available datasets you will most likely end up passing these URLs to dl_manager in _split_generators.
141
+ # In most cases the URLs will be the same for the source and bigbio config.
142
+ # However, if you need to access different files for each config you can have multiple entries in this dict.
143
+ # This can be an arbitrarily nested dict/list of URLs (see below in `_split_generators` method)
144
+ _URLS = {
145
+ 'base': "https://raw.githubusercontent.com/chanzuckerberg/DRSM-corpus/main/v1/drsm_corpus_v1.tsv",
146
+ 'qol': "https://raw.githubusercontent.com/chanzuckerberg/DRSM-corpus/main/v2/qol_all_2022_12_15.tsv",
147
+ 'nhs': "https://raw.githubusercontent.com/chanzuckerberg/DRSM-corpus/main/v2/nhs_all_2023_03_31.tsv"
148
+ }
149
+
150
+ _SUPPORTED_TASKS = [Tasks.TEXT_CLASSIFICATION]
151
+
152
+ _SOURCE_VERSION = "1.0.0"
153
+ _BIGBIO_VERSION = "1.0.0"
154
+
155
+ _CLASS_NAMES_BASE = [
156
+ "clinical characteristics or disease pathology",
157
+ "therapeutics in the clinic",
158
+ "disease mechanism",
159
+ "patient-based therapeutics",
160
+ "other",
161
+ "irrelevant"
162
+ ]
163
+
164
+ _CLASS_NAMES_QOL = [
165
+ "-1 - the paper is not a primary experimental study in rare disease",
166
+ "0 - the study does not directly investigate quality of life",
167
+ "1 - the study investigates qol but not as its primary contribution",
168
+ "2 - the study's primary contribution centers on quality of life measures"
169
+ ]
170
+
171
+ _CLASS_NAMES_NHS = [
172
+ "-1 - the paper is not a primary experimental study in rare disease",
173
+ "0 - the study is not directly investigating the natural history of a disease",
174
+ "1 - the study includes some elements a natural history but not as its primary contribution",
175
+ "2 - the study's primary contribution centers on observing the time course of a rare disease"
176
+ ]
177
+
178
+ class DRSMBaseDataset(datasets.GeneratorBasedBuilder):
179
+ """DRSM Document Classification Datasets."""
180
+
181
+ SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
182
+ BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
183
+
184
+ # You will be able to load the "source" or "bigbio" configurations with
185
+ #ds_source = datasets.load_dataset('drsm_source_dataset', name='source')
186
+ #ds_bigbio = datasets.load_dataset('drsm_bigbio_dataset', name='bigbio')
187
+
188
+ # For local datasets you can make use of the `data_dir` and `data_files` kwargs
189
+ # https://huggingface.co/docs/datasets/add_dataset.html#downloading-data-files-and-organizing-splits
190
+ # ds_source = datasets.load_dataset('my_dataset', name='source', data_dir="/path/to/data/files")
191
+ # ds_bigbio = datasets.load_dataset('my_dataset', name='bigbio', data_dir="/path/to/data/files")
192
+
193
+ # TODO: For each dataset, implement Config for Source and BigBio;
194
+ # If dataset contains more than one subset (see examples/bioasq.py) implement for EACH of them.
195
+ # Each of them should contain:
196
+ # - name: should be unique for each dataset config eg. bioasq10b_(source|bigbio)_[bigbio_schema_name]
197
+ # - version: option = (SOURCE_VERSION|BIGBIO_VERSION)
198
+ # - description: one line description for the dataset
199
+ # - schema: options = (source|bigbio_[bigbio_schema_name])
200
+ # - subset_id: subset id is the canonical name for the dataset (eg. bioasq10b)
201
+ # where [bigbio_schema_name] = ()
202
+
203
+ BUILDER_CONFIGS = [
204
+ BigBioConfig(
205
+ name="czi_drsm_base_source",
206
+ version=SOURCE_VERSION,
207
+ description="czi_drsm base source schema",
208
+ schema="base_source",
209
+ subset_id="czi_drsm_base",
210
+ ),
211
+ BigBioConfig(
212
+ name="czi_drsm_bigbio_base_text",
213
+ version=BIGBIO_VERSION,
214
+ description="czi_drsm base BigBio schema",
215
+ schema="bigbio_text",
216
+ subset_id="czi_drsm_base",
217
+ ),
218
+ BigBioConfig(
219
+ name="czi_drsm_qol_source",
220
+ version=SOURCE_VERSION,
221
+ description="czi_drsm source schema for Quality of Life studies",
222
+ schema="qol_source",
223
+ subset_id="czi_drsm_qol",
224
+ ),
225
+ BigBioConfig(
226
+ name="czi_drsm_bigbio_qol_text",
227
+ version=BIGBIO_VERSION,
228
+ description="czi_drsm BigBio schema for Quality of Life studies",
229
+ schema="bigbio_text",
230
+ subset_id="czi_drsm_qol",
231
+ ),
232
+ BigBioConfig(
233
+ name="czi_drsm_nhs_source",
234
+ version=SOURCE_VERSION,
235
+ description="czi_drsm source schema for Natural History Studies",
236
+ schema="nhs_source",
237
+ subset_id="czi_drsm_nhs",
238
+ ),
239
+ BigBioConfig(
240
+ name="czi_drsm_bigbio_nhs_text",
241
+ version=BIGBIO_VERSION,
242
+ description="czi_drsm BigBio schema for Natural History Studies",
243
+ schema="bigbio_text",
244
+ subset_id="czi_drsm_nhs",
245
+ ),
246
+ ]
247
+
248
+ DEFAULT_CONFIG_NAME = "czi_drsm_bigbio_base_text"
249
+
250
+ def _info(self) -> datasets.DatasetInfo:
251
+
252
+ # Create the source schema; this schema will keep all keys/information/labels as close to the original dataset as possible.
253
+
254
+ # You can arbitrarily nest lists and dictionaries.
255
+ # For iterables, use lists over tuples or `datasets.Sequence`
256
+
257
+ if self.config.schema == "base_source":
258
+ features = datasets.Features(
259
+ {
260
+ "document_id": datasets.Value("string"),
261
+ "labeling_state": datasets.Value("string"),
262
+ "explanation": datasets.Value("string"),
263
+ "correct_label": [datasets.ClassLabel(names=_CLASS_NAMES_BASE)],
264
+ "agreement": [datasets.Value("string")],
265
+ "title": [datasets.Value("string")],
266
+ "abstract": [datasets.Value("string")],
267
+ }
268
+ )
269
+
270
+ elif self.config.schema == "qol_source":
271
+ features = datasets.Features(
272
+ {
273
+ "document_id": datasets.Value("string"),
274
+ "labeling_state": datasets.Value("string"),
275
+ "correct_label": [datasets.ClassLabel(names=_CLASS_NAMES_QOL)],
276
+ "explanation": datasets.Value("string"),
277
+ "agreement": [datasets.Value("string")],
278
+ "title": [datasets.Value("string")],
279
+ "abstract": [datasets.Value("string")]
280
+ }
281
+ )
282
+
283
+ elif self.config.schema == "nhs_source":
284
+ features = datasets.Features(
285
+ {
286
+ "document_id": datasets.Value("string"),
287
+ "labeling_state": datasets.Value("string"),
288
+ "correct_label": [datasets.ClassLabel(names=_CLASS_NAMES_NHS)],
289
+ "explanation": datasets.Value("string"),
290
+ "agreement": [datasets.Value("string")],
291
+ "title": [datasets.Value("string")],
292
+ "abstract": [datasets.Value("string")],
293
+ }
294
+ )
295
+
296
+ # For example bigbio_kb, bigbio_t2t
297
+ elif self.config.schema == "bigbio_text":
298
+ features = schemas.text_features
299
+
300
+ return datasets.DatasetInfo(
301
+ description=_DESCRIPTION,
302
+ features=features,
303
+ homepage=_HOMEPAGE,
304
+ license=_LICENSE,
305
+ citation=_CITATION,
306
+ )
307
+
308
+ def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
309
+ """Returns SplitGenerators."""
310
+
311
+ if 'base' in self.config.name:
312
+ url = _URLS['base']
313
+ elif 'qol' in self.config.name:
314
+ url = _URLS['qol']
315
+ elif 'nhs' in self.config.name:
316
+ url = _URLS['nhs']
317
+ else:
318
+ raise ValueError("Invalid config name: {}".format(self.config.name))
319
+
320
+ data_file = dl_manager.download_and_extract(url)
321
+ df = pd.read_csv(data_file, sep="\t", encoding="utf-8").fillna('')
322
+
323
+ # load tsv file into huggingface dataset
324
+ ds = datasets.Dataset.from_pandas(df)
325
+
326
+ # generate train_test split
327
+ ds_dict = ds.train_test_split(test_size=0.2, seed=42)
328
+ ds_dict2 = ds_dict['test'].train_test_split(test_size=0.5, seed=42)
329
+
330
+ # dump train, val, test to disk
331
+ data_dir = Path(data_file).parent
332
+ ds_dict['train'].to_csv(data_dir / "train.tsv", sep="\t", index=False)
333
+ ds_dict2['train'].to_csv(data_dir / "validation.tsv", sep="\t", index=False)
334
+ ds_dict2['test'].to_csv(data_dir / "test.tsv", sep="\t", index=False)
335
+
336
+ return [
337
+ datasets.SplitGenerator(
338
+ name=datasets.Split.TRAIN,
339
+ gen_kwargs={
340
+ "filepath": data_dir / "train.tsv",
341
+ "split": "train",
342
+ },
343
+ ),
344
+ datasets.SplitGenerator(
345
+ name=datasets.Split.VALIDATION,
346
+ gen_kwargs={
347
+ "filepath": data_dir / "validation.tsv",
348
+ "split": "validation",
349
+ },
350
+ ),
351
+ datasets.SplitGenerator(
352
+ name=datasets.Split.TEST,
353
+ gen_kwargs={
354
+ "filepath": data_dir / "test.tsv",
355
+ "split": "test",
356
+ },
357
+ )
358
+ ]
359
+
360
+ # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
361
+ def _generate_examples(self, filepath, split) -> Tuple[int, Dict]:
362
+ """Yields examples as (key, example) tuples."""
363
+ df = pd.read_csv(filepath, sep="\t", encoding="utf-8").fillna('')
364
+ print(len(df))
365
+ for id_, l in df.iterrows():
366
+ if self.config.subset_id == "czi_drsm_base":
367
+ doc_id = l[0]
368
+ labeling_state = l[1]
369
+ correct_label = l[2]
370
+ agreement = l[3]
371
+ explanation = l[4]
372
+ title = l[5]
373
+ abstract = l[6]
374
+ elif self.config.subset_id == "czi_drsm_qol":
375
+ doc_id = l[0]
376
+ labeling_state = l[1]
377
+ correct_label = l[2][1:-1]
378
+ explanation = l[3]
379
+ agreement = l[4]
380
+ title = l[5]
381
+ abstract = l[6]
382
+ elif self.config.subset_id == "czi_drsm_nhs":
383
+ doc_id = l[0]
384
+ labeling_state = l[1]
385
+ correct_label = l[2][1:-1]
386
+ explanation = ''
387
+ agreement = l[3]
388
+ title = l[4]
389
+ abstract = l[5]
390
+
391
+ if "_source" in self.config.schema:
392
+ yield id_, {
393
+ "document_id": doc_id,
394
+ "labeling_state": labeling_state,
395
+ "explanation": explanation,
396
+ "correct_label": [correct_label],
397
+ "agreement": [str(agreement)],
398
+ "title": [title],
399
+ "abstract": [abstract]
400
+ }
401
+ elif self.config.schema == "bigbio_text":
402
+ yield id_, {
403
+ "id": id_,
404
+ "document_id": doc_id,
405
+ "text": title + " " + abstract,
406
+ "labels": [correct_label]
407
+ }
408
+
409
+ # This template is based on the following template from the datasets package:
410
+ # https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
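For reference, the 80/10/10 train/validation/test split that `_split_generators` derives on the fly (seed 42) can be reproduced standalone as follows; `drsm_corpus_v1.tsv` stands for a locally downloaded copy of the base-subset TSV and is a hypothetical path.

```python
# Standalone sketch of the split logic used in _split_generators above;
# "drsm_corpus_v1.tsv" is a hypothetical local copy of the base-subset TSV.
import datasets
import pandas as pd

df = pd.read_csv("drsm_corpus_v1.tsv", sep="\t", encoding="utf-8").fillna("")
ds = datasets.Dataset.from_pandas(df)

# 80% train, then split the held-out 20% evenly into validation and test
first_split = ds.train_test_split(test_size=0.2, seed=42)
holdout = first_split["test"].train_test_split(test_size=0.5, seed=42)

splits = datasets.DatasetDict(
    {
        "train": first_split["train"],
        "validation": holdout["train"],
        "test": holdout["test"],
    }
)
print({name: split.num_rows for name, split in splits.items()})
```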