gabrielaltay committed · Commit 08eda35 · 1 Parent(s): 6b5366a

upload hubscripts/chia_hub.py to hub from bigbio repo

Files changed (1):
chia.py  +647 -0
chia.py ADDED
@@ -0,0 +1,647 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A large annotated corpus of patient eligibility criteria extracted from 1,000
interventional, Phase IV clinical trials registered in ClinicalTrials.gov. This
dataset includes 12,409 annotated eligibility criteria, represented by 41,487
distinctive entities of 15 entity types and 25,017 relationships of 12
relationship types."""
from pathlib import Path
from typing import Dict, Iterator, List, Tuple

import datasets

from .bigbiohub import kb_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks

_LANGUAGES = ['English']
_PUBMED = False
_LOCAL = False
_CITATION = """\
@article{kury2020chia,
    title = {Chia, a large annotated corpus of clinical trial eligibility criteria},
    author = {
        Kury, Fabr{\'\\i}cio and Butler, Alex and Yuan, Chi and Fu, Li-heng and
        Sun, Yingcheng and Liu, Hao and Sim, Ida and Carini, Simona and Weng,
        Chunhua
    },
    year = 2020,
    journal = {Scientific data},
    publisher = {Nature Publishing Group},
    volume = 7,
    number = 1,
    pages = {1--11}
}
"""

_DATASETNAME = "chia"
_DISPLAYNAME = "CHIA"

_DESCRIPTION = """\
A large annotated corpus of patient eligibility criteria extracted from 1,000
interventional, Phase IV clinical trials registered in ClinicalTrials.gov. This
dataset includes 12,409 annotated eligibility criteria, represented by 41,487
distinctive entities of 15 entity types and 25,017 relationships of 12
relationship types.
"""

_HOMEPAGE = "https://github.com/WengLab-InformaticsResearch/CHIA"

_LICENSE = 'Creative Commons Attribution 4.0 International'

_URLS = {
    _DATASETNAME: "https://figshare.com/ndownloader/files/21728850",
    _DATASETNAME + "_wo_scope": "https://figshare.com/ndownloader/files/21728853",
}

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]

_SOURCE_VERSION = "2.0.0"
_BIGBIO_VERSION = "1.0.0"

# For further information see appendix of the publication
_DOMAIN_ENTITY_TYPES = [
    "Condition",
    "Device",
    "Drug",
    "Measurement",
    "Observation",
    "Person",
    "Procedure",
    "Visit",
]

# For further information see appendix of the publication
_FIELD_ENTITY_TYPES = [
    "Temporal",
    "Value",
]

# For further information see appendix of the publication
_CONSTRUCT_ENTITY_TYPES = [
    "Scope",  # Not part of the "without scope" schema / version
    "Negation",
    "Multiplier",
    "Qualifier",
    "Reference_point",
    "Mood",
]

_ALL_ENTITY_TYPES = _DOMAIN_ENTITY_TYPES + _FIELD_ENTITY_TYPES + _CONSTRUCT_ENTITY_TYPES

_RELATION_TYPES = [
    "AND",
    "OR",
    "SUBSUMES",
    "HAS_NEGATION",
    "HAS_MULTIPLIER",
    "HAS_QUALIFIER",
    "HAS_VALUE",
    "HAS_TEMPORAL",
    "HAS_INDEX",
    "HAS_MOOD",
    "HAS_CONTEXT",
    "HAS_SCOPE",  # Not part of the "without scope" schema / version
]

_MAX_OFFSET_CORRECTION = 100


class ChiaDataset(datasets.GeneratorBasedBuilder):
    """
    A large annotated corpus of patient eligibility criteria extracted from 1,000 interventional,
    Phase IV clinical trials registered in ClinicalTrials.gov.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="chia_source",
            version=SOURCE_VERSION,
            description="Chia source schema",
            schema="source",
            subset_id="chia",
        ),
        BigBioConfig(
            name="chia_fixed_source",
            version=SOURCE_VERSION,
            description="Chia source schema (with fixed entity offsets)",
            schema="source",
            subset_id="chia_fixed",
        ),
        BigBioConfig(
            name="chia_without_scope_source",
            version=SOURCE_VERSION,
            description="Chia without scope source schema",
            schema="source",
            subset_id="chia_without_scope",
        ),
        BigBioConfig(
            name="chia_without_scope_fixed_source",
            version=SOURCE_VERSION,
            description="Chia without scope source schema (with fixed entity offsets)",
            schema="source",
            subset_id="chia_without_scope_fixed",
        ),
        BigBioConfig(
            name="chia_bigbio_kb",
            version=BIGBIO_VERSION,
            description="Chia BigBio schema",
            schema="bigbio_kb",
            subset_id="chia",
        ),
    ]

    DEFAULT_CONFIG_NAME = "chia_source"

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value(
                        "string"
                    ),  # NCT-ID from clinicaltrials.gov
                    "text": datasets.Value("string"),
                    "text_type": datasets.Value(
                        "string"
                    ),  # inclusion or exclusion (criteria)
                    "entities": [
                        {
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "text": datasets.Sequence(datasets.Value("string")),
                            "offsets": datasets.Sequence([datasets.Value("int32")]),
                            "normalized": [
                                {
                                    "db_name": datasets.Value("string"),
                                    "db_id": datasets.Value("string"),
                                }
                            ],
                        }
                    ],
                    "relations": [
                        {
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "arg1_id": datasets.Value("string"),
                            "arg2_id": datasets.Value("string"),
                            "normalized": [
                                {
                                    "db_name": datasets.Value("string"),
                                    "db_id": datasets.Value("string"),
                                }
                            ],
                        }
                    ],
                }
            )

        elif self.config.schema == "bigbio_kb":
            features = kb_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        url_key = _DATASETNAME

        if self.config.subset_id.startswith("chia_without_scope"):
            url_key += "_wo_scope"

        urls = _URLS[url_key]
        data_dir = Path(dl_manager.download_and_extract(urls))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir},
            )
        ]

    def _generate_examples(self, data_dir: Path) -> Iterator[Tuple[str, Dict]]:
        if self.config.schema == "source":
            fix_offsets = "fixed" in self.config.subset_id

            for file in data_dir.iterdir():
                if not file.name.endswith(".txt"):
                    continue

                brat_example = parse_brat_file(file, [".ann"])
                source_example = self._to_source_example(
                    file, brat_example, fix_offsets
                )
                yield source_example["id"], source_example

        elif self.config.schema == "bigbio_kb":
            for file in data_dir.iterdir():
                if not file.name.endswith(".txt"):
                    continue

                brat_example = parse_brat_file(file, [".ann"])
                source_example = self._to_source_example(file, brat_example, True)

                bigbio_example = {
                    "id": source_example["id"],
                    "document_id": source_example["document_id"],
                    "passages": [
                        {
                            "id": source_example["id"] + "_text",
                            "type": source_example["text_type"],
                            "text": [source_example["text"]],
                            "offsets": [[0, len(source_example["text"])]],
                        }
                    ],
                    "entities": source_example["entities"],
                    "relations": source_example["relations"],
                    "events": [],
                    "coreferences": [],
                }

                yield bigbio_example["id"], bigbio_example

    def _to_source_example(
        self, input_file: Path, brat_example: Dict, fix_offsets: bool
    ) -> Dict:
        """
        Converts the generic brat example to the source schema format.
        """
        example_id = str(input_file.stem)
        document_id = example_id.split("_")[0]
        criteria_type = "inclusion" if "_inc" in input_file.stem else "exclusion"

        text = brat_example["text"]

        source_example = {
            "id": example_id,
            "document_id": document_id,
            "text_type": criteria_type,
            "text": text,
            "entities": [],
            "relations": [],
        }

        example_prefix = example_id + "_"
        entity_ids = {}

        for tb_annotation in brat_example["text_bound_annotations"]:
            if tb_annotation["type"].capitalize() not in _ALL_ENTITY_TYPES:
                continue

            entity_ann = tb_annotation.copy()
            entity_ann["id"] = example_prefix + entity_ann["id"]
            entity_ids[entity_ann["id"]] = True

            if fix_offsets:
                if len(entity_ann["offsets"]) > 1:
                    entity_ann["text"] = self._get_texts_for_multiple_offsets(
                        text, entity_ann["offsets"]
                    )

                fixed_offsets = []
                fixed_texts = []
                for entity_text, offsets in zip(
                    entity_ann["text"], entity_ann["offsets"]
                ):
                    fixed_offset = self._fix_entity_offsets(text, entity_text, offsets)
                    fixed_offsets.append(fixed_offset)
                    fixed_texts.append(text[fixed_offset[0] : fixed_offset[1]])

                entity_ann["offsets"] = fixed_offsets
                entity_ann["text"] = fixed_texts

            entity_ann["normalized"] = []
            source_example["entities"].append(entity_ann)

        for base_rel_annotation in brat_example["relations"]:
            if base_rel_annotation["type"].upper() not in _RELATION_TYPES:
                continue

            head_id = example_prefix + base_rel_annotation["head"]["ref_id"]
            tail_id = example_prefix + base_rel_annotation["tail"]["ref_id"]

            if head_id not in entity_ids or tail_id not in entity_ids:
                continue

            relation = {
                "id": example_prefix + base_rel_annotation["id"],
                "type": base_rel_annotation["type"],
                "arg1_id": head_id,
                "arg2_id": tail_id,
                "normalized": [],
            }

            source_example["relations"].append(relation)

        relation_id = len(brat_example["relations"]) + 10
        for base_co_reference in brat_example["equivalences"]:
            ref_ids = base_co_reference["ref_ids"]
            for i, arg1 in enumerate(ref_ids[:-1]):
                for arg2 in ref_ids[i + 1 :]:
                    if arg1 not in entity_ids or arg2 not in entity_ids:
                        continue

                    or_relation = {
                        "id": example_prefix + f"R{relation_id}",
                        "type": "OR",
                        "arg1_id": example_prefix + arg1,
                        "arg2_id": example_prefix + arg2,
                        "normalized": [],
                    }

                    source_example["relations"].append(or_relation)
                    relation_id += 1

        return source_example

    def _fix_entity_offsets(
        self, doc_text: str, entity_text: str, given_offsets: List[int]
    ) -> List[int]:
        """
        Fixes incorrect mention offsets by checking whether the given entity mention text can be
        found to the left or right of the given offsets by considering incrementally larger shifts.
        """
        left = given_offsets[0]
        right = given_offsets[1]

        # Some annotations contain whitespaces - we ignore them
        clean_entity_text = entity_text.strip()

        i = 0
        while i <= _MAX_OFFSET_CORRECTION:
            # Move mention window to the left
            if doc_text[left - i : right - i].strip() == clean_entity_text:
                return [left - i, left - i + len(clean_entity_text)]

            # Move mention window to the right
            elif doc_text[left + i : right + i].strip() == clean_entity_text:
                return [left + i, left + i + len(clean_entity_text)]

            i += 1

        # We can't find any better offsets
        return given_offsets

    def _get_texts_for_multiple_offsets(
        self, document_text: str, offsets: List[List[int]]
    ) -> List[str]:
        """
        Extracts one text span per entry in the given list of offsets.
        """
        texts = []
        for offset in offsets:
            texts.append(document_text[offset[0] : offset[1]])
        return texts


def parse_brat_file(txt_file: Path, annotation_file_suffixes: List[str] = None) -> Dict:
    """
    Parse a brat file into the schema defined below.
    `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
    Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
    e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.

    Schema of the parse:
       features = datasets.Features(
           {
               "id": datasets.Value("string"),
               "document_id": datasets.Value("string"),
               "text": datasets.Value("string"),
               "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
                   {
                       "offsets": datasets.Sequence([datasets.Value("int32")]),
                       "text": datasets.Sequence(datasets.Value("string")),
                       "type": datasets.Value("string"),
                       "id": datasets.Value("string"),
                   }
               ],
               "events": [  # E line in brat
                   {
                       "trigger": datasets.Value(
                           "string"
                       ),  # refers to the text_bound_annotation of the trigger,
                       "id": datasets.Value("string"),
                       "type": datasets.Value("string"),
                       "arguments": datasets.Sequence(
                           {
                               "role": datasets.Value("string"),
                               "ref_id": datasets.Value("string"),
                           }
                       ),
                   }
               ],
               "relations": [  # R line in brat
                   {
                       "id": datasets.Value("string"),
                       "head": {
                           "ref_id": datasets.Value("string"),
                           "role": datasets.Value("string"),
                       },
                       "tail": {
                           "ref_id": datasets.Value("string"),
                           "role": datasets.Value("string"),
                       },
                       "type": datasets.Value("string"),
                   }
               ],
               "equivalences": [  # Equiv line in brat
                   {
                       "id": datasets.Value("string"),
                       "ref_ids": datasets.Sequence(datasets.Value("string")),
                   }
               ],
               "attributes": [  # M or A lines in brat
                   {
                       "id": datasets.Value("string"),
                       "type": datasets.Value("string"),
                       "ref_id": datasets.Value("string"),
                       "value": datasets.Value("string"),
                   }
               ],
               "normalizations": [  # N lines in brat
                   {
                       "id": datasets.Value("string"),
                       "type": datasets.Value("string"),
                       "ref_id": datasets.Value("string"),
                       "resource_name": datasets.Value(
                           "string"
                       ),  # Name of the resource, e.g. "Wikipedia"
                       "cuid": datasets.Value(
                           "string"
                       ),  # ID in the resource, e.g. 534366
                       "text": datasets.Value(
                           "string"
                       ),  # Human readable description/name of the entity, e.g. "Barack Obama"
                   }
               ],
           },
       )
    """

    example = {}
    example["document_id"] = txt_file.with_suffix("").name
    with txt_file.open() as f:
        example["text"] = f.read()

    # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
    # for event extraction
    if annotation_file_suffixes is None:
        annotation_file_suffixes = [".a1", ".a2", ".ann"]

    if len(annotation_file_suffixes) == 0:
        raise AssertionError(
            "At least one suffix for the to-be-read annotation files should be given!"
        )

    ann_lines = []
    for suffix in annotation_file_suffixes:
        annotation_file = txt_file.with_suffix(suffix)
        if annotation_file.exists():
            with annotation_file.open() as f:
                ann_lines.extend(f.readlines())

    example["text_bound_annotations"] = []
    example["events"] = []
    example["relations"] = []
    example["equivalences"] = []
    example["attributes"] = []
    example["normalizations"] = []

    prev_tb_annotation = None

    for line in ann_lines:
        orig_line = line
        line = line.strip()
        if not line:
            continue

        # If an (entity) annotation spans multiple lines, this will result in multiple
        # lines also in the annotation file
        if "\t" not in line and prev_tb_annotation is not None:
            prev_tb_annotation["text"][0] += "\n" + orig_line[:-1]
            continue

        if line.startswith("T"):  # Text bound
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["text"] = [fields[2]]
            ann["type"] = fields[1].split()[0]
            ann["offsets"] = []
            # Strip the leading "<type> " prefix so only the span offsets remain
            span_str = fields[1][len(ann["type"]) + 1 :]
            for span in span_str.split(";"):
                start, end = span.split()
                ann["offsets"].append([int(start), int(end)])

            example["text_bound_annotations"].append(ann)
            prev_tb_annotation = ann

        elif line.startswith("E"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]

            ann["type"], ann["trigger"] = fields[1].split()[0].split(":")

            ann["arguments"] = []
            for role_ref_id in fields[1].split()[1:]:
                argument = {
                    "role": (role_ref_id.split(":"))[0],
                    "ref_id": (role_ref_id.split(":"))[1],
                }
                ann["arguments"].append(argument)

            example["events"].append(ann)
            prev_tb_annotation = None

        elif line.startswith("R"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["type"] = fields[1].split()[0]

            ann["head"] = {
                "role": fields[1].split()[1].split(":")[0],
                "ref_id": fields[1].split()[1].split(":")[1],
            }
            ann["tail"] = {
                "role": fields[1].split()[2].split(":")[0],
                "ref_id": fields[1].split()[2].split(":")[1],
            }

            example["relations"].append(ann)
            prev_tb_annotation = None

        # '*' seems to be the legacy way to mark equivalences,
        # but I couldn't find any info on the current way
        # this might have to be adapted dependent on the brat version
        # of the annotation
        elif line.startswith("*"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["ref_ids"] = fields[1].split()[1:]

            example["equivalences"].append(ann)
            prev_tb_annotation = None

        elif line.startswith("A") or line.startswith("M"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]

            info = fields[1].split()
            ann["type"] = info[0]
            ann["ref_id"] = info[1]

            if len(info) > 2:
                ann["value"] = info[2]
            else:
                ann["value"] = ""

            example["attributes"].append(ann)
            prev_tb_annotation = None

        elif line.startswith("N"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["text"] = fields[2]

            info = fields[1].split()

            ann["type"] = info[0]
            ann["ref_id"] = info[1]
            ann["resource_name"] = info[2].split(":")[0]
            ann["cuid"] = info[2].split(":")[1]

            example["normalizations"].append(ann)
            prev_tb_annotation = None

    return example
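
For reference, a minimal usage sketch (not part of the committed script). It assumes the script is published in a Hub dataset repository such as bigbio/chia together with the bigbiohub.py module it imports, and a datasets 2.x release that still supports script-based loading (trust_remote_code was introduced in datasets 2.16).

# Minimal usage sketch; "bigbio/chia" and the installed datasets version are assumptions,
# not part of chia.py itself.
from datasets import load_dataset

# Source schema with corrected entity offsets; other configs are chia_source,
# chia_without_scope_source, chia_without_scope_fixed_source, and chia_bigbio_kb.
source = load_dataset("bigbio/chia", name="chia_fixed_source", trust_remote_code=True)

# Harmonized BigBio knowledge-base schema (passages, entities, relations, ...).
kb = load_dataset("bigbio/chia", name="chia_bigbio_kb", trust_remote_code=True)

example = kb["train"][0]
print(example["document_id"], len(example["entities"]), len(example["relations"]))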