ygorg committed on
Commit e1b5865 · verified · 1 Parent(s): fcc5b30

Update scripts to use data.zip instead of local data.

Files changed (2)
  1. DEFT2021.py +158 -158
  2. data.zip +2 -2
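With the corpus now bundled as data.zip, load_dataset no longer needs the data_dir argument. A minimal sketch of loading the dataset after this change, assuming the loading script is called directly and that the configuration names follow the "ner"/"cls" branches visible in _info below:

    from datasets import load_dataset

    # Before this commit the loader raised:
    #   ValueError: This is a local dataset. Please pass the data_dir kwarg to load_dataset.
    # After it, data.zip is downloaded and extracted automatically by the DownloadManager.
    # Recent versions of datasets may additionally require trust_remote_code=True.
    ner = load_dataset("DEFT2021.py", "ner")  # token-level NER configuration (assumed name)
    cls = load_dataset("DEFT2021.py", "cls")  # specialty classification configuration (assumed name)

    print(ner["train"][0]["tokens"][:10])
    print(ner["train"][0]["ner_tags"][:10])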
DEFT2021.py CHANGED
@@ -1,15 +1,18 @@
1
  import os
2
- import re
3
- import ast
4
- import json
5
  import random
 
6
  from pathlib import Path
7
- from itertools import product
8
- from dataclasses import dataclass
9
- from typing import Dict, List, Tuple
10
 
11
  import datasets
12
- import numpy as np
13
 
14
  _CITATION = """\
15
  @inproceedings{grouin-etal-2021-classification,
@@ -29,18 +32,13 @@ _CITATION = """\
29
  }
30
  """
31
 
32
- _DESCRIPTION = """\
33
- ddd
34
- """
35
-
36
- _HOMEPAGE = "ddd"
37
-
38
- _LICENSE = "unknown"
39
-
40
  _SPECIALITIES = ['immunitaire', 'endocriniennes', 'blessures', 'chimiques', 'etatsosy', 'nutritionnelles', 'infections', 'virales', 'parasitaires', 'tumeur', 'osteomusculaires', 'stomatognathique', 'digestif', 'respiratoire', 'ORL', 'nerveux', 'oeil', 'homme', 'femme', 'cardiovasculaires', 'hemopathies', 'genetique', 'peau']
41
 
42
  _LABELS_BASE = ['anatomie', 'date', 'dose', 'duree', 'examen', 'frequence', 'mode', 'moment', 'pathologie', 'sosy', 'substance', 'traitement', 'valeur']
43
 
44
  class DEFT2021(datasets.GeneratorBasedBuilder):
45
 
46
  DEFAULT_CONFIG_NAME = "ner"
@@ -51,7 +49,7 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
51
  ]
52
 
53
  def _info(self):
54
-
55
  if self.config.name.find("cls") != -1:
56
 
57
  features = datasets.Features(
@@ -77,7 +75,13 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
77
  "tokens": datasets.Sequence(datasets.Value("string")),
78
  "ner_tags": datasets.Sequence(
79
  datasets.features.ClassLabel(
80
- names = ['O', 'B-anatomie', 'I-anatomie', 'B-date', 'I-date', 'B-dose', 'I-dose', 'B-duree', 'I-duree', 'B-examen', 'I-examen', 'B-frequence', 'I-frequence', 'B-mode', 'I-mode', 'B-moment', 'I-moment', 'B-pathologie', 'I-pathologie', 'B-sosy', 'I-sosy', 'B-substance', 'I-substance', 'B-traitement', 'I-traitement', 'B-valeur', 'I-valeur'],
81
  )
82
  ),
83
  }
@@ -94,12 +98,8 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
94
 
95
  def _split_generators(self, dl_manager):
96
 
97
- if self.config.data_dir is None:
98
- raise ValueError("This is a local dataset. Please pass the data_dir kwarg to load_dataset.")
99
-
100
- else:
101
- data_dir = self.config.data_dir
102
-
103
  return [
104
  datasets.SplitGenerator(
105
  name=datasets.Split.TRAIN,
@@ -126,52 +126,52 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
126
 
127
  def remove_prefix(self, a: str, prefix: str) -> str:
128
  if a.startswith(prefix):
129
- a = a[len(prefix) :]
130
  return a
131
-
132
  def parse_brat_file(self, txt_file: Path, annotation_file_suffixes: List[str] = None, parse_notes: bool = False) -> Dict:
133
-
134
  example = {}
135
  example["document_id"] = txt_file.with_suffix("").name
136
  with txt_file.open() as f:
137
  example["text"] = f.read()
138
-
139
  # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
140
  # for event extraction
141
  if annotation_file_suffixes is None:
142
  annotation_file_suffixes = [".a1", ".a2", ".ann"]
143
-
144
  if len(annotation_file_suffixes) == 0:
145
  raise AssertionError(
146
  "At least one suffix for the to-be-read annotation files should be given!"
147
  )
148
-
149
  ann_lines = []
150
  for suffix in annotation_file_suffixes:
151
  annotation_file = txt_file.with_suffix(suffix)
152
  if annotation_file.exists():
153
  with annotation_file.open() as f:
154
  ann_lines.extend(f.readlines())
155
-
156
  example["text_bound_annotations"] = []
157
  example["events"] = []
158
  example["relations"] = []
159
  example["equivalences"] = []
160
  example["attributes"] = []
161
  example["normalizations"] = []
162
-
163
  if parse_notes:
164
  example["notes"] = []
165
-
166
  for line in ann_lines:
167
  line = line.strip()
168
  if not line:
169
  continue
170
-
171
  if line.startswith("T"): # Text bound
172
  ann = {}
173
  fields = line.split("\t")
174
-
175
  ann["id"] = fields[0]
176
  ann["type"] = fields[1].split()[0]
177
  ann["offsets"] = []
@@ -180,30 +180,30 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
180
  for span in span_str.split(";"):
181
  start, end = span.split()
182
  ann["offsets"].append([int(start), int(end)])
183
-
184
  # Heuristically split text of discontiguous entities into chunks
185
  ann["text"] = []
186
  if len(ann["offsets"]) > 1:
187
  i = 0
188
  for start, end in ann["offsets"]:
189
  chunk_len = end - start
190
- ann["text"].append(text[i : chunk_len + i])
191
  i += chunk_len
192
  while i < len(text) and text[i] == " ":
193
  i += 1
194
  else:
195
  ann["text"] = [text]
196
-
197
  example["text_bound_annotations"].append(ann)
198
-
199
  elif line.startswith("E"):
200
  ann = {}
201
  fields = line.split("\t")
202
-
203
  ann["id"] = fields[0]
204
-
205
  ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
206
-
207
  ann["arguments"] = []
208
  for role_ref_id in fields[1].split()[1:]:
209
  argument = {
@@ -211,16 +211,16 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
211
  "ref_id": (role_ref_id.split(":"))[1],
212
  }
213
  ann["arguments"].append(argument)
214
-
215
  example["events"].append(ann)
216
-
217
  elif line.startswith("R"):
218
  ann = {}
219
  fields = line.split("\t")
220
-
221
  ann["id"] = fields[0]
222
  ann["type"] = fields[1].split()[0]
223
-
224
  ann["head"] = {
225
  "role": fields[1].split()[1].split(":")[0],
226
  "ref_id": fields[1].split()[1].split(":")[1],
@@ -229,9 +229,9 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
229
  "role": fields[1].split()[2].split(":")[0],
230
  "ref_id": fields[1].split()[2].split(":")[1],
231
  }
232
-
233
  example["relations"].append(ann)
234
-
235
  # '*' seems to be the legacy way to mark equivalences,
236
  # but I couldn't find any info on the current way
237
  # this might have to be adapted dependent on the brat version
@@ -239,136 +239,136 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
239
  elif line.startswith("*"):
240
  ann = {}
241
  fields = line.split("\t")
242
-
243
  ann["id"] = fields[0]
244
  ann["ref_ids"] = fields[1].split()[1:]
245
-
246
  example["equivalences"].append(ann)
247
-
248
  elif line.startswith("A") or line.startswith("M"):
249
  ann = {}
250
  fields = line.split("\t")
251
-
252
  ann["id"] = fields[0]
253
-
254
  info = fields[1].split()
255
  ann["type"] = info[0]
256
  ann["ref_id"] = info[1]
257
-
258
  if len(info) > 2:
259
  ann["value"] = info[2]
260
  else:
261
  ann["value"] = ""
262
-
263
  example["attributes"].append(ann)
264
-
265
  elif line.startswith("N"):
266
  ann = {}
267
  fields = line.split("\t")
268
-
269
  ann["id"] = fields[0]
270
  ann["text"] = fields[2]
271
-
272
  info = fields[1].split()
273
-
274
  ann["type"] = info[0]
275
  ann["ref_id"] = info[1]
276
  ann["resource_name"] = info[2].split(":")[0]
277
  ann["cuid"] = info[2].split(":")[1]
278
  example["normalizations"].append(ann)
279
-
280
  elif parse_notes and line.startswith("#"):
281
  ann = {}
282
  fields = line.split("\t")
283
-
284
  ann["id"] = fields[0]
285
  ann["text"] = fields[2] if len(fields) == 3 else "<BB_NULL_STR>"
286
-
287
  info = fields[1].split()
288
-
289
  ann["type"] = info[0]
290
  ann["ref_id"] = info[1]
291
  example["notes"].append(ann)
292
  return example
293
 
294
  def _to_source_example(self, brat_example: Dict) -> Dict:
295
-
296
  source_example = {
297
  "document_id": brat_example["document_id"],
298
  "text": brat_example["text"],
299
  }
300
-
301
  source_example["entities"] = []
302
-
303
  for entity_annotation in brat_example["text_bound_annotations"]:
304
  entity_ann = entity_annotation.copy()
305
-
306
  # Change id property name
307
  entity_ann["entity_id"] = entity_ann["id"]
308
  entity_ann.pop("id")
309
-
310
  # Add entity annotation to sample
311
  source_example["entities"].append(entity_ann)
312
-
313
  return source_example
314
 
315
  def convert_to_prodigy(self, json_object, list_label):
316
-
317
  def prepare_split(text):
318
-
319
  rep_before = ['?', '!', ';', '*']
320
  rep_after = ['’', "'"]
321
  rep_both = ['-', '/', '[', ']', ':', ')', '(', ',', '.']
322
-
323
  for i in rep_before:
324
- text = text.replace(i, ' '+i)
325
-
326
  for i in rep_after:
327
- text = text.replace(i, i+' ')
328
-
329
  for i in rep_both:
330
- text = text.replace(i, ' '+i+' ')
331
-
332
  text_split = text.split()
333
-
334
  punctuations = [',', '.']
335
  for j in range(0, len(text_split)-1):
336
- if j-1 >= 0 and j+1 <= len(text_split)-1 and text_split[j-1][-1].isdigit() and text_split[j+1][0].isdigit():
337
  if text_split[j] in punctuations:
338
  text_split[j-1:j+2] = [''.join(text_split[j-1:j+2])]
339
-
340
  text = ' '.join(text_split)
341
-
342
  return text
343
-
344
  new_json = []
345
-
346
  for ex in [json_object]:
347
-
348
  text = prepare_split(ex['text'])
349
-
350
  tokenized_text = text.split()
351
-
352
  list_spans = []
353
-
354
  for a in ex['entities']:
355
-
356
  for o in range(len(a['offsets'])):
357
-
358
  text_annot = prepare_split(a['text'][o])
359
-
360
  offset_start = a['offsets'][o][0]
361
  offset_end = a['offsets'][o][1]
362
-
363
  nb_tokens_annot = len(text_annot.split())
364
-
365
  txt_offsetstart = prepare_split(ex['text'][:offset_start])
366
-
367
  nb_tokens_before_annot = len(txt_offsetstart.split())
368
-
369
  token_start = nb_tokens_before_annot
370
  token_end = token_start + nb_tokens_annot - 1
371
-
372
  if a['type'] in list_label:
373
  list_spans.append({
374
  'start': offset_start,
@@ -379,7 +379,7 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
379
  'id': a['entity_id'],
380
  'text': a['text'][o],
381
  })
382
-
383
  res = {
384
  'id': ex['document_id'],
385
  'document_id': ex['document_id'],
@@ -387,117 +387,117 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
387
  'tokens': tokenized_text,
388
  'spans': list_spans
389
  }
390
-
391
  new_json.append(res)
392
-
393
  return new_json
394
 
395
  def convert_to_hf_format(self, json_object):
396
-
397
  dict_out = []
398
-
399
  for i in json_object:
400
-
401
  # Filter annotations to keep the longest annotated spans when there is nested annotations
402
  selected_annotations = []
403
-
404
  if 'spans' in i:
405
-
406
  for idx_j, j in enumerate(i['spans']):
407
-
408
- len_j = int(j['end'])-int(j['start'])
409
- range_j = [l for l in range(int(j['start']),int(j['end']),1)]
410
-
411
  keep = True
412
-
413
  for idx_k, k in enumerate(i['spans'][idx_j+1:]):
414
-
415
- len_k = int(k['end'])-int(k['start'])
416
- range_k = [l for l in range(int(k['start']),int(k['end']),1)]
417
-
418
  inter = list(set(range_k).intersection(set(range_j)))
419
  if len(inter) > 0 and len_j < len_k:
420
  keep = False
421
-
422
  if keep:
423
  selected_annotations.append(j)
424
-
425
  # Create list of labels + id to separate different annotation and prepare IOB2 format
426
  nb_tokens = len(i['tokens'])
427
- ner_tags = ['O']*nb_tokens
428
-
429
  for slct in selected_annotations:
430
-
431
- for x in range(slct['token_start'], slct['token_end']+1, 1):
432
-
433
  if i['tokens'][x] not in slct['text']:
434
  if ner_tags[x-1] == 'O':
435
- ner_tags[x-1] = slct['label']+'-'+slct['id']
436
  else:
437
  if ner_tags[x] == 'O':
438
- ner_tags[x] = slct['label']+'-'+slct['id']
439
-
440
  # Make IOB2 format
441
  ner_tags_IOB2 = []
442
  for idx_l, label in enumerate(ner_tags):
443
-
444
  if label == 'O':
445
  ner_tags_IOB2.append('O')
446
  else:
447
  current_label = label.split('-')[0]
448
  current_id = label.split('-')[1]
449
  if idx_l == 0:
450
- ner_tags_IOB2.append('B-'+current_label)
451
  elif current_label in ner_tags[idx_l-1]:
452
  if current_id == ner_tags[idx_l-1].split('-')[1]:
453
- ner_tags_IOB2.append('I-'+current_label)
454
  else:
455
- ner_tags_IOB2.append('B-'+current_label)
456
  else:
457
- ner_tags_IOB2.append('B-'+current_label)
458
-
459
  dict_out.append({
460
  'id': i['id'],
461
  'document_id': i['document_id'],
462
  "ner_tags": ner_tags_IOB2,
463
  "tokens": i['tokens'],
464
  })
465
-
466
- return dict_out
467
 
 
468
 
469
  def split_sentences(self, json_o):
470
  """
471
  Split each document in sentences to fit the 512 maximum tokens of BERT.
472
-
473
  """
474
-
475
  final_json = []
476
-
477
  for i in json_o:
478
-
479
- ind_punc = [index for index, value in enumerate(i['tokens']) if value=='.'] + [len(i['tokens'])]
480
-
481
  for index, value in enumerate(ind_punc):
482
-
483
- if index==0:
484
- final_json.append({'id': i['id']+'_'+str(index),
485
- 'document_id': i['document_id'],
486
- 'ner_tags': i['ner_tags'][:value+1],
487
- 'tokens': i['tokens'][:value+1]
488
- })
 
489
  else:
490
  prev_value = ind_punc[index-1]
491
- final_json.append({'id': i['id']+'_'+str(index),
492
- 'document_id': i['document_id'],
493
- 'ner_tags': i['ner_tags'][prev_value+1:value+1],
494
- 'tokens': i['tokens'][prev_value+1:value+1]
495
- })
496
-
 
497
  return final_json
498
 
499
  def _generate_examples(self, data_dir, split):
500
-
501
  if self.config.name.find("cls") != -1:
502
 
503
  all_res = {}
@@ -509,7 +509,7 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
509
  else:
510
  split_eval = 'test'
511
 
512
- path_labels = Path(data_dir) / 'evaluations' / f"ref-{split_eval}-deft2021.txt"
513
 
514
  with open(os.path.join(data_dir, 'distribution-corpus.txt')) as f_dist:
515
 
@@ -525,7 +525,7 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
525
 
526
  if len(raw_split) == 3 and raw_split[0] in doc_specialities_:
527
  doc_specialities_[raw_split[0]].append(raw_split[1])
528
-
529
  elif len(raw_split) == 3 and raw_split[0] not in doc_specialities_:
530
  doc_specialities_[raw_split[0]] = [raw_split[1]]
531
 
@@ -533,7 +533,7 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
533
 
534
  for guid, txt_file in enumerate(sorted(ann_path.glob("*.txt"))):
535
 
536
- ann_file = txt_file.with_suffix("").name.split('.')[0]+'.ann'
537
 
538
  if ann_file in doc_specialities_:
539
 
@@ -562,14 +562,14 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
562
  key += 1
563
 
564
  distribution = [line.strip() for line in f_dist.readlines()]
565
-
566
  random.seed(4)
567
  train = [raw.split('\t')[0] for raw in distribution if len(raw.split('\t')) == 4 and raw.split('\t')[3] == 'train 2021']
568
  random.shuffle(train)
569
  random.shuffle(train)
570
  random.shuffle(train)
571
  train, validation = np.split(train, [int(len(train)*0.7096)])
572
-
573
  test = [raw.split('\t')[0] for raw in distribution if len(raw.split('\t')) == 4 and raw.split('\t')[3] == 'test 2021']
574
 
575
  if split == "train":
@@ -580,7 +580,7 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
580
  allowed_ids = list(validation)
581
 
582
  for r in all_res.values():
583
- if r["document_id"]+'.txt' in allowed_ids:
584
  yield r["id"], r
585
 
586
  elif self.config.name.find("ner") != -1:
@@ -618,7 +618,7 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
618
  for h in hf_split:
619
 
620
  if len(h['tokens']) > 0 and len(h['ner_tags']) > 0:
621
-
622
  all_res.append({
623
  "id": str(key),
624
  "document_id": h['document_id'],
@@ -636,5 +636,5 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
636
  allowed_ids = list(test)
637
 
638
  for r in all_res:
639
- if r["document_id"]+'.txt' in allowed_ids:
640
  yield r["id"], r
 
1
  import os
2
  import random
3
+
4
  from pathlib import Path
5
+ import numpy as np
6
 
7
  import datasets
8
+
9
+ _DESCRIPTION = """\
10
+ ddd
11
+ """
12
+
13
+ _HOMEPAGE = "ddd"
14
+
15
+ _LICENSE = "unknown"
16
 
17
  _CITATION = """\
18
  @inproceedings{grouin-etal-2021-classification,
 
32
  }
33
  """
34
 
35
  _SPECIALITIES = ['immunitaire', 'endocriniennes', 'blessures', 'chimiques', 'etatsosy', 'nutritionnelles', 'infections', 'virales', 'parasitaires', 'tumeur', 'osteomusculaires', 'stomatognathique', 'digestif', 'respiratoire', 'ORL', 'nerveux', 'oeil', 'homme', 'femme', 'cardiovasculaires', 'hemopathies', 'genetique', 'peau']
36
 
37
  _LABELS_BASE = ['anatomie', 'date', 'dose', 'duree', 'examen', 'frequence', 'mode', 'moment', 'pathologie', 'sosy', 'substance', 'traitement', 'valeur']
38
 
39
+ _URL = "data.zip"
40
+
41
+
42
  class DEFT2021(datasets.GeneratorBasedBuilder):
43
 
44
  DEFAULT_CONFIG_NAME = "ner"
 
49
  ]
50
 
51
  def _info(self):
52
+
53
  if self.config.name.find("cls") != -1:
54
 
55
  features = datasets.Features(
 
75
  "tokens": datasets.Sequence(datasets.Value("string")),
76
  "ner_tags": datasets.Sequence(
77
  datasets.features.ClassLabel(
78
+ names=[
79
+ 'O', 'B-anatomie', 'I-anatomie', 'B-date', 'I-date', 'B-dose',
80
+ 'I-dose', 'B-duree', 'I-duree', 'B-examen', 'I-examen', 'B-frequence',
81
+ 'I-frequence', 'B-mode', 'I-mode', 'B-moment', 'I-moment',
82
+ 'B-pathologie', 'I-pathologie', 'B-sosy', 'I-sosy', 'B-substance',
83
+ 'I-substance', 'B-traitement', 'I-traitement', 'B-valeur', 'I-valeur'
84
+ ],
85
  )
86
  ),
87
  }
 
98
 
99
  def _split_generators(self, dl_manager):
100
 
101
+ data_dir = dl_manager.download_and_extract(_URL).rstrip("/")
102
+
103
  return [
104
  datasets.SplitGenerator(
105
  name=datasets.Split.TRAIN,
 
126
 
127
  def remove_prefix(self, a: str, prefix: str) -> str:
128
  if a.startswith(prefix):
129
+ a = a[len(prefix):]
130
  return a
131
+
132
  def parse_brat_file(self, txt_file: Path, annotation_file_suffixes: List[str] = None, parse_notes: bool = False) -> Dict:
133
+
134
  example = {}
135
  example["document_id"] = txt_file.with_suffix("").name
136
  with txt_file.open() as f:
137
  example["text"] = f.read()
138
+
139
  # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
140
  # for event extraction
141
  if annotation_file_suffixes is None:
142
  annotation_file_suffixes = [".a1", ".a2", ".ann"]
143
+
144
  if len(annotation_file_suffixes) == 0:
145
  raise AssertionError(
146
  "At least one suffix for the to-be-read annotation files should be given!"
147
  )
148
+
149
  ann_lines = []
150
  for suffix in annotation_file_suffixes:
151
  annotation_file = txt_file.with_suffix(suffix)
152
  if annotation_file.exists():
153
  with annotation_file.open() as f:
154
  ann_lines.extend(f.readlines())
155
+
156
  example["text_bound_annotations"] = []
157
  example["events"] = []
158
  example["relations"] = []
159
  example["equivalences"] = []
160
  example["attributes"] = []
161
  example["normalizations"] = []
162
+
163
  if parse_notes:
164
  example["notes"] = []
165
+
166
  for line in ann_lines:
167
  line = line.strip()
168
  if not line:
169
  continue
170
+
171
  if line.startswith("T"): # Text bound
172
  ann = {}
173
  fields = line.split("\t")
174
+
175
  ann["id"] = fields[0]
176
  ann["type"] = fields[1].split()[0]
177
  ann["offsets"] = []
 
180
  for span in span_str.split(";"):
181
  start, end = span.split()
182
  ann["offsets"].append([int(start), int(end)])
183
+
184
  # Heuristically split text of discontiguous entities into chunks
185
  ann["text"] = []
186
  if len(ann["offsets"]) > 1:
187
  i = 0
188
  for start, end in ann["offsets"]:
189
  chunk_len = end - start
190
+ ann["text"].append(text[i:chunk_len + i])
191
  i += chunk_len
192
  while i < len(text) and text[i] == " ":
193
  i += 1
194
  else:
195
  ann["text"] = [text]
196
+
197
  example["text_bound_annotations"].append(ann)
198
+
199
  elif line.startswith("E"):
200
  ann = {}
201
  fields = line.split("\t")
202
+
203
  ann["id"] = fields[0]
204
+
205
  ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
206
+
207
  ann["arguments"] = []
208
  for role_ref_id in fields[1].split()[1:]:
209
  argument = {
 
211
  "ref_id": (role_ref_id.split(":"))[1],
212
  }
213
  ann["arguments"].append(argument)
214
+
215
  example["events"].append(ann)
216
+
217
  elif line.startswith("R"):
218
  ann = {}
219
  fields = line.split("\t")
220
+
221
  ann["id"] = fields[0]
222
  ann["type"] = fields[1].split()[0]
223
+
224
  ann["head"] = {
225
  "role": fields[1].split()[1].split(":")[0],
226
  "ref_id": fields[1].split()[1].split(":")[1],
 
229
  "role": fields[1].split()[2].split(":")[0],
230
  "ref_id": fields[1].split()[2].split(":")[1],
231
  }
232
+
233
  example["relations"].append(ann)
234
+
235
  # '*' seems to be the legacy way to mark equivalences,
236
  # but I couldn't find any info on the current way
237
  # this might have to be adapted dependent on the brat version
 
239
  elif line.startswith("*"):
240
  ann = {}
241
  fields = line.split("\t")
242
+
243
  ann["id"] = fields[0]
244
  ann["ref_ids"] = fields[1].split()[1:]
245
+
246
  example["equivalences"].append(ann)
247
+
248
  elif line.startswith("A") or line.startswith("M"):
249
  ann = {}
250
  fields = line.split("\t")
251
+
252
  ann["id"] = fields[0]
253
+
254
  info = fields[1].split()
255
  ann["type"] = info[0]
256
  ann["ref_id"] = info[1]
257
+
258
  if len(info) > 2:
259
  ann["value"] = info[2]
260
  else:
261
  ann["value"] = ""
262
+
263
  example["attributes"].append(ann)
264
+
265
  elif line.startswith("N"):
266
  ann = {}
267
  fields = line.split("\t")
268
+
269
  ann["id"] = fields[0]
270
  ann["text"] = fields[2]
271
+
272
  info = fields[1].split()
273
+
274
  ann["type"] = info[0]
275
  ann["ref_id"] = info[1]
276
  ann["resource_name"] = info[2].split(":")[0]
277
  ann["cuid"] = info[2].split(":")[1]
278
  example["normalizations"].append(ann)
279
+
280
  elif parse_notes and line.startswith("#"):
281
  ann = {}
282
  fields = line.split("\t")
283
+
284
  ann["id"] = fields[0]
285
  ann["text"] = fields[2] if len(fields) == 3 else "<BB_NULL_STR>"
286
+
287
  info = fields[1].split()
288
+
289
  ann["type"] = info[0]
290
  ann["ref_id"] = info[1]
291
  example["notes"].append(ann)
292
  return example
293
 
294
  def _to_source_example(self, brat_example: Dict) -> Dict:
295
+
296
  source_example = {
297
  "document_id": brat_example["document_id"],
298
  "text": brat_example["text"],
299
  }
300
+
301
  source_example["entities"] = []
302
+
303
  for entity_annotation in brat_example["text_bound_annotations"]:
304
  entity_ann = entity_annotation.copy()
305
+
306
  # Change id property name
307
  entity_ann["entity_id"] = entity_ann["id"]
308
  entity_ann.pop("id")
309
+
310
  # Add entity annotation to sample
311
  source_example["entities"].append(entity_ann)
312
+
313
  return source_example
314
 
315
  def convert_to_prodigy(self, json_object, list_label):
316
+
317
  def prepare_split(text):
318
+
319
  rep_before = ['?', '!', ';', '*']
320
  rep_after = ['’', "'"]
321
  rep_both = ['-', '/', '[', ']', ':', ')', '(', ',', '.']
322
+
323
  for i in rep_before:
324
+ text = text.replace(i, ' ' + i)
325
+
326
  for i in rep_after:
327
+ text = text.replace(i, i + ' ')
328
+
329
  for i in rep_both:
330
+ text = text.replace(i, ' ' + i + ' ')
331
+
332
  text_split = text.split()
333
+
334
  punctuations = [',', '.']
335
  for j in range(0, len(text_split)-1):
336
+ if j - 1 >= 0 and j + 1 <= len(text_split) - 1 and text_split[j-1][-1].isdigit() and text_split[j+1][0].isdigit():
337
  if text_split[j] in punctuations:
338
  text_split[j-1:j+2] = [''.join(text_split[j-1:j+2])]
339
+
340
  text = ' '.join(text_split)
341
+
342
  return text
343
+
344
  new_json = []
345
+
346
  for ex in [json_object]:
347
+
348
  text = prepare_split(ex['text'])
349
+
350
  tokenized_text = text.split()
351
+
352
  list_spans = []
353
+
354
  for a in ex['entities']:
355
+
356
  for o in range(len(a['offsets'])):
357
+
358
  text_annot = prepare_split(a['text'][o])
359
+
360
  offset_start = a['offsets'][o][0]
361
  offset_end = a['offsets'][o][1]
362
+
363
  nb_tokens_annot = len(text_annot.split())
364
+
365
  txt_offsetstart = prepare_split(ex['text'][:offset_start])
366
+
367
  nb_tokens_before_annot = len(txt_offsetstart.split())
368
+
369
  token_start = nb_tokens_before_annot
370
  token_end = token_start + nb_tokens_annot - 1
371
+
372
  if a['type'] in list_label:
373
  list_spans.append({
374
  'start': offset_start,
 
379
  'id': a['entity_id'],
380
  'text': a['text'][o],
381
  })
382
+
383
  res = {
384
  'id': ex['document_id'],
385
  'document_id': ex['document_id'],
 
387
  'tokens': tokenized_text,
388
  'spans': list_spans
389
  }
390
+
391
  new_json.append(res)
392
+
393
  return new_json
394
 
395
  def convert_to_hf_format(self, json_object):
396
+
397
  dict_out = []
398
+
399
  for i in json_object:
400
+
401
  # Filter annotations to keep the longest annotated spans when there is nested annotations
402
  selected_annotations = []
403
+
404
  if 'spans' in i:
405
+
406
  for idx_j, j in enumerate(i['spans']):
407
+
408
+ len_j = int(j['end']) - int(j['start'])
409
+ range_j = [l for l in range(int(j['start']), int(j['end']), 1)]
410
+
411
  keep = True
412
+
413
  for idx_k, k in enumerate(i['spans'][idx_j+1:]):
414
+
415
+ len_k = int(k['end']) - int(k['start'])
416
+ range_k = [l for l in range(int(k['start']), int(k['end']), 1)]
417
+
418
  inter = list(set(range_k).intersection(set(range_j)))
419
  if len(inter) > 0 and len_j < len_k:
420
  keep = False
421
+
422
  if keep:
423
  selected_annotations.append(j)
424
+
425
  # Create list of labels + id to separate different annotation and prepare IOB2 format
426
  nb_tokens = len(i['tokens'])
427
+ ner_tags = ['O'] * nb_tokens
428
+
429
  for slct in selected_annotations:
430
+
431
+ for x in range(slct['token_start'], slct['token_end'] + 1, 1):
432
+
433
  if i['tokens'][x] not in slct['text']:
434
  if ner_tags[x-1] == 'O':
435
+ ner_tags[x-1] = slct['label'] + '-' + slct['id']
436
  else:
437
  if ner_tags[x] == 'O':
438
+ ner_tags[x] = slct['label'] + '-' + slct['id']
439
+
440
  # Make IOB2 format
441
  ner_tags_IOB2 = []
442
  for idx_l, label in enumerate(ner_tags):
443
+
444
  if label == 'O':
445
  ner_tags_IOB2.append('O')
446
  else:
447
  current_label = label.split('-')[0]
448
  current_id = label.split('-')[1]
449
  if idx_l == 0:
450
+ ner_tags_IOB2.append('B-' + current_label)
451
  elif current_label in ner_tags[idx_l-1]:
452
  if current_id == ner_tags[idx_l-1].split('-')[1]:
453
+ ner_tags_IOB2.append('I-' + current_label)
454
  else:
455
+ ner_tags_IOB2.append('B-' + current_label)
456
  else:
457
+ ner_tags_IOB2.append('B-' + current_label)
458
+
459
  dict_out.append({
460
  'id': i['id'],
461
  'document_id': i['document_id'],
462
  "ner_tags": ner_tags_IOB2,
463
  "tokens": i['tokens'],
464
  })
 
 
465
 
466
+ return dict_out
467
 
468
  def split_sentences(self, json_o):
469
  """
470
  Split each document in sentences to fit the 512 maximum tokens of BERT.
 
471
  """
472
+
473
  final_json = []
474
+
475
  for i in json_o:
476
+
477
+ ind_punc = [index for index, value in enumerate(i['tokens']) if value == '.'] + [len(i['tokens'])]
478
+
479
  for index, value in enumerate(ind_punc):
480
+
481
+ if index == 0:
482
+ final_json.append({
483
+ 'id': i['id'] + '_' + str(index),
484
+ 'document_id': i['document_id'],
485
+ 'ner_tags': i['ner_tags'][:value+1],
486
+ 'tokens': i['tokens'][:value+1]
487
+ })
488
  else:
489
  prev_value = ind_punc[index-1]
490
+ final_json.append({
491
+ 'id': i['id'] + '_' + str(index),
492
+ 'document_id': i['document_id'],
493
+ 'ner_tags': i['ner_tags'][prev_value+1:value+1],
494
+ 'tokens': i['tokens'][prev_value+1:value+1]
495
+ })
496
+
497
  return final_json
498
 
499
  def _generate_examples(self, data_dir, split):
500
+
501
  if self.config.name.find("cls") != -1:
502
 
503
  all_res = {}
 
509
  else:
510
  split_eval = 'test'
511
 
512
+ path_labels = Path(data_dir) / 'evaluations' / f"ref-{split_eval}-deft2021.txt"
513
 
514
  with open(os.path.join(data_dir, 'distribution-corpus.txt')) as f_dist:
515
 
 
525
 
526
  if len(raw_split) == 3 and raw_split[0] in doc_specialities_:
527
  doc_specialities_[raw_split[0]].append(raw_split[1])
528
+
529
  elif len(raw_split) == 3 and raw_split[0] not in doc_specialities_:
530
  doc_specialities_[raw_split[0]] = [raw_split[1]]
531
 
 
533
 
534
  for guid, txt_file in enumerate(sorted(ann_path.glob("*.txt"))):
535
 
536
+ ann_file = txt_file.with_suffix("").name.split('.')[0] + '.ann'
537
 
538
  if ann_file in doc_specialities_:
539
 
 
562
  key += 1
563
 
564
  distribution = [line.strip() for line in f_dist.readlines()]
565
+
566
  random.seed(4)
567
  train = [raw.split('\t')[0] for raw in distribution if len(raw.split('\t')) == 4 and raw.split('\t')[3] == 'train 2021']
568
  random.shuffle(train)
569
  random.shuffle(train)
570
  random.shuffle(train)
571
  train, validation = np.split(train, [int(len(train)*0.7096)])
572
+
573
  test = [raw.split('\t')[0] for raw in distribution if len(raw.split('\t')) == 4 and raw.split('\t')[3] == 'test 2021']
574
 
575
  if split == "train":
 
580
  allowed_ids = list(validation)
581
 
582
  for r in all_res.values():
583
+ if r["document_id"] + '.txt' in allowed_ids:
584
  yield r["id"], r
585
 
586
  elif self.config.name.find("ner") != -1:
 
618
  for h in hf_split:
619
 
620
  if len(h['tokens']) > 0 and len(h['ner_tags']) > 0:
621
+
622
  all_res.append({
623
  "id": str(key),
624
  "document_id": h['document_id'],
 
636
  allowed_ids = list(test)
637
 
638
  for r in all_res:
639
+ if r["document_id"] + '.txt' in allowed_ids:
640
  yield r["id"], r
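The generator reads distribution-corpus.txt and evaluations/ref-{train,test}-deft2021.txt from the extracted archive, so data.zip has to ship with that layout. A small, hypothetical sanity check of the bundled archive (file names taken from the loader above):

    import zipfile

    with zipfile.ZipFile("data.zip") as zf:
        names = zf.namelist()

    # The loader expects these files somewhere under the extracted directory.
    assert any(n.endswith("distribution-corpus.txt") for n in names)
    assert any("evaluations/" in n and n.endswith("-deft2021.txt") for n in names)
    print(f"{len(names)} entries found in data.zip")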
data.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:84276b07f43c98f2874d342c08af9346bb3920a5dd4b2deeccd9314ca09dbbdd
3
- size 2046495
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36633ad2d4d1c399dd906c7ba1a11aa352f49aa9e67b7b02414521d965f93bbd
3
+ size 1990713
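data.zip is tracked with Git LFS, so only its pointer (oid and size) changes in this commit. A quick sketch for confirming that a locally downloaded archive matches the new pointer:

    import hashlib, os

    digest = hashlib.sha256(open("data.zip", "rb").read()).hexdigest()
    print(digest)                      # expected: 36633ad2d4d1c399dd906c7ba1a11aa352f49aa9e67b7b02414521d965f93bbd
    print(os.path.getsize("data.zip")) # expected: 1990713 bytes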