manikrishna-m committed on
Commit 0b6f818 · verified · 1 Parent(s): dec0b5f

Delete loading script

Files changed (1)
  1. conll2012_ontonotesv5.py +0 -816
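With the loading script deleted, the dataset would typically be loaded through the standard `datasets` API instead. A minimal sketch, assuming the data remains available on the Hub under a repo id like `conll2012_ontonotesv5` (hypothetical here) and that the config names defined in the removed script (`english_v4`, `chinese_v4`, `arabic_v4`, `english_v12`) are kept:

```python
from datasets import load_dataset

# Hypothetical repo id; config names mirror those in the deleted script.
dataset = load_dataset("conll2012_ontonotesv5", "english_v4")

first_doc = dataset["train"][0]
print(first_doc["document_id"])
print(first_doc["sentences"][0]["words"])
```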
conll2012_ontonotesv5.py DELETED
@@ -1,816 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """CoNLL2012 shared task data based on OntoNotes 5.0"""
16
-
17
- import glob
18
- import os
19
- from collections import defaultdict
20
- from typing import DefaultDict, Iterator, List, Optional, Tuple
21
-
22
- import datasets
23
-
24
-
25
- _CITATION = """\
26
- @inproceedings{pradhan-etal-2013-towards,
27
- title = "Towards Robust Linguistic Analysis using {O}nto{N}otes",
28
- author = {Pradhan, Sameer and
29
- Moschitti, Alessandro and
30
- Xue, Nianwen and
31
- Ng, Hwee Tou and
32
- Bj{\"o}rkelund, Anders and
33
- Uryupina, Olga and
34
- Zhang, Yuchen and
35
- Zhong, Zhi},
36
- booktitle = "Proceedings of the Seventeenth Conference on Computational Natural Language Learning",
37
- month = aug,
38
- year = "2013",
39
- address = "Sofia, Bulgaria",
40
- publisher = "Association for Computational Linguistics",
41
- url = "https://aclanthology.org/W13-3516",
42
- pages = "143--152",
43
- }
44
-
45
- Ralph Weischedel, Martha Palmer, Mitchell Marcus, Eduard Hovy, Sameer Pradhan, \
46
- Lance Ramshaw, Nianwen Xue, Ann Taylor, Jeff Kaufman, Michelle Franchini, \
47
- Mohammed El-Bachouti, Robert Belvin, Ann Houston. \
48
- OntoNotes Release 5.0 LDC2013T19. \
49
- Web Download. Philadelphia: Linguistic Data Consortium, 2013.
50
- """
51
-
52
- _DESCRIPTION = """\
53
- OntoNotes v5.0 is the final version of the OntoNotes corpus, and is a large-scale, multi-genre,
54
- multilingual corpus manually annotated with syntactic, semantic and discourse information.
55
-
56
- This dataset is the extended version of OntoNotes v5.0 used in the CoNLL-2012 shared task.
57
- It includes the v4 train/dev and v9 test data for English/Chinese/Arabic, and the corrected v12 train/dev/test data (English only).
58
-
59
- The source of the data is the Mendeley Data repo [ontonotes-conll2012](https://data.mendeley.com/datasets/zmycy7t9h9), which appears to be the same as the official data, but users should use this dataset at their own risk.
60
-
61
- See also the summaries on Papers with Code: [OntoNotes 5.0](https://paperswithcode.com/dataset/ontonotes-5-0) and [CoNLL-2012](https://paperswithcode.com/dataset/conll-2012-1).
62
-
63
- For more detailed information about the dataset, such as the annotation scheme and tag sets, refer to the documents in the Mendeley repo mentioned above.
64
- """
65
-
66
- _URL = "https://data.mendeley.com/public-files/datasets/zmycy7t9h9/files/b078e1c4-f7a4-4427-be7f-9389967831ef/file_downloaded"
67
-
68
-
69
- class Conll2012Ontonotesv5Config(datasets.BuilderConfig):
70
- """BuilderConfig for the CoNLL formatted OntoNotes dataset."""
71
-
72
- def __init__(self, language=None, conll_version=None, **kwargs):
73
- """BuilderConfig for the CoNLL formatted OntoNotes dataset.
74
-
75
- Args:
76
- language: string, one of {"english", "chinese", "arabic"}.
77
- conll_version: string, "v4" or "v12". Note there is only English v12.
78
- **kwargs: keyword arguments forwarded to super.
79
- """
80
- assert language in ["english", "chinese", "arabic"]
81
- assert conll_version in ["v4", "v12"]
82
- if conll_version == "v12":
83
- assert language == "english"
84
- super(Conll2012Ontonotesv5Config, self).__init__(
85
- name=f"{language}_{conll_version}",
86
- description=f"{conll_version} of CoNLL formatted OntoNotes dataset for {language}.",
87
- version=datasets.Version("1.0.0"), # hf dataset script version
88
- **kwargs,
89
- )
90
- self.language = language
91
- self.conll_version = conll_version
92
-
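The assertions above admit exactly four configurations; constructing one directly would look roughly like this (a sketch, assuming the class is importable):

```python
# One of the four valid (language, conll_version) combinations.
config = Conll2012Ontonotesv5Config(language="english", conll_version="v12")
assert config.name == "english_v12"
```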
93
-
94
- class Conll2012Ontonotesv5(datasets.GeneratorBasedBuilder):
95
- """The CoNLL formatted OntoNotes dataset."""
96
-
97
- BUILDER_CONFIGS = [
98
- Conll2012Ontonotesv5Config(
99
- language=lang,
100
- conll_version="v4",
101
- )
102
- for lang in ["english", "chinese", "arabic"]
103
- ] + [
104
- Conll2012Ontonotesv5Config(
105
- language="english",
106
- conll_version="v12",
107
- )
108
- ]
109
-
110
- def _info(self):
111
- lang = self.config.language
112
- conll_version = self.config.conll_version
113
- if lang == "arabic":
114
- pos_tag_feature = datasets.Value("string")
115
- else:
116
- tag_set = _POS_TAGS[f"{lang}_{conll_version}"]
117
- pos_tag_feature = datasets.ClassLabel(num_classes=len(tag_set), names=tag_set)
118
-
119
- return datasets.DatasetInfo(
120
- description=_DESCRIPTION,
121
- features=datasets.Features(
122
- {
123
- "document_id": datasets.Value("string"),
124
- "sentences": [
125
- {
126
- "part_id": datasets.Value("int32"),
127
- "words": datasets.Sequence(datasets.Value("string")),
128
- "pos_tags": datasets.Sequence(pos_tag_feature),
129
- "parse_tree": datasets.Value("string"),
130
- "predicate_lemmas": datasets.Sequence(datasets.Value("string")),
131
- "predicate_framenet_ids": datasets.Sequence(datasets.Value("string")),
132
- "word_senses": datasets.Sequence(datasets.Value("float32")),
133
- "speaker": datasets.Value("string"),
134
- "named_entities": datasets.Sequence(
135
- datasets.ClassLabel(num_classes=37, names=_NAMED_ENTITY_TAGS)
136
- ),
137
- "srl_frames": [
138
- {
139
- "verb": datasets.Value("string"),
140
- "frames": datasets.Sequence(datasets.Value("string")),
141
- }
142
- ],
143
- "coref_spans": datasets.Sequence(datasets.Sequence(datasets.Value("int32"), length=3)),
144
- }
145
- ],
146
- }
147
- ),
148
- homepage="https://conll.cemantix.org/2012/introduction.html",
149
- citation=_CITATION,
150
- )
151
-
152
- def _split_generators(self, dl_manager):
153
- lang = self.config.language
154
- conll_version = self.config.conll_version
155
- dl_dir = dl_manager.download_and_extract(_URL)
156
- data_dir = os.path.join(dl_dir, f"conll-2012/{conll_version}/data")
157
-
158
- return [
159
- datasets.SplitGenerator(
160
- name=datasets.Split.TRAIN,
161
- gen_kwargs={"conll_files_directory": os.path.join(data_dir, f"train/data/{lang}")},
162
- ),
163
- datasets.SplitGenerator(
164
- name=datasets.Split.VALIDATION,
165
- gen_kwargs={"conll_files_directory": os.path.join(data_dir, f"development/data/{lang}")},
166
- ),
167
- datasets.SplitGenerator(
168
- name=datasets.Split.TEST,
169
- gen_kwargs={"conll_files_directory": os.path.join(data_dir, f"test/data/{lang}")},
170
- ),
171
- ]
172
-
173
- def _generate_examples(self, conll_files_directory):
174
- conll_files = sorted(glob.glob(os.path.join(conll_files_directory, "**/*gold_conll"), recursive=True))
175
- for idx, conll_file in enumerate(conll_files):
176
- sentences = []
177
- for sent in Ontonotes().sentence_iterator(conll_file):
178
- document_id = sent.document_id
179
- sentences.append(
180
- {
181
- "part_id": sent.sentence_id, # should be part id, according to https://conll.cemantix.org/2012/data.html
182
- "words": sent.words,
183
- "pos_tags": sent.pos_tags,
184
- "parse_tree": sent.parse_tree,
185
- "predicate_lemmas": sent.predicate_lemmas,
186
- "predicate_framenet_ids": sent.predicate_framenet_ids,
187
- "word_senses": sent.word_senses,
188
- "speaker": sent.speakers[0],
189
- "named_entities": sent.named_entities,
190
- "srl_frames": [{"verb": f[0], "frames": f[1]} for f in sent.srl_frames],
191
- "coref_spans": [(c[0], *c[1]) for c in sent.coref_spans],
192
- }
193
- )
194
- yield idx, {"document_id": document_id, "sentences": sentences}
195
-
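Note the flattening of coreference spans in the sentence dicts above: the reader yields `(cluster_id, (start, end))` pairs, which `_generate_examples` converts to three-element tuples to match the fixed-length `coref_spans` feature declared in `_info`. An illustrative sketch (values are made up for demonstration):

```python
# Reader-style spans: (cluster_id, (start_index, end_index)), indices inclusive.
reader_spans = [(7, (0, 2)), (7, (5, 5))]

# Flattened form stored in the dataset, matching Sequence(..., length=3) in _info.
flattened = [(c[0], *c[1]) for c in reader_spans]
assert flattened == [(7, 0, 2), (7, 5, 5)]
```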
196
-
197
- # --------------------------------------------------------------------------------------------------------
198
- # Tag set
199
- _NAMED_ENTITY_TAGS = [
200
- "O", # out of named entity
201
- "B-PERSON",
202
- "I-PERSON",
203
- "B-NORP",
204
- "I-NORP",
205
- "B-FAC", # FACILITY
206
- "I-FAC",
207
- "B-ORG", # ORGANIZATION
208
- "I-ORG",
209
- "B-GPE",
210
- "I-GPE",
211
- "B-LOC",
212
- "I-LOC",
213
- "B-PRODUCT",
214
- "I-PRODUCT",
215
- "B-DATE",
216
- "I-DATE",
217
- "B-TIME",
218
- "I-TIME",
219
- "B-PERCENT",
220
- "I-PERCENT",
221
- "B-MONEY",
222
- "I-MONEY",
223
- "B-QUANTITY",
224
- "I-QUANTITY",
225
- "B-ORDINAL",
226
- "I-ORDINAL",
227
- "B-CARDINAL",
228
- "I-CARDINAL",
229
- "B-EVENT",
230
- "I-EVENT",
231
- "B-WORK_OF_ART",
232
- "I-WORK_OF_ART",
233
- "B-LAW",
234
- "I-LAW",
235
- "B-LANGUAGE",
236
- "I-LANGUAGE",
237
- ]
238
-
239
- _POS_TAGS = {
240
- "english_v4": [
241
- "XX", # missing
242
- "``",
243
- "$",
244
- "''",
245
- ",",
246
- "-LRB-", # (
247
- "-RRB-", # )
248
- ".",
249
- ":",
250
- "ADD",
251
- "AFX",
252
- "CC",
253
- "CD",
254
- "DT",
255
- "EX",
256
- "FW",
257
- "HYPH",
258
- "IN",
259
- "JJ",
260
- "JJR",
261
- "JJS",
262
- "LS",
263
- "MD",
264
- "NFP",
265
- "NN",
266
- "NNP",
267
- "NNPS",
268
- "NNS",
269
- "PDT",
270
- "POS",
271
- "PRP",
272
- "PRP$",
273
- "RB",
274
- "RBR",
275
- "RBS",
276
- "RP",
277
- "SYM",
278
- "TO",
279
- "UH",
280
- "VB",
281
- "VBD",
282
- "VBG",
283
- "VBN",
284
- "VBP",
285
- "VBZ",
286
- "WDT",
287
- "WP",
288
- "WP$",
289
- "WRB",
290
- ], # 49
291
- "english_v12": [
292
- "XX", # misssing
293
- "``",
294
- "$",
295
- "''",
296
- "*",
297
- ",",
298
- "-LRB-", # (
299
- "-RRB-", # )
300
- ".",
301
- ":",
302
- "ADD",
303
- "AFX",
304
- "CC",
305
- "CD",
306
- "DT",
307
- "EX",
308
- "FW",
309
- "HYPH",
310
- "IN",
311
- "JJ",
312
- "JJR",
313
- "JJS",
314
- "LS",
315
- "MD",
316
- "NFP",
317
- "NN",
318
- "NNP",
319
- "NNPS",
320
- "NNS",
321
- "PDT",
322
- "POS",
323
- "PRP",
324
- "PRP$",
325
- "RB",
326
- "RBR",
327
- "RBS",
328
- "RP",
329
- "SYM",
330
- "TO",
331
- "UH",
332
- "VB",
333
- "VBD",
334
- "VBG",
335
- "VBN",
336
- "VBP",
337
- "VBZ",
338
- "VERB",
339
- "WDT",
340
- "WP",
341
- "WP$",
342
- "WRB",
343
- ], # 51
344
- "chinese_v4": [
345
- "X", # missing
346
- "AD",
347
- "AS",
348
- "BA",
349
- "CC",
350
- "CD",
351
- "CS",
352
- "DEC",
353
- "DEG",
354
- "DER",
355
- "DEV",
356
- "DT",
357
- "ETC",
358
- "FW",
359
- "IJ",
360
- "INF",
361
- "JJ",
362
- "LB",
363
- "LC",
364
- "M",
365
- "MSP",
366
- "NN",
367
- "NR",
368
- "NT",
369
- "OD",
370
- "ON",
371
- "P",
372
- "PN",
373
- "PU",
374
- "SB",
375
- "SP",
376
- "URL",
377
- "VA",
378
- "VC",
379
- "VE",
380
- "VV",
381
- ], # 36
382
- }
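Both tag inventories back the `ClassLabel` features declared in `_info`, so integer-encoded tags can be mapped back to these names. A minimal sketch using the named-entity list:

```python
import datasets

# ClassLabel built from the same name list used by the builder.
ner_labels = datasets.ClassLabel(names=_NAMED_ENTITY_TAGS)

assert ner_labels.int2str(0) == "O"
assert ner_labels.str2int("B-PERSON") == 1
```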
383
-
384
- # --------------------------------------------------------------------------------------------------------
385
- # The CoNLL(2012) file reader
386
- # Modified the original code to get rid of extra package dependency.
387
- # Original code: https://github.com/allenai/allennlp-models/blob/main/allennlp_models/common/ontonotes.py
388
-
389
-
390
- class OntonotesSentence:
391
- """
392
- A class representing the annotations available for a single CONLL formatted sentence.
393
- # Parameters
394
- document_id : `str`
395
- This is a variation on the document filename
396
- sentence_id : `int`
397
- The integer ID of the sentence within a document.
398
- words : `List[str]`
399
- These are the tokens as segmented/tokenized in the Treebank.
400
- pos_tags : `List[str]`
401
- This is the Penn-Treebank-style part of speech. When parse information is missing,
402
- all parts of speech except the one for which there is some sense or proposition
403
- annotation are marked with an XX tag. The verb is marked with just a VERB tag.
404
- parse_tree : `Optional[str]`
405
- A bracketed string representation of the parse. It includes POS tags as pre-terminal nodes.
406
- When the parse information is missing, the parse will be `None`.
407
- predicate_lemmas : `List[Optional[str]]`
408
- The predicate lemma of the words for which we have semantic role
409
- information or word sense information. All other indices are `None`.
410
- predicate_framenet_ids : `List[Optional[str]]`
411
- The PropBank frameset ID of the lemmas in `predicate_lemmas`, or `None`.
412
- word_senses : `List[Optional[float]]`
413
- The word senses for the words in the sentence, or `None`. These are floats
414
- because the word sense can have values after the decimal, like `1.1`.
415
- speakers : `List[Optional[str]]`
416
- The speaker information for the words in the sentence, if present, or `None`.
417
- This is the speaker or author name where available. Mostly in Broadcast Conversation
418
- and Web Log data. When not available the rows are marked with an "-".
419
- named_entities : `List[str]`
420
- The BIO tags for named entities in the sentence.
421
- srl_frames : `List[Tuple[str, List[str]]]`
422
- A list of tuples, one per verbal predicate in the sentence, pairing the verb with
423
- its PropBank frame labels in BIO format.
424
- coref_spans : `Set[TypedSpan]`
425
- The spans for entity mentions involved in coreference resolution within the sentence.
426
- Each element is a tuple composed of (cluster_id, (start_index, end_index)). Indices
427
- are `inclusive`.
428
- """
429
-
430
- def __init__(
431
- self,
432
- document_id: str,
433
- sentence_id: int,
434
- words: List[str],
435
- pos_tags: List[str],
436
- parse_tree: Optional[str],
437
- predicate_lemmas: List[Optional[str]],
438
- predicate_framenet_ids: List[Optional[str]],
439
- word_senses: List[Optional[float]],
440
- speakers: List[Optional[str]],
441
- named_entities: List[str],
442
- srl_frames: List[Tuple[str, List[str]]],
443
- coref_spans,
444
- ) -> None:
445
-
446
- self.document_id = document_id
447
- self.sentence_id = sentence_id
448
- self.words = words
449
- self.pos_tags = pos_tags
450
- self.parse_tree = parse_tree
451
- self.predicate_lemmas = predicate_lemmas
452
- self.predicate_framenet_ids = predicate_framenet_ids
453
- self.word_senses = word_senses
454
- self.speakers = speakers
455
- self.named_entities = named_entities
456
- self.srl_frames = srl_frames
457
- self.coref_spans = coref_spans
458
-
459
-
460
- class Ontonotes:
461
- """
462
- This reader class is designed to read in the English OntoNotes v5.0 data
463
- in the format used by the CoNLL 2011/2012 shared tasks. In order to use this
464
- Reader, you must follow the instructions provided [here (v12 release)]
465
- (https://cemantix.org/data/ontonotes.html), which will allow you to download
466
- the CoNLL style annotations for the OntoNotes v5.0 release -- LDC2013T19.tgz
467
- obtained from LDC.
468
- Once you have run the scripts on the extracted data, you will have a folder
469
- structured as follows:
470
- ```
471
- conll-formatted-ontonotes-5.0/
472
- ── data
473
- ├── development
474
- └── data
475
- └── english
476
- └── annotations
477
- ├── bc
478
- ├── bn
479
- ├── mz
480
- ├── nw
481
- ├── pt
482
- ├── tc
483
- └── wb
484
- ├── test
485
- └── data
486
- └── english
487
- └── annotations
488
- ├── bc
489
- ├── bn
490
- ├── mz
491
- ├── nw
492
- ├── pt
493
- ├── tc
494
- └── wb
495
- └── train
496
- └── data
497
- └── english
498
- └── annotations
499
- ├── bc
500
- ├── bn
501
- ├── mz
502
- ├── nw
503
- ├── pt
504
- ├── tc
505
- └── wb
506
- ```
507
- The file path provided to this class can then be any of the train, test or development
508
- directories (or the top-level data directory, if you are not utilizing the splits).
509
- The data has the following format, ordered by column.
510
- 1. Document ID : `str`
511
- This is a variation on the document filename
512
- 2. Part number : `int`
513
- Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
514
- 3. Word number : `int`
515
- This is the word index of the word in that sentence.
516
- 4. Word : `str`
517
- This is the token as segmented/tokenized in the Treebank. Initially the `*_skel` file
518
- contains the placeholder [WORD], which gets replaced by the actual token from the
519
- Treebank which is part of the OntoNotes release.
520
- 5. POS Tag : `str`
521
- This is the Penn Treebank style part of speech. When parse information is missing,
522
- all parts of speech except the one for which there is some sense or proposition
523
- annotation are marked with an XX tag. The verb is marked with just a VERB tag.
524
- 6. Parse bit : `str`
525
- This is the bracketed structure broken before the first open parenthesis in the parse,
526
- and the word/part-of-speech leaf replaced with a `*`. When the parse information is
527
- missing, the first word of a sentence is tagged as `(TOP*` and the last word is tagged
528
- as `*)` and all intermediate words are tagged with a `*`.
529
- 7. Predicate lemma : `str`
530
- The predicate lemma is mentioned for the rows for which we have semantic role
531
- information or word sense information. All other rows are marked with a "-".
532
- 8. Predicate Frameset ID : `int`
533
- The PropBank frameset ID of the predicate in Column 7.
534
- 9. Word sense : `float`
535
- This is the word sense of the word in Column 3.
536
- 10. Speaker/Author : `str`
537
- This is the speaker or author name where available. Mostly in Broadcast Conversation
538
- and Web Log data. When not available the rows are marked with an "-".
539
- 11. Named Entities : `str`
540
- This column identifies the spans representing various named entities. For documents
541
- which do not have named entity annotation, each line is represented with an `*`.
542
- 12. Predicate Arguments : `str`
543
- There is one column of predicate-argument structure information for each predicate
544
- mentioned in Column 7. If there are no predicates tagged in a sentence this is a
545
- single column with all rows marked with an `*`.
546
- -1. Co-reference : `str`
547
- Co-reference chain information encoded in a parenthesis structure. For documents that do
548
- not have co-reference annotations, each line is represented with a "-".
549
- """
550
-
551
- def dataset_iterator(self, file_path: str) -> Iterator[OntonotesSentence]:
552
- """
553
- An iterator over the entire dataset, yielding all sentences processed.
554
- """
555
- for conll_file in self.dataset_path_iterator(file_path):
556
- yield from self.sentence_iterator(conll_file)
557
-
558
- @staticmethod
559
- def dataset_path_iterator(file_path: str) -> Iterator[str]:
560
- """
561
- An iterator returning file_paths in a directory
562
- containing CONLL-formatted files.
563
- """
564
- for root, _, files in list(os.walk(file_path)):
565
- for data_file in sorted(files):
566
- # These are a relic of the dataset pre-processing. Every
567
- # file will be duplicated - one file called filename.gold_skel
568
- # and one generated from the preprocessing called filename.gold_conll.
569
- if not data_file.endswith("gold_conll"):
570
- continue
571
-
572
- yield os.path.join(root, data_file)
573
-
574
- def dataset_document_iterator(self, file_path: str) -> Iterator[List[OntonotesSentence]]:
575
- """
576
- An iterator over CONLL formatted files which yields documents, regardless
577
- of the number of document annotations in a particular file. This is useful
578
- for conll data which has been preprocessed, such as the preprocessing which
579
- takes place for the 2012 CONLL Coreference Resolution task.
580
- """
581
- with open(file_path, "r", encoding="utf8") as open_file:
582
- conll_rows = []
583
- document: List[OntonotesSentence] = []
584
- for line in open_file:
585
- line = line.strip()
586
- if line != "" and not line.startswith("#"):
587
- # Non-empty line. Collect the annotation.
588
- conll_rows.append(line)
589
- else:
590
- if conll_rows:
591
- document.append(self._conll_rows_to_sentence(conll_rows))
592
- conll_rows = []
593
- if line.startswith("#end document"):
594
- yield document
595
- document = []
596
- if document:
597
- # Collect any stragglers or files which might not
598
- # have the '#end document' format for the end of the file.
599
- yield document
600
-
601
- def sentence_iterator(self, file_path: str) -> Iterator[OntonotesSentence]:
602
- """
603
- An iterator over the sentences in an individual CONLL formatted file.
604
- """
605
- for document in self.dataset_document_iterator(file_path):
606
- for sentence in document:
607
- yield sentence
608
-
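The reader can also be driven directly, outside the `datasets` builder; a minimal sketch (the directory path is a placeholder):

```python
# Standalone use of the reader defined in this module; the path is illustrative.
reader = Ontonotes()
for sentence in reader.dataset_iterator("/path/to/conll-2012/v4/data/train/data/english"):
    print(sentence.document_id, sentence.words[:5])
    break
```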
609
- def _conll_rows_to_sentence(self, conll_rows: List[str]) -> OntonotesSentence:
610
- document_id: str = None
611
- sentence_id: int = None
612
- # The words in the sentence.
613
- sentence: List[str] = []
614
- # The pos tags of the words in the sentence.
615
- pos_tags: List[str] = []
616
- # the pieces of the parse tree.
617
- parse_pieces: List[str] = []
618
- # The lemmatised form of the words in the sentence which
619
- # have SRL or word sense information.
620
- predicate_lemmas: List[str] = []
621
- # The FrameNet ID of the predicate.
622
- predicate_framenet_ids: List[str] = []
623
- # The sense of the word, if available.
624
- word_senses: List[float] = []
625
- # The current speaker, if available.
626
- speakers: List[str] = []
627
-
628
- verbal_predicates: List[str] = []
629
- span_labels: List[List[str]] = []
630
- current_span_labels: List[str] = []
631
-
632
- # Cluster id -> List of (start_index, end_index) spans.
633
- clusters: DefaultDict[int, List[Tuple[int, int]]] = defaultdict(list)
634
- # Cluster id -> List of start_indices which are open for this id.
635
- coref_stacks: DefaultDict[int, List[int]] = defaultdict(list)
636
-
637
- for index, row in enumerate(conll_rows):
638
- conll_components = row.split()
639
-
640
- document_id = conll_components[0]
641
- sentence_id = int(conll_components[1])
642
- word = conll_components[3]
643
- pos_tag = conll_components[4]
644
- parse_piece = conll_components[5]
645
-
646
- # Replace brackets in text and pos tags
647
- # with a different token for parse trees.
648
- if pos_tag != "XX" and word != "XX":
649
- if word == "(":
650
- parse_word = "-LRB-"
651
- elif word == ")":
652
- parse_word = "-RRB-"
653
- else:
654
- parse_word = word
655
- if pos_tag == "(":
656
- pos_tag = "-LRB-"
657
- if pos_tag == ")":
658
- pos_tag = "-RRB-"
659
- (left_brackets, right_hand_side) = parse_piece.split("*")
660
- # only keep ')' if there are nested brackets with nothing in them.
661
- right_brackets = right_hand_side.count(")") * ")"
662
- parse_piece = f"{left_brackets} ({pos_tag} {parse_word}) {right_brackets}"
663
- else:
664
- # There are some bad annotations in the CONLL data.
665
- # They contain no information, so to make this explicit,
666
- # we just set the parse piece to be None which will result
667
- # in the overall parse tree being None.
668
- parse_piece = None
669
-
670
- lemmatised_word = conll_components[6]
671
- framenet_id = conll_components[7]
672
- word_sense = conll_components[8]
673
- speaker = conll_components[9]
674
-
675
- if not span_labels:
676
- # If this is the first word in the sentence, create
677
- # empty lists to collect the NER and SRL BIO labels.
678
- # We can't do this upfront, because we don't know how many
679
- # components we are collecting, as a sentence can have
680
- # variable numbers of SRL frames.
681
- span_labels = [[] for _ in conll_components[10:-1]]
682
- # Create variables representing the current label for each label
683
- # sequence we are collecting.
684
- current_span_labels = [None for _ in conll_components[10:-1]]
685
-
686
- self._process_span_annotations_for_word(conll_components[10:-1], span_labels, current_span_labels)
687
-
688
- # If any annotation marks this word as a verb predicate,
689
- # we need to record its index. This also has the side effect
690
- # of ordering the verbal predicates by their location in the
691
- # sentence, automatically aligning them with the annotations.
692
- word_is_verbal_predicate = any("(V" in x for x in conll_components[11:-1])
693
- if word_is_verbal_predicate:
694
- verbal_predicates.append(word)
695
-
696
- self._process_coref_span_annotations_for_word(conll_components[-1], index, clusters, coref_stacks)
697
-
698
- sentence.append(word)
699
- pos_tags.append(pos_tag)
700
- parse_pieces.append(parse_piece)
701
- predicate_lemmas.append(lemmatised_word if lemmatised_word != "-" else None)
702
- predicate_framenet_ids.append(framenet_id if framenet_id != "-" else None)
703
- word_senses.append(float(word_sense) if word_sense != "-" else None)
704
- speakers.append(speaker if speaker != "-" else None)
705
-
706
- named_entities = span_labels[0]
707
- srl_frames = [(predicate, labels) for predicate, labels in zip(verbal_predicates, span_labels[1:])]
708
-
709
- if all(parse_pieces):
710
- parse_tree = "".join(parse_pieces)
711
- else:
712
- parse_tree = None
713
- coref_span_tuples = {(cluster_id, span) for cluster_id, span_list in clusters.items() for span in span_list}
714
- return OntonotesSentence(
715
- document_id,
716
- sentence_id,
717
- sentence,
718
- pos_tags,
719
- parse_tree,
720
- predicate_lemmas,
721
- predicate_framenet_ids,
722
- word_senses,
723
- speakers,
724
- named_entities,
725
- srl_frames,
726
- coref_span_tuples,
727
- )
728
-
729
- @staticmethod
730
- def _process_coref_span_annotations_for_word(
731
- label: str,
732
- word_index: int,
733
- clusters: DefaultDict[int, List[Tuple[int, int]]],
734
- coref_stacks: DefaultDict[int, List[int]],
735
- ) -> None:
736
- """
737
- For a given coref label, add it to a currently open span(s), complete a span(s) or
738
- ignore it, if it is outside of all spans. This method mutates the clusters and coref_stacks
739
- dictionaries.
740
- # Parameters
741
- label : `str`
742
- The coref label for this word.
743
- word_index : `int`
744
- The word index into the sentence.
745
- clusters : `DefaultDict[int, List[Tuple[int, int]]]`
746
- A dictionary mapping cluster ids to lists of inclusive spans into the
747
- sentence.
748
- coref_stacks : `DefaultDict[int, List[int]]`
749
- Stacks for each cluster id to hold the start indices of active spans (spans
750
- which we are inside of when processing a given word). Spans with the same id
751
- can be nested, which is why we collect these opening spans on a stack, e.g:
752
- [Greg, the baker who referred to [himself]_ID1 as 'the bread man']_ID1
753
- """
754
- if label != "-":
755
- for segment in label.split("|"):
756
- # The conll representation of coref spans allows spans to
757
- # overlap. If spans end or begin at the same word, they are
758
- # separated by a "|".
759
- if segment[0] == "(":
760
- # The span begins at this word.
761
- if segment[-1] == ")":
762
- # The span begins and ends at this word (single word span).
763
- cluster_id = int(segment[1:-1])
764
- clusters[cluster_id].append((word_index, word_index))
765
- else:
766
- # The span is starting, so we record the index of the word.
767
- cluster_id = int(segment[1:])
768
- coref_stacks[cluster_id].append(word_index)
769
- else:
770
- # The span for this id is ending, but didn't start at this word.
771
- # Retrieve the start index from the document state and
772
- # add the span to the clusters for this id.
773
- cluster_id = int(segment[:-1])
774
- start = coref_stacks[cluster_id].pop()
775
- clusters[cluster_id].append((start, word_index))
776
-
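As a worked illustration of the parenthesis encoding handled above (the labels are illustrative), `(7` opens cluster 7 at the current word, `7)` closes it, and `(7)` marks a single-word span:

```python
from collections import defaultdict

clusters, stacks = defaultdict(list), defaultdict(list)
process = Ontonotes._process_coref_span_annotations_for_word

process("(7", 0, clusters, stacks)   # open cluster 7 at word 0
process("-", 1, clusters, stacks)    # no coreference annotation on word 1
process("7)", 2, clusters, stacks)   # close cluster 7 -> span (0, 2)
process("(3)", 3, clusters, stacks)  # single-word span for cluster 3

assert dict(clusters) == {7: [(0, 2)], 3: [(3, 3)]}
```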
777
- @staticmethod
778
- def _process_span_annotations_for_word(
779
- annotations: List[str],
780
- span_labels: List[List[str]],
781
- current_span_labels: List[Optional[str]],
782
- ) -> None:
783
- """
784
- Given a sequence of different label types for a single word and the current
785
- span label we are inside, compute the BIO tag for each label and append to a list.
786
- # Parameters
787
- annotations : `List[str]`
788
- A list of labels to compute BIO tags for.
789
- span_labels : `List[List[str]]`
790
- A list of lists, one for each annotation, to incrementally collect
791
- the BIO tags for a sequence.
792
- current_span_labels : `List[Optional[str]]`
793
- The currently open span per annotation type, or `None` if there is no open span.
794
- """
795
- for annotation_index, annotation in enumerate(annotations):
796
- # strip all bracketing information to
797
- # get the actual propbank label.
798
- label = annotation.strip("()*")
799
-
800
- if "(" in annotation:
801
- # Entering into a span for a particular semantic role label.
802
- # We append the label and set the current span for this annotation.
803
- bio_label = "B-" + label
804
- span_labels[annotation_index].append(bio_label)
805
- current_span_labels[annotation_index] = label
806
- elif current_span_labels[annotation_index] is not None:
807
- # If there's no '(' token, but the current_span_label is not None,
808
- # then we are inside a span.
809
- bio_label = "I-" + current_span_labels[annotation_index]
810
- span_labels[annotation_index].append(bio_label)
811
- else:
812
- # We're outside a span.
813
- span_labels[annotation_index].append("O")
814
- # Exiting a span, so we reset the current span label for this annotation.
815
- if ")" in annotation:
816
- current_span_labels[annotation_index] = None
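For example, an illustrative trace of this BIO conversion: a three-word sentence whose single SRL column reads `(ARG0*`, `*`, `*)` becomes `B-ARG0`, `I-ARG0`, `I-ARG0`, with the open span closed after the last word:

```python
span_labels = [[]]
current_span_labels = [None]

for column_value in ["(ARG0*", "*", "*)"]:
    Ontonotes._process_span_annotations_for_word([column_value], span_labels, current_span_labels)

assert span_labels[0] == ["B-ARG0", "I-ARG0", "I-ARG0"]
assert current_span_labels == [None]  # the ")" in the last annotation closed the span
```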