Convert dataset to Parquet

#9
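With the data stored as Parquet shards, each config can be loaded directly with `datasets.load_dataset`; the loading script removed below is no longer needed. A minimal sketch (the bare repo name `conll2012_ontonotesv5` is an assumption — substitute this repository's actual `<namespace>/<name>`):

```python
from datasets import load_dataset

# Config names come from the README metadata in this PR:
# arabic_v4, chinese_v4, english_v12, english_v4.
# The repo id below is assumed -- replace it with this repository's actual id.
ds = load_dataset("conll2012_ontonotesv5", "english_v4")

print(ds)                             # DatasetDict with train / validation / test splits
print(ds["train"][0]["document_id"])
```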
README.md CHANGED
@@ -29,7 +29,7 @@ pretty_name: CoNLL2012 shared task data based on OntoNotes 5.0
29
  tags:
30
  - semantic-role-labeling
31
  dataset_info:
32
- - config_name: english_v4
33
  features:
34
  - name: document_id
35
  dtype: string
@@ -40,58 +40,7 @@ dataset_info:
40
  - name: words
41
  sequence: string
42
  - name: pos_tags
43
- sequence:
44
- class_label:
45
- names:
46
- '0': XX
47
- '1': '``'
48
- '2': $
49
- '3': ''''''
50
- '4': ','
51
- '5': -LRB-
52
- '6': -RRB-
53
- '7': .
54
- '8': ':'
55
- '9': ADD
56
- '10': AFX
57
- '11': CC
58
- '12': CD
59
- '13': DT
60
- '14': EX
61
- '15': FW
62
- '16': HYPH
63
- '17': IN
64
- '18': JJ
65
- '19': JJR
66
- '20': JJS
67
- '21': LS
68
- '22': MD
69
- '23': NFP
70
- '24': NN
71
- '25': NNP
72
- '26': NNPS
73
- '27': NNS
74
- '28': PDT
75
- '29': POS
76
- '30': PRP
77
- '31': PRP$
78
- '32': RB
79
- '33': RBR
80
- '34': RBS
81
- '35': RP
82
- '36': SYM
83
- '37': TO
84
- '38': UH
85
- '39': VB
86
- '40': VBD
87
- '41': VBG
88
- '42': VBN
89
- '43': VBP
90
- '44': VBZ
91
- '45': WDT
92
- '46': WP
93
- '47': WP$
94
- '48': WRB
95
  - name: parse_tree
96
  dtype: string
97
  - name: predicate_lemmas
@@ -155,16 +104,16 @@ dataset_info:
155
  length: 3
156
  splits:
157
  - name: train
158
- num_bytes: 112246121
159
- num_examples: 1940
160
  - name: validation
161
- num_bytes: 14116925
162
- num_examples: 222
163
  - name: test
164
- num_bytes: 14709044
165
- num_examples: 222
166
- download_size: 193644139
167
- dataset_size: 141072090
168
  - config_name: chinese_v4
169
  features:
170
  - name: document_id
@@ -278,17 +227,17 @@ dataset_info:
278
  length: 3
279
  splits:
280
  - name: train
281
- num_bytes: 77195698
282
  num_examples: 1391
283
  - name: validation
284
- num_bytes: 10828169
285
  num_examples: 172
286
  - name: test
287
- num_bytes: 9585138
288
  num_examples: 166
289
- download_size: 193644139
290
- dataset_size: 97609005
291
- - config_name: arabic_v4
292
  features:
293
  - name: document_id
294
  dtype: string
@@ -299,7 +248,60 @@ dataset_info:
299
  - name: words
300
  sequence: string
301
  - name: pos_tags
302
- sequence: string
303
  - name: parse_tree
304
  dtype: string
305
  - name: predicate_lemmas
@@ -363,17 +365,17 @@ dataset_info:
363
  length: 3
364
  splits:
365
  - name: train
366
- num_bytes: 42017761
367
- num_examples: 359
368
  - name: validation
369
- num_bytes: 4859292
370
- num_examples: 44
371
  - name: test
372
- num_bytes: 4900664
373
- num_examples: 44
374
- download_size: 193644139
375
- dataset_size: 51777717
376
- - config_name: english_v12
377
  features:
378
  - name: document_id
379
  dtype: string
@@ -391,53 +393,51 @@ dataset_info:
391
  '1': '``'
392
  '2': $
393
  '3': ''''''
394
- '4': '*'
395
- '5': ','
396
- '6': -LRB-
397
- '7': -RRB-
398
- '8': .
399
- '9': ':'
400
- '10': ADD
401
- '11': AFX
402
- '12': CC
403
- '13': CD
404
- '14': DT
405
- '15': EX
406
- '16': FW
407
- '17': HYPH
408
- '18': IN
409
- '19': JJ
410
- '20': JJR
411
- '21': JJS
412
- '22': LS
413
- '23': MD
414
- '24': NFP
415
- '25': NN
416
- '26': NNP
417
- '27': NNPS
418
- '28': NNS
419
- '29': PDT
420
- '30': POS
421
- '31': PRP
422
- '32': PRP$
423
- '33': RB
424
- '34': RBR
425
- '35': RBS
426
- '36': RP
427
- '37': SYM
428
- '38': TO
429
- '39': UH
430
- '40': VB
431
- '41': VBD
432
- '42': VBG
433
- '43': VBN
434
- '44': VBP
435
- '45': VBZ
436
- '46': VERB
437
- '47': WDT
438
- '48': WP
439
- '49': WP$
440
- '50': WRB
441
  - name: parse_tree
442
  dtype: string
443
  - name: predicate_lemmas
@@ -501,16 +501,49 @@ dataset_info:
501
  length: 3
502
  splits:
503
  - name: train
504
- num_bytes: 174173192
505
- num_examples: 10539
506
  - name: validation
507
- num_bytes: 24264804
508
- num_examples: 1370
509
  - name: test
510
- num_bytes: 18254144
511
- num_examples: 1200
512
- download_size: 193644139
513
- dataset_size: 216692140
514
  ---
515
 
516
  # Dataset Card for CoNLL2012 shared task data based on OntoNotes 5.0
 
29
  tags:
30
  - semantic-role-labeling
31
  dataset_info:
32
+ - config_name: arabic_v4
33
  features:
34
  - name: document_id
35
  dtype: string
 
40
  - name: words
41
  sequence: string
42
  - name: pos_tags
43
+ sequence: string
44
  - name: parse_tree
45
  dtype: string
46
  - name: predicate_lemmas
 
104
  length: 3
105
  splits:
106
  - name: train
107
+ num_bytes: 41944525
108
+ num_examples: 359
109
  - name: validation
110
+ num_bytes: 4849937
111
+ num_examples: 44
112
  - name: test
113
+ num_bytes: 4890923
114
+ num_examples: 44
115
+ download_size: 12754930
116
+ dataset_size: 51685385
117
  - config_name: chinese_v4
118
  features:
119
  - name: document_id
 
227
  length: 3
228
  splits:
229
  - name: train
230
+ num_bytes: 71179293
231
  num_examples: 1391
232
  - name: validation
233
+ num_bytes: 9952651
234
  num_examples: 172
235
  - name: test
236
+ num_bytes: 8850344
237
  num_examples: 166
238
+ download_size: 12563307
239
+ dataset_size: 89982288
240
+ - config_name: english_v12
241
  features:
242
  - name: document_id
243
  dtype: string
 
248
  - name: words
249
  sequence: string
250
  - name: pos_tags
251
+ sequence:
252
+ class_label:
253
+ names:
254
+ '0': XX
255
+ '1': '``'
256
+ '2': $
257
+ '3': ''''''
258
+ '4': '*'
259
+ '5': ','
260
+ '6': -LRB-
261
+ '7': -RRB-
262
+ '8': .
263
+ '9': ':'
264
+ '10': ADD
265
+ '11': AFX
266
+ '12': CC
267
+ '13': CD
268
+ '14': DT
269
+ '15': EX
270
+ '16': FW
271
+ '17': HYPH
272
+ '18': IN
273
+ '19': JJ
274
+ '20': JJR
275
+ '21': JJS
276
+ '22': LS
277
+ '23': MD
278
+ '24': NFP
279
+ '25': NN
280
+ '26': NNP
281
+ '27': NNPS
282
+ '28': NNS
283
+ '29': PDT
284
+ '30': POS
285
+ '31': PRP
286
+ '32': PRP$
287
+ '33': RB
288
+ '34': RBR
289
+ '35': RBS
290
+ '36': RP
291
+ '37': SYM
292
+ '38': TO
293
+ '39': UH
294
+ '40': VB
295
+ '41': VBD
296
+ '42': VBG
297
+ '43': VBN
298
+ '44': VBP
299
+ '45': VBZ
300
+ '46': VERB
301
+ '47': WDT
302
+ '48': WP
303
+ '49': WP$
304
+ '50': WRB
305
  - name: parse_tree
306
  dtype: string
307
  - name: predicate_lemmas
 
365
  length: 3
366
  splits:
367
  - name: train
368
+ num_bytes: 173938007
369
+ num_examples: 10539
370
  - name: validation
371
+ num_bytes: 24249220
372
+ num_examples: 1370
373
  - name: test
374
+ num_bytes: 18240264
375
+ num_examples: 1200
376
+ download_size: 32775165
377
+ dataset_size: 216427491
378
+ - config_name: english_v4
379
  features:
380
  - name: document_id
381
  dtype: string
 
393
  '1': '``'
394
  '2': $
395
  '3': ''''''
396
+ '4': ','
397
+ '5': -LRB-
398
+ '6': -RRB-
399
+ '7': .
400
+ '8': ':'
401
+ '9': ADD
402
+ '10': AFX
403
+ '11': CC
404
+ '12': CD
405
+ '13': DT
406
+ '14': EX
407
+ '15': FW
408
+ '16': HYPH
409
+ '17': IN
410
+ '18': JJ
411
+ '19': JJR
412
+ '20': JJS
413
+ '21': LS
414
+ '22': MD
415
+ '23': NFP
416
+ '24': NN
417
+ '25': NNP
418
+ '26': NNPS
419
+ '27': NNS
420
+ '28': PDT
421
+ '29': POS
422
+ '30': PRP
423
+ '31': PRP$
424
+ '32': RB
425
+ '33': RBR
426
+ '34': RBS
427
+ '35': RP
428
+ '36': SYM
429
+ '37': TO
430
+ '38': UH
431
+ '39': VB
432
+ '40': VBD
433
+ '41': VBG
434
+ '42': VBN
435
+ '43': VBP
436
+ '44': VBZ
437
+ '45': WDT
438
+ '46': WP
439
+ '47': WP$
440
+ '48': WRB
 
 
441
  - name: parse_tree
442
  dtype: string
443
  - name: predicate_lemmas
 
501
  length: 3
502
  splits:
503
  - name: train
504
+ num_bytes: 112145127
505
+ num_examples: 1940
506
  - name: validation
507
+ num_bytes: 14104258
508
+ num_examples: 222
509
  - name: test
510
+ num_bytes: 14696106
511
+ num_examples: 222
512
+ download_size: 21191727
513
+ dataset_size: 140945491
514
+ configs:
515
+ - config_name: arabic_v4
516
+ data_files:
517
+ - split: train
518
+ path: arabic_v4/train-*
519
+ - split: validation
520
+ path: arabic_v4/validation-*
521
+ - split: test
522
+ path: arabic_v4/test-*
523
+ - config_name: chinese_v4
524
+ data_files:
525
+ - split: train
526
+ path: chinese_v4/train-*
527
+ - split: validation
528
+ path: chinese_v4/validation-*
529
+ - split: test
530
+ path: chinese_v4/test-*
531
+ - config_name: english_v12
532
+ data_files:
533
+ - split: train
534
+ path: english_v12/train-*
535
+ - split: validation
536
+ path: english_v12/validation-*
537
+ - split: test
538
+ path: english_v12/test-*
539
+ - config_name: english_v4
540
+ data_files:
541
+ - split: train
542
+ path: english_v4/train-*
543
+ - split: validation
544
+ path: english_v4/validation-*
545
+ - split: test
546
+ path: english_v4/test-*
547
  ---
548
 
549
  # Dataset Card for CoNLL2012 shared task data based on OntoNotes 5.0
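The `configs:` block added above maps each config name to its Parquet shards (`arabic_v4/train-*`, and so on), and the feature schema keeps `pos_tags` as a `ClassLabel` for the English and Chinese configs (plain strings for `arabic_v4`), so the integer tags can be decoded back to the names listed in the YAML. A minimal sketch, under the same assumed repo id as in the example at the top of this PR:

```python
from datasets import load_dataset

# Assumed repo id; see the note at the top of this PR.
ds = load_dataset("conll2012_ontonotesv5", "english_v4", split="validation")

# "sentences" is declared as a list-of-dict feature (as in the original script),
# so its inner feature dict sits at index 0.
pos_label = ds.features["sentences"][0]["pos_tags"].feature  # ClassLabel(names=['XX', '``', ...])

sent = ds[0]["sentences"][0]
print(list(zip(sent["words"], pos_label.int2str(sent["pos_tags"]))))
```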
arabic_v4/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d429f290f09735fe188021e4732457de7d2a51f600ac6d7b7f055d9ba5f9d5f6
3
+ size 1201311
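Each added `.parquet` file is tracked with Git LFS, so the diff only shows the pointer (spec version, sha256 `oid`, and byte `size`) rather than the data itself. A small sketch of fetching the real shard behind a pointer with `huggingface_hub` (repo id assumed, as above):

```python
from huggingface_hub import hf_hub_download

# Downloads (and caches) the actual Parquet shard referenced by the LFS pointer.
# repo_id is an assumption -- use this repository's real <namespace>/<name>.
path = hf_hub_download(
    repo_id="conll2012_ontonotesv5",
    repo_type="dataset",
    filename="arabic_v4/test-00000-of-00001.parquet",
)
print(path)  # local cache path; the file size should match the pointer's `size` (~1.2 MB)
```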
arabic_v4/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8fb2ff01847ef1af190776898a1dac4cb875e86d25150aa7ae04ec61051c2854
3
+ size 10360827
arabic_v4/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:db273e763fa45e2cd85264e0348338d3a2c7c5fe79ccd965756d4cb0452eba57
3
+ size 1192792
chinese_v4/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b203626e1fa70c88f2d94ee5cf12e67625f728948ebdb895fa264cf67219c88
3
+ size 1272387
chinese_v4/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9adfd1abcdcb336d89592f8c19e76a5b402c3b301782f41564bcd9cdaee7e81a
3
+ size 9808971
chinese_v4/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:14b009adc9f9455340a9e5032b8634aa70824f9a0ab737102d2c332d827557a8
3
+ size 1481949
conll2012_ontonotesv5.py DELETED
@@ -1,816 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """CoNLL2012 shared task data based on OntoNotes 5.0"""
16
-
17
- import glob
18
- import os
19
- from collections import defaultdict
20
- from typing import DefaultDict, Iterator, List, Optional, Tuple
21
-
22
- import datasets
23
-
24
-
25
- _CITATION = """\
26
- @inproceedings{pradhan-etal-2013-towards,
27
- title = "Towards Robust Linguistic Analysis using {O}nto{N}otes",
28
- author = {Pradhan, Sameer and
29
- Moschitti, Alessandro and
30
- Xue, Nianwen and
31
- Ng, Hwee Tou and
32
- Bj{\"o}rkelund, Anders and
33
- Uryupina, Olga and
34
- Zhang, Yuchen and
35
- Zhong, Zhi},
36
- booktitle = "Proceedings of the Seventeenth Conference on Computational Natural Language Learning",
37
- month = aug,
38
- year = "2013",
39
- address = "Sofia, Bulgaria",
40
- publisher = "Association for Computational Linguistics",
41
- url = "https://aclanthology.org/W13-3516",
42
- pages = "143--152",
43
- }
44
-
45
- Ralph Weischedel, Martha Palmer, Mitchell Marcus, Eduard Hovy, Sameer Pradhan, \
46
- Lance Ramshaw, Nianwen Xue, Ann Taylor, Jeff Kaufman, Michelle Franchini, \
47
- Mohammed El-Bachouti, Robert Belvin, Ann Houston. \
48
- OntoNotes Release 5.0 LDC2013T19. \
49
- Web Download. Philadelphia: Linguistic Data Consortium, 2013.
50
- """
51
-
52
- _DESCRIPTION = """\
53
- OntoNotes v5.0 is the final version of the OntoNotes corpus, and is a large-scale, multi-genre,
54
- multilingual corpus manually annotated with syntactic, semantic and discourse information.
55
-
56
- This dataset is the extended version of OntoNotes v5.0 used in the CoNLL-2012 shared task.
57
- It includes v4 train/dev and v9 test data for English/Chinese/Arabic and corrected version v12 train/dev/test data (English only).
58
-
59
- The source of data is the Mendeley Data repo [ontonotes-conll2012](https://data.mendeley.com/datasets/zmycy7t9h9), which appears to be the same as the official data, but users should use this dataset at their own risk.
60
-
61
- See also summaries from paperwithcode, [OntoNotes 5.0](https://paperswithcode.com/dataset/ontonotes-5-0) and [CoNLL-2012](https://paperswithcode.com/dataset/conll-2012-1)
62
-
63
- For more detailed info of the dataset like annotation, tag set, etc., you can refer to the documents in the Mendeley repo mentioned above.
64
- """
65
-
66
- _URL = "https://data.mendeley.com/public-files/datasets/zmycy7t9h9/files/b078e1c4-f7a4-4427-be7f-9389967831ef/file_downloaded"
67
-
68
-
69
- class Conll2012Ontonotesv5Config(datasets.BuilderConfig):
70
- """BuilderConfig for the CoNLL formatted OntoNotes dataset."""
71
-
72
- def __init__(self, language=None, conll_version=None, **kwargs):
73
- """BuilderConfig for the CoNLL formatted OntoNotes dataset.
74
-
75
- Args:
76
- language: string, one of the language {"english", "chinese", "arabic"} .
77
- conll_version: string, "v4" or "v12". Note there is only English v12.
78
- **kwargs: keyword arguments forwarded to super.
79
- """
80
- assert language in ["english", "chinese", "arabic"]
81
- assert conll_version in ["v4", "v12"]
82
- if conll_version == "v12":
83
- assert language == "english"
84
- super(Conll2012Ontonotesv5Config, self).__init__(
85
- name=f"{language}_{conll_version}",
86
- description=f"{conll_version} of CoNLL formatted OntoNotes dataset for {language}.",
87
- version=datasets.Version("1.0.0"), # hf dataset script version
88
- **kwargs,
89
- )
90
- self.language = language
91
- self.conll_version = conll_version
92
-
93
-
94
- class Conll2012Ontonotesv5(datasets.GeneratorBasedBuilder):
95
- """The CoNLL formatted OntoNotes dataset."""
96
-
97
- BUILDER_CONFIGS = [
98
- Conll2012Ontonotesv5Config(
99
- language=lang,
100
- conll_version="v4",
101
- )
102
- for lang in ["english", "chinese", "arabic"]
103
- ] + [
104
- Conll2012Ontonotesv5Config(
105
- language="english",
106
- conll_version="v12",
107
- )
108
- ]
109
-
110
- def _info(self):
111
- lang = self.config.language
112
- conll_version = self.config.conll_version
113
- if lang == "arabic":
114
- pos_tag_feature = datasets.Value("string")
115
- else:
116
- tag_set = _POS_TAGS[f"{lang}_{conll_version}"]
117
- pos_tag_feature = datasets.ClassLabel(num_classes=len(tag_set), names=tag_set)
118
-
119
- return datasets.DatasetInfo(
120
- description=_DESCRIPTION,
121
- features=datasets.Features(
122
- {
123
- "document_id": datasets.Value("string"),
124
- "sentences": [
125
- {
126
- "part_id": datasets.Value("int32"),
127
- "words": datasets.Sequence(datasets.Value("string")),
128
- "pos_tags": datasets.Sequence(pos_tag_feature),
129
- "parse_tree": datasets.Value("string"),
130
- "predicate_lemmas": datasets.Sequence(datasets.Value("string")),
131
- "predicate_framenet_ids": datasets.Sequence(datasets.Value("string")),
132
- "word_senses": datasets.Sequence(datasets.Value("float32")),
133
- "speaker": datasets.Value("string"),
134
- "named_entities": datasets.Sequence(
135
- datasets.ClassLabel(num_classes=37, names=_NAMED_ENTITY_TAGS)
136
- ),
137
- "srl_frames": [
138
- {
139
- "verb": datasets.Value("string"),
140
- "frames": datasets.Sequence(datasets.Value("string")),
141
- }
142
- ],
143
- "coref_spans": datasets.Sequence(datasets.Sequence(datasets.Value("int32"), length=3)),
144
- }
145
- ],
146
- }
147
- ),
148
- homepage="https://conll.cemantix.org/2012/introduction.html",
149
- citation=_CITATION,
150
- )
151
-
152
- def _split_generators(self, dl_manager):
153
- lang = self.config.language
154
- conll_version = self.config.conll_version
155
- dl_dir = dl_manager.download_and_extract(_URL)
156
- data_dir = os.path.join(dl_dir, f"conll-2012/{conll_version}/data")
157
-
158
- return [
159
- datasets.SplitGenerator(
160
- name=datasets.Split.TRAIN,
161
- gen_kwargs={"conll_files_directory": os.path.join(data_dir, f"train/data/{lang}")},
162
- ),
163
- datasets.SplitGenerator(
164
- name=datasets.Split.VALIDATION,
165
- gen_kwargs={"conll_files_directory": os.path.join(data_dir, f"development/data/{lang}")},
166
- ),
167
- datasets.SplitGenerator(
168
- name=datasets.Split.TEST,
169
- gen_kwargs={"conll_files_directory": os.path.join(data_dir, f"test/data/{lang}")},
170
- ),
171
- ]
172
-
173
- def _generate_examples(self, conll_files_directory):
174
- conll_files = sorted(glob.glob(os.path.join(conll_files_directory, "**/*gold_conll"), recursive=True))
175
- for idx, conll_file in enumerate(conll_files):
176
- sentences = []
177
- for sent in Ontonotes().sentence_iterator(conll_file):
178
- document_id = sent.document_id
179
- sentences.append(
180
- {
181
- "part_id": sent.sentence_id, # should be part id, according to https://conll.cemantix.org/2012/data.html
182
- "words": sent.words,
183
- "pos_tags": sent.pos_tags,
184
- "parse_tree": sent.parse_tree,
185
- "predicate_lemmas": sent.predicate_lemmas,
186
- "predicate_framenet_ids": sent.predicate_framenet_ids,
187
- "word_senses": sent.word_senses,
188
- "speaker": sent.speakers[0],
189
- "named_entities": sent.named_entities,
190
- "srl_frames": [{"verb": f[0], "frames": f[1]} for f in sent.srl_frames],
191
- "coref_spans": [(c[0], *c[1]) for c in sent.coref_spans],
192
- }
193
- )
194
- yield idx, {"document_id": document_id, "sentences": sentences}
195
-
196
-
197
- # --------------------------------------------------------------------------------------------------------
198
- # Tag set
199
- _NAMED_ENTITY_TAGS = [
200
- "O", # out of named entity
201
- "B-PERSON",
202
- "I-PERSON",
203
- "B-NORP",
204
- "I-NORP",
205
- "B-FAC", # FACILITY
206
- "I-FAC",
207
- "B-ORG", # ORGANIZATION
208
- "I-ORG",
209
- "B-GPE",
210
- "I-GPE",
211
- "B-LOC",
212
- "I-LOC",
213
- "B-PRODUCT",
214
- "I-PRODUCT",
215
- "B-DATE",
216
- "I-DATE",
217
- "B-TIME",
218
- "I-TIME",
219
- "B-PERCENT",
220
- "I-PERCENT",
221
- "B-MONEY",
222
- "I-MONEY",
223
- "B-QUANTITY",
224
- "I-QUANTITY",
225
- "B-ORDINAL",
226
- "I-ORDINAL",
227
- "B-CARDINAL",
228
- "I-CARDINAL",
229
- "B-EVENT",
230
- "I-EVENT",
231
- "B-WORK_OF_ART",
232
- "I-WORK_OF_ART",
233
- "B-LAW",
234
- "I-LAW",
235
- "B-LANGUAGE",
236
- "I-LANGUAGE",
237
- ]
238
-
239
- _POS_TAGS = {
240
- "english_v4": [
241
- "XX", # missing
242
- "``",
243
- "$",
244
- "''",
245
- ",",
246
- "-LRB-", # (
247
- "-RRB-", # )
248
- ".",
249
- ":",
250
- "ADD",
251
- "AFX",
252
- "CC",
253
- "CD",
254
- "DT",
255
- "EX",
256
- "FW",
257
- "HYPH",
258
- "IN",
259
- "JJ",
260
- "JJR",
261
- "JJS",
262
- "LS",
263
- "MD",
264
- "NFP",
265
- "NN",
266
- "NNP",
267
- "NNPS",
268
- "NNS",
269
- "PDT",
270
- "POS",
271
- "PRP",
272
- "PRP$",
273
- "RB",
274
- "RBR",
275
- "RBS",
276
- "RP",
277
- "SYM",
278
- "TO",
279
- "UH",
280
- "VB",
281
- "VBD",
282
- "VBG",
283
- "VBN",
284
- "VBP",
285
- "VBZ",
286
- "WDT",
287
- "WP",
288
- "WP$",
289
- "WRB",
290
- ], # 49
291
- "english_v12": [
292
- "XX", # missing
293
- "``",
294
- "$",
295
- "''",
296
- "*",
297
- ",",
298
- "-LRB-", # (
299
- "-RRB-", # )
300
- ".",
301
- ":",
302
- "ADD",
303
- "AFX",
304
- "CC",
305
- "CD",
306
- "DT",
307
- "EX",
308
- "FW",
309
- "HYPH",
310
- "IN",
311
- "JJ",
312
- "JJR",
313
- "JJS",
314
- "LS",
315
- "MD",
316
- "NFP",
317
- "NN",
318
- "NNP",
319
- "NNPS",
320
- "NNS",
321
- "PDT",
322
- "POS",
323
- "PRP",
324
- "PRP$",
325
- "RB",
326
- "RBR",
327
- "RBS",
328
- "RP",
329
- "SYM",
330
- "TO",
331
- "UH",
332
- "VB",
333
- "VBD",
334
- "VBG",
335
- "VBN",
336
- "VBP",
337
- "VBZ",
338
- "VERB",
339
- "WDT",
340
- "WP",
341
- "WP$",
342
- "WRB",
343
- ], # 51
344
- "chinese_v4": [
345
- "X", # missing
346
- "AD",
347
- "AS",
348
- "BA",
349
- "CC",
350
- "CD",
351
- "CS",
352
- "DEC",
353
- "DEG",
354
- "DER",
355
- "DEV",
356
- "DT",
357
- "ETC",
358
- "FW",
359
- "IJ",
360
- "INF",
361
- "JJ",
362
- "LB",
363
- "LC",
364
- "M",
365
- "MSP",
366
- "NN",
367
- "NR",
368
- "NT",
369
- "OD",
370
- "ON",
371
- "P",
372
- "PN",
373
- "PU",
374
- "SB",
375
- "SP",
376
- "URL",
377
- "VA",
378
- "VC",
379
- "VE",
380
- "VV",
381
- ], # 36
382
- }
383
-
384
- # --------------------------------------------------------------------------------------------------------
385
- # The CoNLL(2012) file reader
386
- # Modified the original code to get rid of extra package dependency.
387
- # Original code: https://github.com/allenai/allennlp-models/blob/main/allennlp_models/common/ontonotes.py
388
-
389
-
390
- class OntonotesSentence:
391
- """
392
- A class representing the annotations available for a single CONLL formatted sentence.
393
- # Parameters
394
- document_id : `str`
395
- This is a variation on the document filename
396
- sentence_id : `int`
397
- The integer ID of the sentence within a document.
398
- words : `List[str]`
399
- This is the tokens as segmented/tokenized in the bank.
400
- pos_tags : `List[str]`
401
- This is the Penn-Treebank-style part of speech. When parse information is missing,
402
- all parts of speech except the one for which there is some sense or proposition
403
- annotation are marked with a XX tag. The verb is marked with just a VERB tag.
404
- parse_tree : `nltk.Tree`
405
- An nltk Tree representing the parse. It includes POS tags as pre-terminal nodes.
406
- When the parse information is missing, the parse will be `None`.
407
- predicate_lemmas : `List[Optional[str]]`
408
- The predicate lemma of the words for which we have semantic role
409
- information or word sense information. All other indices are `None`.
410
- predicate_framenet_ids : `List[Optional[int]]`
411
- The PropBank frameset ID of the lemmas in `predicate_lemmas`, or `None`.
412
- word_senses : `List[Optional[float]]`
413
- The word senses for the words in the sentence, or `None`. These are floats
414
- because the word sense can have values after the decimal, like `1.1`.
415
- speakers : `List[Optional[str]]`
416
- The speaker information for the words in the sentence, if present, or `None`
417
- This is the speaker or author name where available. Mostly in Broadcast Conversation
418
- and Web Log data. When not available the rows are marked with an "-".
419
- named_entities : `List[str]`
420
- The BIO tags for named entities in the sentence.
421
- srl_frames : `List[Tuple[str, List[str]]]`
422
- A dictionary keyed by the verb in the sentence for the given
423
- Propbank frame labels, in a BIO format.
424
- coref_spans : `Set[TypedSpan]`
425
- The spans for entity mentions involved in coreference resolution within the sentence.
426
- Each element is a tuple composed of (cluster_id, (start_index, end_index)). Indices
427
- are `inclusive`.
428
- """
429
-
430
- def __init__(
431
- self,
432
- document_id: str,
433
- sentence_id: int,
434
- words: List[str],
435
- pos_tags: List[str],
436
- parse_tree: Optional[str],
437
- predicate_lemmas: List[Optional[str]],
438
- predicate_framenet_ids: List[Optional[str]],
439
- word_senses: List[Optional[float]],
440
- speakers: List[Optional[str]],
441
- named_entities: List[str],
442
- srl_frames: List[Tuple[str, List[str]]],
443
- coref_spans,
444
- ) -> None:
445
-
446
- self.document_id = document_id
447
- self.sentence_id = sentence_id
448
- self.words = words
449
- self.pos_tags = pos_tags
450
- self.parse_tree = parse_tree
451
- self.predicate_lemmas = predicate_lemmas
452
- self.predicate_framenet_ids = predicate_framenet_ids
453
- self.word_senses = word_senses
454
- self.speakers = speakers
455
- self.named_entities = named_entities
456
- self.srl_frames = srl_frames
457
- self.coref_spans = coref_spans
458
-
459
-
460
- class Ontonotes:
461
- """
462
- This `DatasetReader` is designed to read in the English OntoNotes v5.0 data
463
- in the format used by the CoNLL 2011/2012 shared tasks. In order to use this
464
- Reader, you must follow the instructions provided [here (v12 release):]
465
- (https://cemantix.org/data/ontonotes.html), which will allow you to download
466
- the CoNLL style annotations for the OntoNotes v5.0 release -- LDC2013T19.tgz
467
- obtained from LDC.
468
- Once you have run the scripts on the extracted data, you will have a folder
469
- structured as follows:
470
- ```
471
- conll-formatted-ontonotes-5.0/
472
- ── data
473
- ├── development
474
- └── data
475
- └── english
476
- └── annotations
477
- ├── bc
478
- ├── bn
479
- ├── mz
480
- ├── nw
481
- ├── pt
482
- ├── tc
483
- └── wb
484
- ├── test
485
- └── data
486
- └── english
487
- └── annotations
488
- ├── bc
489
- ├── bn
490
- ├── mz
491
- ├── nw
492
- ├── pt
493
- ├── tc
494
- └── wb
495
- └── train
496
- └── data
497
- └── english
498
- └── annotations
499
- ├── bc
500
- ├── bn
501
- ├── mz
502
- ├── nw
503
- ├── pt
504
- ├── tc
505
- └── wb
506
- ```
507
- The file path provided to this class can then be any of the train, test or development
508
- directories(or the top level data directory, if you are not utilizing the splits).
509
- The data has the following format, ordered by column.
510
- 1. Document ID : `str`
511
- This is a variation on the document filename
512
- 2. Part number : `int`
513
- Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
514
- 3. Word number : `int`
515
- This is the word index of the word in that sentence.
516
- 4. Word : `str`
517
- This is the token as segmented/tokenized in the Treebank. Initially the `*_skel` file
518
- contain the placeholder [WORD] which gets replaced by the actual token from the
519
- Treebank which is part of the OntoNotes release.
520
- 5. POS Tag : `str`
521
- This is the Penn Treebank style part of speech. When parse information is missing,
522
- all parts of speech except the one for which there is some sense or proposition
523
- annotation are marked with a XX tag. The verb is marked with just a VERB tag.
524
- 6. Parse bit : `str`
525
- This is the bracketed structure broken before the first open parenthesis in the parse,
526
- and the word/part-of-speech leaf replaced with a `*`. When the parse information is
527
- missing, the first word of a sentence is tagged as `(TOP*` and the last word is tagged
528
- as `*)` and all intermediate words are tagged with a `*`.
529
- 7. Predicate lemma : `str`
530
- The predicate lemma is mentioned for the rows for which we have semantic role
531
- information or word sense information. All other rows are marked with a "-".
532
- 8. Predicate Frameset ID : `int`
533
- The PropBank frameset ID of the predicate in Column 7.
534
- 9. Word sense : `float`
535
- This is the word sense of the word in Column 3.
536
- 10. Speaker/Author : `str`
537
- This is the speaker or author name where available. Mostly in Broadcast Conversation
538
- and Web Log data. When not available the rows are marked with an "-".
539
- 11. Named Entities : `str`
540
- This column identifies the spans representing various named entities. For documents
541
- which do not have named entity annotation, each line is represented with an `*`.
542
- 12. Predicate Arguments : `str`
543
- There is one column each of predicate argument structure information for the predicate
544
- mentioned in Column 7. If there are no predicates tagged in a sentence this is a
545
- single column with all rows marked with an `*`.
546
- -1. Co-reference : `str`
547
- Co-reference chain information encoded in a parenthesis structure. For documents that do
548
- not have co-reference annotations, each line is represented with a "-".
549
- """
550
-
551
- def dataset_iterator(self, file_path: str) -> Iterator[OntonotesSentence]:
552
- """
553
- An iterator over the entire dataset, yielding all sentences processed.
554
- """
555
- for conll_file in self.dataset_path_iterator(file_path):
556
- yield from self.sentence_iterator(conll_file)
557
-
558
- @staticmethod
559
- def dataset_path_iterator(file_path: str) -> Iterator[str]:
560
- """
561
- An iterator returning file_paths in a directory
562
- containing CONLL-formatted files.
563
- """
564
- for root, _, files in list(os.walk(file_path)):
565
- for data_file in sorted(files):
566
- # These are a relic of the dataset pre-processing. Every
567
- # file will be duplicated - one file called filename.gold_skel
568
- # and one generated from the preprocessing called filename.gold_conll.
569
- if not data_file.endswith("gold_conll"):
570
- continue
571
-
572
- yield os.path.join(root, data_file)
573
-
574
- def dataset_document_iterator(self, file_path: str) -> Iterator[List[OntonotesSentence]]:
575
- """
576
- An iterator over CONLL formatted files which yields documents, regardless
577
- of the number of document annotations in a particular file. This is useful
578
- for conll data which has been preprocessed, such as the preprocessing which
579
- takes place for the 2012 CONLL Coreference Resolution task.
580
- """
581
- with open(file_path, "r", encoding="utf8") as open_file:
582
- conll_rows = []
583
- document: List[OntonotesSentence] = []
584
- for line in open_file:
585
- line = line.strip()
586
- if line != "" and not line.startswith("#"):
587
- # Non-empty line. Collect the annotation.
588
- conll_rows.append(line)
589
- else:
590
- if conll_rows:
591
- document.append(self._conll_rows_to_sentence(conll_rows))
592
- conll_rows = []
593
- if line.startswith("#end document"):
594
- yield document
595
- document = []
596
- if document:
597
- # Collect any stragglers or files which might not
598
- # have the '#end document' format for the end of the file.
599
- yield document
600
-
601
- def sentence_iterator(self, file_path: str) -> Iterator[OntonotesSentence]:
602
- """
603
- An iterator over the sentences in an individual CONLL formatted file.
604
- """
605
- for document in self.dataset_document_iterator(file_path):
606
- for sentence in document:
607
- yield sentence
608
-
609
- def _conll_rows_to_sentence(self, conll_rows: List[str]) -> OntonotesSentence:
610
- document_id: str = None
611
- sentence_id: int = None
612
- # The words in the sentence.
613
- sentence: List[str] = []
614
- # The pos tags of the words in the sentence.
615
- pos_tags: List[str] = []
616
- # the pieces of the parse tree.
617
- parse_pieces: List[str] = []
618
- # The lemmatised form of the words in the sentence which
619
- # have SRL or word sense information.
620
- predicate_lemmas: List[str] = []
621
- # The FrameNet ID of the predicate.
622
- predicate_framenet_ids: List[str] = []
623
- # The sense of the word, if available.
624
- word_senses: List[float] = []
625
- # The current speaker, if available.
626
- speakers: List[str] = []
627
-
628
- verbal_predicates: List[str] = []
629
- span_labels: List[List[str]] = []
630
- current_span_labels: List[str] = []
631
-
632
- # Cluster id -> List of (start_index, end_index) spans.
633
- clusters: DefaultDict[int, List[Tuple[int, int]]] = defaultdict(list)
634
- # Cluster id -> List of start_indices which are open for this id.
635
- coref_stacks: DefaultDict[int, List[int]] = defaultdict(list)
636
-
637
- for index, row in enumerate(conll_rows):
638
- conll_components = row.split()
639
-
640
- document_id = conll_components[0]
641
- sentence_id = int(conll_components[1])
642
- word = conll_components[3]
643
- pos_tag = conll_components[4]
644
- parse_piece = conll_components[5]
645
-
646
- # Replace brackets in text and pos tags
647
- # with a different token for parse trees.
648
- if pos_tag != "XX" and word != "XX":
649
- if word == "(":
650
- parse_word = "-LRB-"
651
- elif word == ")":
652
- parse_word = "-RRB-"
653
- else:
654
- parse_word = word
655
- if pos_tag == "(":
656
- pos_tag = "-LRB-"
657
- if pos_tag == ")":
658
- pos_tag = "-RRB-"
659
- (left_brackets, right_hand_side) = parse_piece.split("*")
660
- # only keep ')' if there are nested brackets with nothing in them.
661
- right_brackets = right_hand_side.count(")") * ")"
662
- parse_piece = f"{left_brackets} ({pos_tag} {parse_word}) {right_brackets}"
663
- else:
664
- # There are some bad annotations in the CONLL data.
665
- # They contain no information, so to make this explicit,
666
- # we just set the parse piece to be None which will result
667
- # in the overall parse tree being None.
668
- parse_piece = None
669
-
670
- lemmatised_word = conll_components[6]
671
- framenet_id = conll_components[7]
672
- word_sense = conll_components[8]
673
- speaker = conll_components[9]
674
-
675
- if not span_labels:
676
- # If this is the first word in the sentence, create
677
- # empty lists to collect the NER and SRL BIO labels.
678
- # We can't do this upfront, because we don't know how many
679
- # components we are collecting, as a sentence can have
680
- # variable numbers of SRL frames.
681
- span_labels = [[] for _ in conll_components[10:-1]]
682
- # Create variables representing the current label for each label
683
- # sequence we are collecting.
684
- current_span_labels = [None for _ in conll_components[10:-1]]
685
-
686
- self._process_span_annotations_for_word(conll_components[10:-1], span_labels, current_span_labels)
687
-
688
- # If any annotation marks this word as a verb predicate,
689
- # we need to record its index. This also has the side effect
690
- # of ordering the verbal predicates by their location in the
691
- # sentence, automatically aligning them with the annotations.
692
- word_is_verbal_predicate = any("(V" in x for x in conll_components[11:-1])
693
- if word_is_verbal_predicate:
694
- verbal_predicates.append(word)
695
-
696
- self._process_coref_span_annotations_for_word(conll_components[-1], index, clusters, coref_stacks)
697
-
698
- sentence.append(word)
699
- pos_tags.append(pos_tag)
700
- parse_pieces.append(parse_piece)
701
- predicate_lemmas.append(lemmatised_word if lemmatised_word != "-" else None)
702
- predicate_framenet_ids.append(framenet_id if framenet_id != "-" else None)
703
- word_senses.append(float(word_sense) if word_sense != "-" else None)
704
- speakers.append(speaker if speaker != "-" else None)
705
-
706
- named_entities = span_labels[0]
707
- srl_frames = [(predicate, labels) for predicate, labels in zip(verbal_predicates, span_labels[1:])]
708
-
709
- if all(parse_pieces):
710
- parse_tree = "".join(parse_pieces)
711
- else:
712
- parse_tree = None
713
- coref_span_tuples = {(cluster_id, span) for cluster_id, span_list in clusters.items() for span in span_list}
714
- return OntonotesSentence(
715
- document_id,
716
- sentence_id,
717
- sentence,
718
- pos_tags,
719
- parse_tree,
720
- predicate_lemmas,
721
- predicate_framenet_ids,
722
- word_senses,
723
- speakers,
724
- named_entities,
725
- srl_frames,
726
- coref_span_tuples,
727
- )
728
-
729
- @staticmethod
730
- def _process_coref_span_annotations_for_word(
731
- label: str,
732
- word_index: int,
733
- clusters: DefaultDict[int, List[Tuple[int, int]]],
734
- coref_stacks: DefaultDict[int, List[int]],
735
- ) -> None:
736
- """
737
- For a given coref label, add it to a currently open span(s), complete a span(s) or
738
- ignore it, if it is outside of all spans. This method mutates the clusters and coref_stacks
739
- dictionaries.
740
- # Parameters
741
- label : `str`
742
- The coref label for this word.
743
- word_index : `int`
744
- The word index into the sentence.
745
- clusters : `DefaultDict[int, List[Tuple[int, int]]]`
746
- A dictionary mapping cluster ids to lists of inclusive spans into the
747
- sentence.
748
- coref_stacks : `DefaultDict[int, List[int]]`
749
- Stacks for each cluster id to hold the start indices of active spans (spans
750
- which we are inside of when processing a given word). Spans with the same id
751
- can be nested, which is why we collect these opening spans on a stack, e.g:
752
- [Greg, the baker who referred to [himself]_ID1 as 'the bread man']_ID1
753
- """
754
- if label != "-":
755
- for segment in label.split("|"):
756
- # The conll representation of coref spans allows spans to
757
- # overlap. If spans end or begin at the same word, they are
758
- # separated by a "|".
759
- if segment[0] == "(":
760
- # The span begins at this word.
761
- if segment[-1] == ")":
762
- # The span begins and ends at this word (single word span).
763
- cluster_id = int(segment[1:-1])
764
- clusters[cluster_id].append((word_index, word_index))
765
- else:
766
- # The span is starting, so we record the index of the word.
767
- cluster_id = int(segment[1:])
768
- coref_stacks[cluster_id].append(word_index)
769
- else:
770
- # The span for this id is ending, but didn't start at this word.
771
- # Retrieve the start index from the document state and
772
- # add the span to the clusters for this id.
773
- cluster_id = int(segment[:-1])
774
- start = coref_stacks[cluster_id].pop()
775
- clusters[cluster_id].append((start, word_index))
776
-
777
- @staticmethod
778
- def _process_span_annotations_for_word(
779
- annotations: List[str],
780
- span_labels: List[List[str]],
781
- current_span_labels: List[Optional[str]],
782
- ) -> None:
783
- """
784
- Given a sequence of different label types for a single word and the current
785
- span label we are inside, compute the BIO tag for each label and append to a list.
786
- # Parameters
787
- annotations : `List[str]`
788
- A list of labels to compute BIO tags for.
789
- span_labels : `List[List[str]]`
790
- A list of lists, one for each annotation, to incrementally collect
791
- the BIO tags for a sequence.
792
- current_span_labels : `List[Optional[str]]`
793
- The currently open span per annotation type, or `None` if there is no open span.
794
- """
795
- for annotation_index, annotation in enumerate(annotations):
796
- # strip all bracketing information to
797
- # get the actual propbank label.
798
- label = annotation.strip("()*")
799
-
800
- if "(" in annotation:
801
- # Entering into a span for a particular semantic role label.
802
- # We append the label and set the current span for this annotation.
803
- bio_label = "B-" + label
804
- span_labels[annotation_index].append(bio_label)
805
- current_span_labels[annotation_index] = label
806
- elif current_span_labels[annotation_index] is not None:
807
- # If there's no '(' token, but the current_span_label is not None,
808
- # then we are inside a span.
809
- bio_label = "I-" + current_span_labels[annotation_index]
810
- span_labels[annotation_index].append(bio_label)
811
- else:
812
- # We're outside a span.
813
- span_labels[annotation_index].append("O")
814
- # Exiting a span, so we reset the current span label for this annotation.
815
- if ")" in annotation:
816
- current_span_labels[annotation_index] = None
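The deleted script above yielded one example per document, with nested per-sentence annotations (`words`, `pos_tags`, `parse_tree`, `speaker`, `named_entities`, `srl_frames`, and `coref_spans` triples of `(cluster_id, start, end)` with inclusive token indices). The Parquet conversion preserves that schema, so code written against the script's output should keep working. A minimal sketch, under the same repo-id assumption as the earlier examples:

```python
from datasets import load_dataset

# Assumed repo id, as in the earlier sketches.
ds = load_dataset("conll2012_ontonotesv5", "english_v4", split="test")

doc = ds[0]
print(doc["document_id"])
for sent in doc["sentences"][:2]:
    print(" ".join(sent["words"]))
    for frame in sent["srl_frames"]:                    # one BIO tag sequence per verbal predicate
        print("  verb:", frame["verb"], "->", frame["frames"])
    print("  coref spans (cluster_id, start, end):", sent["coref_spans"])
```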
 
english_v12/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f849e02a1142b421f3701eda66dad530950f75679b9e39b425efe1156498e673
3
+ size 2811953
english_v12/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:685101b33706d67cb1e511f36281ba330a120fabdd98544a91e4c61ddd4e8359
3
+ size 26195140
english_v12/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1dd2d7354adcb47bb5f79fb68a3f36c50f2edf8d615d2479d5086522f68dd06
3
+ size 3768072
english_v4/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ca69672844159844c28d6345401b4d1ea4aac0357debdd9a919da8f722d9244
3
+ size 2235904
english_v4/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a47ea74360075816c32abda74a93bb73286705ba09e0fd53cc2b76a4c22b4e4a
3
+ size 16748351
english_v4/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec796c171b3cd05933cc24144843843c8938874b19c974609377f48a6fec86cc
3
+ size 2207472
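The shards can also be read without the `datasets` library, for example with pandas over the Hub's `hf://` filesystem (which requires `huggingface_hub` to be installed). A sketch, again assuming the repo id:

```python
import pandas as pd

# hf:// paths resolve through huggingface_hub's filesystem integration.
# The dataset id in the URL is an assumption -- adjust it to this repository's actual id.
df = pd.read_parquet(
    "hf://datasets/conll2012_ontonotesv5/english_v4/validation-00000-of-00001.parquet"
)

print(len(df))               # should match num_examples: 222 in the README metadata
print(df.columns.tolist())   # ['document_id', 'sentences']
```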