Convert dataset to Parquet

#16, opened by lhoestq (HF Staff)
README.md CHANGED
@@ -29,6 +29,46 @@ tags:
 - NLU
 - natural language understanding
 dataset_info:
+- config_name: axb
+  features:
+  - name: sentence1
+    dtype: string
+  - name: sentence2
+    dtype: string
+  - name: idx
+    dtype: int32
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': entailment
+          '1': not_entailment
+  splits:
+  - name: test
+    num_bytes: 237694
+    num_examples: 1104
+  download_size: 80924
+  dataset_size: 237694
+- config_name: axg
+  features:
+  - name: premise
+    dtype: string
+  - name: hypothesis
+    dtype: string
+  - name: idx
+    dtype: int32
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': entailment
+          '1': not_entailment
+  splits:
+  - name: test
+    num_bytes: 53348
+    num_examples: 356
+  download_size: 14284
+  dataset_size: 53348
 - config_name: boolq
   features:
   - name: question
@@ -41,20 +81,20 @@ dataset_info:
     dtype:
       class_label:
         names:
-          '0': 'False'
-          '1': 'True'
+        - 'False'
+        - 'True'
   splits:
-  - name: test
-    num_bytes: 2107997
-    num_examples: 3245
   - name: train
-    num_bytes: 6179206
+    num_bytes: 6173303
     num_examples: 9427
   - name: validation
-    num_bytes: 2118505
+    num_bytes: 2116452
     num_examples: 3270
-  download_size: 4118001
-  dataset_size: 10405708
+  - name: test
+    num_bytes: 2105959
+    num_examples: 3245
+  download_size: 6470231
+  dataset_size: 10395714
 - config_name: cb
   features:
   - name: premise
@@ -71,17 +111,17 @@ dataset_info:
           '1': contradiction
           '2': neutral
   splits:
-  - name: test
-    num_bytes: 93660
-    num_examples: 250
   - name: train
-    num_bytes: 87218
+    num_bytes: 87050
     num_examples: 250
   - name: validation
-    num_bytes: 21894
+    num_bytes: 21851
     num_examples: 56
-  download_size: 75482
-  dataset_size: 202772
+  - name: test
+    num_bytes: 93492
+    num_examples: 250
+  download_size: 139513
+  dataset_size: 202393
 - config_name: copa
   features:
   - name: premise
@@ -101,17 +141,17 @@ dataset_info:
           '0': choice1
           '1': choice2
   splits:
-  - name: test
-    num_bytes: 60303
-    num_examples: 500
   - name: train
-    num_bytes: 49599
+    num_bytes: 49233
     num_examples: 400
   - name: validation
-    num_bytes: 12586
+    num_bytes: 12479
     num_examples: 100
-  download_size: 43986
-  dataset_size: 122488
+  - name: test
+    num_bytes: 59846
+    num_examples: 500
+  download_size: 86141
+  dataset_size: 121558
 - config_name: multirc
   features:
   - name: paragraph
@@ -132,20 +172,20 @@ dataset_info:
     dtype:
       class_label:
         names:
-          '0': 'False'
-          '1': 'True'
+        - 'False'
+        - 'True'
   splits:
-  - name: test
-    num_bytes: 14996451
-    num_examples: 9693
   - name: train
-    num_bytes: 46213579
+    num_bytes: 46182913
     num_examples: 27243
   - name: validation
-    num_bytes: 7758918
+    num_bytes: 7753452
     num_examples: 4848
-  download_size: 1116225
-  dataset_size: 68968948
+  - name: test
+    num_bytes: 14985531
+    num_examples: 9693
+  download_size: 2595545
+  dataset_size: 68921896
 - config_name: record
   features:
   - name: passage
@@ -172,16 +212,16 @@ dataset_info:
     dtype: int32
   splits:
   - name: train
-    num_bytes: 179232052
+    num_bytes: 178757646
     num_examples: 100730
   - name: validation
-    num_bytes: 17479084
+    num_bytes: 17432944
     num_examples: 10000
   - name: test
-    num_bytes: 17200575
+    num_bytes: 17154943
    num_examples: 10000
-  download_size: 51757880
-  dataset_size: 213911711
+  download_size: 91157911
+  dataset_size: 213345533
 - config_name: rte
   features:
   - name: premise
@@ -197,17 +237,17 @@ dataset_info:
           '0': entailment
           '1': not_entailment
   splits:
-  - name: test
-    num_bytes: 975799
-    num_examples: 3000
   - name: train
-    num_bytes: 848745
+    num_bytes: 847177
     num_examples: 2490
   - name: validation
-    num_bytes: 90899
+    num_bytes: 90716
     num_examples: 277
-  download_size: 750920
-  dataset_size: 1915443
+  - name: test
+    num_bytes: 973916
+    num_examples: 3000
+  download_size: 1277939
+  dataset_size: 1911809
 - config_name: wic
   features:
   - name: word
@@ -230,20 +270,20 @@ dataset_info:
     dtype:
       class_label:
         names:
-          '0': 'False'
-          '1': 'True'
+        - 'False'
+        - 'True'
   splits:
-  - name: test
-    num_bytes: 180593
-    num_examples: 1400
   - name: train
-    num_bytes: 665183
+    num_bytes: 658381
     num_examples: 5428
   - name: validation
-    num_bytes: 82623
+    num_bytes: 81811
     num_examples: 638
-  download_size: 396213
-  dataset_size: 928399
+  - name: test
+    num_bytes: 178831
+    num_examples: 1400
+  download_size: 597226
+  dataset_size: 919023
 - config_name: wsc
   features:
   - name: text
@@ -262,20 +302,20 @@ dataset_info:
     dtype:
       class_label:
         names:
-          '0': 'False'
-          '1': 'True'
+        - 'False'
+        - 'True'
   splits:
-  - name: test
-    num_bytes: 31572
-    num_examples: 146
   - name: train
-    num_bytes: 89883
+    num_bytes: 89311
     num_examples: 554
   - name: validation
-    num_bytes: 21637
+    num_bytes: 21521
     num_examples: 104
-  download_size: 32751
-  dataset_size: 143092
+  - name: test
+    num_bytes: 31408
+    num_examples: 146
+  download_size: 49592
+  dataset_size: 142240
 - config_name: wsc.fixed
   features:
   - name: text
@@ -294,60 +334,101 @@ dataset_info:
     dtype:
       class_label:
         names:
-          '0': 'False'
-          '1': 'True'
+        - 'False'
+        - 'True'
   splits:
-  - name: test
-    num_bytes: 31568
-    num_examples: 146
   - name: train
-    num_bytes: 89883
+    num_bytes: 89311
     num_examples: 554
   - name: validation
-    num_bytes: 21637
+    num_bytes: 21521
     num_examples: 104
-  download_size: 32751
-  dataset_size: 143088
-- config_name: axb
-  features:
-  - name: sentence1
-    dtype: string
-  - name: sentence2
-    dtype: string
-  - name: idx
-    dtype: int32
-  - name: label
-    dtype:
-      class_label:
-        names:
-          '0': entailment
-          '1': not_entailment
-  splits:
   - name: test
-    num_bytes: 238392
-    num_examples: 1104
-  download_size: 33950
-  dataset_size: 238392
+    num_bytes: 31404
+    num_examples: 146
+  download_size: 49706
+  dataset_size: 142236
+configs:
+- config_name: axb
+  data_files:
+  - split: test
+    path: axb/test-*
 - config_name: axg
-  features:
-  - name: premise
-    dtype: string
-  - name: hypothesis
-    dtype: string
-  - name: idx
-    dtype: int32
-  - name: label
-    dtype:
-      class_label:
-        names:
-          '0': entailment
-          '1': not_entailment
-  splits:
-  - name: test
-    num_bytes: 53581
-    num_examples: 356
-  download_size: 10413
-  dataset_size: 53581
+  data_files:
+  - split: test
+    path: axg/test-*
+- config_name: boolq
+  data_files:
+  - split: train
+    path: boolq/train-*
+  - split: validation
+    path: boolq/validation-*
+  - split: test
+    path: boolq/test-*
+- config_name: cb
+  data_files:
+  - split: train
+    path: cb/train-*
+  - split: validation
+    path: cb/validation-*
+  - split: test
+    path: cb/test-*
+- config_name: copa
+  data_files:
+  - split: train
+    path: copa/train-*
+  - split: validation
+    path: copa/validation-*
+  - split: test
+    path: copa/test-*
+- config_name: multirc
+  data_files:
+  - split: train
+    path: multirc/train-*
+  - split: validation
+    path: multirc/validation-*
+  - split: test
+    path: multirc/test-*
+- config_name: record
+  data_files:
+  - split: train
+    path: record/train-*
+  - split: validation
+    path: record/validation-*
+  - split: test
+    path: record/test-*
+- config_name: rte
+  data_files:
+  - split: train
+    path: rte/train-*
+  - split: validation
+    path: rte/validation-*
+  - split: test
+    path: rte/test-*
+- config_name: wic
+  data_files:
+  - split: train
+    path: wic/train-*
+  - split: validation
+    path: wic/validation-*
+  - split: test
+    path: wic/test-*
+- config_name: wsc
+  data_files:
+  - split: train
+    path: wsc/train-*
+  - split: validation
+    path: wsc/validation-*
+  - split: test
+    path: wsc/test-*
+- config_name: wsc.fixed
+  data_files:
+  - split: train
+    path: wsc.fixed/train-*
+  - split: validation
+    path: wsc.fixed/validation-*
+  - split: test
+    path: wsc.fixed/test-*
 ---
 
 # Dataset Card for "super_glue"
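With this change, the datasets library resolves each configuration directly to its Parquet shards through the configs/data_files mapping above, instead of executing a loading script. A minimal loading sketch (assuming the dataset id super_glue on the Hub):

from datasets import load_dataset

# The `configs` entry for boolq maps its splits to the glob patterns
# boolq/train-*, boolq/validation-* and boolq/test-*.
boolq = load_dataset("super_glue", "boolq")

print(boolq)                    # DatasetDict with train, validation and test splits
print(boolq["train"].features)  # label is a ClassLabel over ['False', 'True']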
axb/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:655cadf2b23ebf0147984c8809811a82453137ee22f131d802137862ff3f2501
+size 80924
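The Parquet shards are stored with Git LFS, so each ADDED file shows only its three-line pointer: the LFS spec version, the SHA-256 of the content, and the byte size. Note that size 80924 here matches the download_size: 80924 recorded for axb in the card. A shard can also be read directly, e.g. with pandas over the hf:// filesystem from huggingface_hub (a sketch, assuming the dataset id super_glue):

import pandas as pd

# pandas resolves hf:// paths through huggingface_hub's fsspec integration.
df = pd.read_parquet("hf://datasets/super_glue/axb/test-00000-of-00001.parquet")
print(df.shape)  # expected (1104, 4): sentence1, sentence2, idx, label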
axg/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ed3eddc728664a79b7b25cbec8aea1528d917f04870116d2ae84fdbc1f80028
+size 14284
boolq/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95a74cd7c8fdbae1aa9ba767fa62993fcc2bd582ee5512f42a8c6c89b88a7c3d
+size 1309912
boolq/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41e2ce07ac0d3bcd411bd4b6326bab2cff5fa67bacdbbcea74bb9e001ec635d3
+size 3846588
boolq/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f46438b8cce048184552a2bfcdb8252eadb3a2602f0c22334caccd9b6ea547d6
+size 1313731
cb/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9b1164fed6ca2d65ef873fd45626fc4cf744f6fc34ac0d468cf558da872e61c
+size 63490
cb/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a338d1276fd0248fd4d6648917ef7181f994d85d141b3a2a4fc6a659d69f3a8a
+size 58001
cb/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36cf71dba8a314d36266b71bb136e3bad9e20c46b8983cf44d4a13d9031d0951
+size 18022
copa/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d7d6e12adc051ed21587afb2ddad9a9e733fbc8c43ffb68fae52129d04828b9
+size 40228
copa/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e243c3a2428837f5725f19885ddf3a83eecb6874474606a2ca1d1e6dd5bd6d7
+size 33938
copa/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:247a7f586c5cc706eb18a15ad20f1a5b543b9c7af0df4161df138a25edc95d67
+size 11975
multirc/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:465107b93b83c89df826603445512ac887074a747e0d23fb6b1f87c692059b29
+size 580878
multirc/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ced6d731a335a0e4fbd29f6f382f291518952c0f449448361d294000c39e6728
+size 1708291
multirc/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5eb64893afc5af7828b2f3b41d6a4574eb7a82cbb17c86c75842f5f95f679ada
+size 306376
record/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a91cd50dbdf98cfdb6d9ec6619176ddab170db299db0a13107109ad61262a9d
+size 7992474
record/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20ae20deb498305cb872b52987c63c46fa08bd97605c6fd78a1553bf8d91a417
+size 75054886
record/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3662aa327fec7faaf7f47c651941c35ff1643105bf81291168c37a78a37b59a1
+size 8110551
rte/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a4c57d7b81ed0034bd4ba0a85950f3d5c2e0c7d76836e2f3be22e8514656e86
+size 621960
rte/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16c83717da2e5b14cfd75e88b6331ac88d8cbf8f4fcd8bc48941c67ac8e9b3a9
+size 586160
rte/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3740aac80522a9a86e9d926287b813531eeabdead967399b1fd899c640fac76
+size 69819
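A useful sanity check on a conversion like this: each config's download_size in the card should equal the sum of its Parquet shard sizes. For boolq, 3846588 + 1313731 + 1309912 = 6470231, exactly the download_size declared above. A sketch of automating the check with huggingface_hub's HfFileSystem (assuming the dataset id super_glue):

from huggingface_hub import HfFileSystem

fs = HfFileSystem()
# List the boolq shards with size metadata and total them up.
shards = fs.ls("datasets/super_glue/boolq", detail=True)
total = sum(f["size"] for f in shards if f["name"].endswith(".parquet"))
print(total)  # should equal the download_size recorded for boolq (6470231)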
super_glue.py DELETED
@@ -1,637 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""The SuperGLUE benchmark."""
-
-
-import json
-import os
-
-import datasets
-
-
-_SUPER_GLUE_CITATION = """\
-@article{wang2019superglue,
-  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
-  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
-  journal={arXiv preprint arXiv:1905.00537},
-  year={2019}
-}
-
-Note that each SuperGLUE dataset has its own citation. Please see the source to
-get the correct citation for each contained dataset.
-"""
-
-_GLUE_DESCRIPTION = """\
-SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
-GLUE with a new set of more difficult language understanding tasks, improved
-resources, and a new public leaderboard.
-
-"""
-
-_BOOLQ_DESCRIPTION = """\
-BoolQ (Boolean Questions, Clark et al., 2019a) is a QA task where each example consists of a short
-passage and a yes/no question about the passage. The questions are provided anonymously and
-unsolicited by users of the Google search engine, and afterwards paired with a paragraph from a
-Wikipedia article containing the answer. Following the original work, we evaluate with accuracy."""
-
-_CB_DESCRIPTION = """\
-The CommitmentBank (De Marneffe et al., 2019) is a corpus of short texts in which at least
-one sentence contains an embedded clause. Each of these embedded clauses is annotated with the
-degree to which we expect that the person who wrote the text is committed to the truth of the clause.
-The resulting task framed as three-class textual entailment on examples that are drawn from the Wall
-Street Journal, fiction from the British National Corpus, and Switchboard. Each example consists
-of a premise containing an embedded clause and the corresponding hypothesis is the extraction of
-that clause. We use a subset of the data that had inter-annotator agreement above 0.85. The data is
-imbalanced (relatively fewer neutral examples), so we evaluate using accuracy and F1, where for
-multi-class F1 we compute the unweighted average of the F1 per class."""
-
-_COPA_DESCRIPTION = """\
-The Choice Of Plausible Alternatives (COPA, Roemmele et al., 2011) dataset is a causal
-reasoning task in which a system is given a premise sentence and two possible alternatives. The
-system must choose the alternative which has the more plausible causal relationship with the premise.
-The method used for the construction of the alternatives ensures that the task requires causal reasoning
-to solve. Examples either deal with alternative possible causes or alternative possible effects of the
-premise sentence, accompanied by a simple question disambiguating between the two instance
-types for the model. All examples are handcrafted and focus on topics from online blogs and a
-photography-related encyclopedia. Following the recommendation of the authors, we evaluate using
-accuracy."""
-
-_RECORD_DESCRIPTION = """\
-(Reading Comprehension with Commonsense Reasoning Dataset, Zhang et al., 2018) is a
-multiple-choice QA task. Each example consists of a news article and a Cloze-style question about
-the article in which one entity is masked out. The system must predict the masked out entity from a
-given list of possible entities in the provided passage, where the same entity may be expressed using
-multiple different surface forms, all of which are considered correct. Articles are drawn from CNN
-and Daily Mail. Following the original work, we evaluate with max (over all mentions) token-level
-F1 and exact match (EM)."""
-
-_RTE_DESCRIPTION = """\
-The Recognizing Textual Entailment (RTE) datasets come from a series of annual competitions
-on textual entailment, the problem of predicting whether a given premise sentence entails a given
-hypothesis sentence (also known as natural language inference, NLI). RTE was previously included
-in GLUE, and we use the same data and format as before: We merge data from RTE1 (Dagan
-et al., 2006), RTE2 (Bar Haim et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli
-et al., 2009). All datasets are combined and converted to two-class classification: entailment and
-not_entailment. Of all the GLUE tasks, RTE was among those that benefited from transfer learning
-the most, jumping from near random-chance performance (~56%) at the time of GLUE's launch to
-85% accuracy (Liu et al., 2019c) at the time of writing. Given the eight point gap with respect to
-human performance, however, the task is not yet solved by machines, and we expect the remaining
-gap to be difficult to close."""
-
-_MULTIRC_DESCRIPTION = """\
-The Multi-Sentence Reading Comprehension dataset (MultiRC, Khashabi et al., 2018)
-is a true/false question-answering task. Each example consists of a context paragraph, a question
-about that paragraph, and a list of possible answers to that question which must be labeled as true or
-false. Question-answering (QA) is a popular problem with many datasets. We use MultiRC because
-of a number of desirable properties: (i) each question can have multiple possible correct answers,
-so each question-answer pair must be evaluated independent of other pairs, (ii) the questions are
-designed such that answering each question requires drawing facts from multiple context sentences,
-and (iii) the question-answer pair format more closely matches the API of other SuperGLUE tasks
-than span-based extractive QA does. The paragraphs are drawn from seven domains including news,
-fiction, and historical text."""
-
-_WIC_DESCRIPTION = """\
-The Word-in-Context (WiC, Pilehvar and Camacho-Collados, 2019) dataset supports a word
-sense disambiguation task cast as binary classification over sentence pairs. Given two sentences and a
-polysemous (sense-ambiguous) word that appears in both sentences, the task is to determine whether
-the word is used with the same sense in both sentences. Sentences are drawn from WordNet (Miller,
-1995), VerbNet (Schuler, 2005), and Wiktionary. We follow the original work and evaluate using
-accuracy."""
-
-_WSC_DESCRIPTION = """\
-The Winograd Schema Challenge (WSC, Levesque et al., 2012) is a reading comprehension
-task in which a system must read a sentence with a pronoun and select the referent of that pronoun
-from a list of choices. Given the difficulty of this task and the headroom still left, we have included
-WSC in SuperGLUE and recast the dataset into its coreference form. The task is cast as a binary
-classification problem, as opposed to N-multiple choice, in order to isolate the model's ability to
-understand the coreference links within a sentence as opposed to various other strategies that may
-come into play in multiple choice conditions. With that in mind, we create a split with 65% negative
-majority class in the validation set, reflecting the distribution of the hidden test set, and 52% negative
-class in the training set. The training and validation examples are drawn from the original Winograd
-Schema dataset (Levesque et al., 2012), as well as those distributed by the affiliated organization
-Commonsense Reasoning. The test examples are derived from fiction books and have been shared
-with us by the authors of the original dataset. Previously, a version of WSC recast as NLI as included
-in GLUE, known as WNLI. No substantial progress was made on WNLI, with many submissions
-opting to submit only majority class predictions. WNLI was made especially difficult due to an
-adversarial train/dev split: Premise sentences that appeared in the training set sometimes appeared
-in the development set with a different hypothesis and a flipped label. If a system memorized the
-training set without meaningfully generalizing, which was easy due to the small size of the training
-set, it could perform far below chance on the development set. We remove this adversarial design
-in the SuperGLUE version of WSC by ensuring that no sentences are shared between the training,
-validation, and test sets.
-
-However, the validation and test sets come from different domains, with the validation set consisting
-of ambiguous examples such that changing one non-noun phrase word will change the coreference
-dependencies in the sentence. The test set consists only of more straightforward examples, with a
-high number of noun phrases (and thus more choices for the model), but low to no ambiguity."""
-
-_AXB_DESCRIPTION = """\
-An expert-constructed,
-diagnostic dataset that automatically tests models for a broad range of linguistic, commonsense, and
-world knowledge. Each example in this broad-coverage diagnostic is a sentence pair labeled with
-a three-way entailment relation (entailment, neutral, or contradiction) and tagged with labels that
-indicate the phenomena that characterize the relationship between the two sentences. Submissions
-to the GLUE leaderboard are required to include predictions from the submission's MultiNLI
-classifier on the diagnostic dataset, and analyses of the results were shown alongside the main
-leaderboard. Since this broad-coverage diagnostic task has proved difficult for top models, we retain
-it in SuperGLUE. However, since MultiNLI is not part of SuperGLUE, we collapse contradiction
-and neutral into a single not_entailment label, and request that submissions include predictions
-on the resulting set from the model used for the RTE task.
-"""
-
-_AXG_DESCRIPTION = """\
-Winogender is designed to measure gender
-bias in coreference resolution systems. We use the Diverse Natural Language Inference Collection
-(DNC; Poliak et al., 2018) version that casts Winogender as a textual entailment task. Each example
-consists of a premise sentence with a male or female pronoun and a hypothesis giving a possible
-antecedent of the pronoun. Examples occur in minimal pairs, where the only difference between
-an example and its pair is the gender of the pronoun in the premise. Performance on Winogender
-is measured with both accuracy and the gender parity score: the percentage of minimal pairs for
-which the predictions are the same. We note that a system can trivially obtain a perfect gender parity
-score by guessing the same class for all examples, so a high gender parity score is meaningless unless
-accompanied by high accuracy. As a diagnostic test of gender bias, we view the schemas as having high
-positive predictive value and low negative predictive value; that is, they may demonstrate the presence
-of gender bias in a system, but not prove its absence.
-"""
-
-_BOOLQ_CITATION = """\
-@inproceedings{clark2019boolq,
-  title={BoolQ: Exploring the Surprising Difficulty of Natural Yes/No Questions},
-  author={Clark, Christopher and Lee, Kenton and Chang, Ming-Wei, and Kwiatkowski, Tom and Collins, Michael, and Toutanova, Kristina},
-  booktitle={NAACL},
-  year={2019}
-}"""
-
-_CB_CITATION = """\
-@article{de marneff_simons_tonhauser_2019,
-  title={The CommitmentBank: Investigating projection in naturally occurring discourse},
-  journal={proceedings of Sinn und Bedeutung 23},
-  author={De Marneff, Marie-Catherine and Simons, Mandy and Tonhauser, Judith},
-  year={2019}
-}"""
-
-_COPA_CITATION = """\
-@inproceedings{roemmele2011choice,
-  title={Choice of plausible alternatives: An evaluation of commonsense causal reasoning},
-  author={Roemmele, Melissa and Bejan, Cosmin Adrian and Gordon, Andrew S},
-  booktitle={2011 AAAI Spring Symposium Series},
-  year={2011}
-}"""
-
-_RECORD_CITATION = """\
-@article{zhang2018record,
-  title={Record: Bridging the gap between human and machine commonsense reading comprehension},
-  author={Zhang, Sheng and Liu, Xiaodong and Liu, Jingjing and Gao, Jianfeng and Duh, Kevin and Van Durme, Benjamin},
-  journal={arXiv preprint arXiv:1810.12885},
-  year={2018}
-}"""
-
-_RTE_CITATION = """\
-@inproceedings{dagan2005pascal,
-  title={The PASCAL recognising textual entailment challenge},
-  author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
-  booktitle={Machine Learning Challenges Workshop},
-  pages={177--190},
-  year={2005},
-  organization={Springer}
-}
-@inproceedings{bar2006second,
-  title={The second pascal recognising textual entailment challenge},
-  author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
-  booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
-  volume={6},
-  number={1},
-  pages={6--4},
-  year={2006},
-  organization={Venice}
-}
-@inproceedings{giampiccolo2007third,
-  title={The third pascal recognizing textual entailment challenge},
-  author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
-  booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
-  pages={1--9},
-  year={2007},
-  organization={Association for Computational Linguistics}
-}
-@inproceedings{bentivogli2009fifth,
-  title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
-  author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
-  booktitle={TAC},
-  year={2009}
-}"""
-
-_MULTIRC_CITATION = """\
-@inproceedings{MultiRC2018,
-  author = {Daniel Khashabi and Snigdha Chaturvedi and Michael Roth and Shyam Upadhyay and Dan Roth},
-  title = {Looking Beyond the Surface:A Challenge Set for Reading Comprehension over Multiple Sentences},
-  booktitle = {Proceedings of North American Chapter of the Association for Computational Linguistics (NAACL)},
-  year = {2018}
-}"""
-
-_WIC_CITATION = """\
-@article{DBLP:journals/corr/abs-1808-09121,
-  author={Mohammad Taher Pilehvar and Jos{\'{e}} Camacho{-}Collados},
-  title={WiC: 10, 000 Example Pairs for Evaluating Context-Sensitive Representations},
-  journal={CoRR},
-  volume={abs/1808.09121},
-  year={2018},
-  url={http://arxiv.org/abs/1808.09121},
-  archivePrefix={arXiv},
-  eprint={1808.09121},
-  timestamp={Mon, 03 Sep 2018 13:36:40 +0200},
-  biburl={https://dblp.org/rec/bib/journals/corr/abs-1808-09121},
-  bibsource={dblp computer science bibliography, https://dblp.org}
-}"""
-
-_WSC_CITATION = """\
-@inproceedings{levesque2012winograd,
-  title={The winograd schema challenge},
-  author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
-  booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
-  year={2012}
-}"""
-
-_AXG_CITATION = """\
-@inproceedings{rudinger-EtAl:2018:N18,
-  author = {Rudinger, Rachel and Naradowsky, Jason and Leonard, Brian and {Van Durme}, Benjamin},
-  title = {Gender Bias in Coreference Resolution},
-  booktitle = {Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
-  month = {June},
-  year = {2018},
-  address = {New Orleans, Louisiana},
-  publisher = {Association for Computational Linguistics}
-}
-"""
-
-
-class SuperGlueConfig(datasets.BuilderConfig):
-    """BuilderConfig for SuperGLUE."""
-
-    def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
-        """BuilderConfig for SuperGLUE.
-
-        Args:
-          features: `list[string]`, list of the features that will appear in the
-            feature dict. Should not include "label".
-          data_url: `string`, url to download the zip file from.
-          citation: `string`, citation for the data set.
-          url: `string`, url for information about the data set.
-          label_classes: `list[string]`, the list of classes for the label if the
-            label is present as a string. Non-string labels will be cast to either
-            'False' or 'True'.
-          **kwargs: keyword arguments forwarded to super.
-        """
-        # Version history:
-        # 1.0.3: Fix not including entity position in ReCoRD.
-        # 1.0.2: Fixed non-nondeterminism in ReCoRD.
-        # 1.0.1: Change from the pre-release trial version of SuperGLUE (v1.9) to
-        #        the full release (v2.0).
-        # 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
-        # 0.0.2: Initial version.
-        super(SuperGlueConfig, self).__init__(version=datasets.Version("1.0.3"), **kwargs)
-        self.features = features
-        self.label_classes = label_classes
-        self.data_url = data_url
-        self.citation = citation
-        self.url = url
-
-
-class SuperGlue(datasets.GeneratorBasedBuilder):
-    """The SuperGLUE benchmark."""
-
-    BUILDER_CONFIGS = [
-        SuperGlueConfig(
-            name="boolq",
-            description=_BOOLQ_DESCRIPTION,
-            features=["question", "passage"],
-            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/BoolQ.zip",
-            citation=_BOOLQ_CITATION,
-            url="https://github.com/google-research-datasets/boolean-questions",
-        ),
-        SuperGlueConfig(
-            name="cb",
-            description=_CB_DESCRIPTION,
-            features=["premise", "hypothesis"],
-            label_classes=["entailment", "contradiction", "neutral"],
-            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/CB.zip",
-            citation=_CB_CITATION,
-            url="https://github.com/mcdm/CommitmentBank",
-        ),
-        SuperGlueConfig(
-            name="copa",
-            description=_COPA_DESCRIPTION,
-            label_classes=["choice1", "choice2"],
-            # Note that question will only be the X in the statement "What's
-            # the X for this?".
-            features=["premise", "choice1", "choice2", "question"],
-            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/COPA.zip",
-            citation=_COPA_CITATION,
-            url="http://people.ict.usc.edu/~gordon/copa.html",
-        ),
-        SuperGlueConfig(
-            name="multirc",
-            description=_MULTIRC_DESCRIPTION,
-            features=["paragraph", "question", "answer"],
-            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/MultiRC.zip",
-            citation=_MULTIRC_CITATION,
-            url="https://cogcomp.org/multirc/",
-        ),
-        SuperGlueConfig(
-            name="record",
-            description=_RECORD_DESCRIPTION,
-            # Note that entities and answers will be a sequences of strings. Query
-            # will contain @placeholder as a substring, which represents the word
-            # to be substituted in.
-            features=["passage", "query", "entities", "entity_spans", "answers"],
-            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/ReCoRD.zip",
-            citation=_RECORD_CITATION,
-            url="https://sheng-z.github.io/ReCoRD-explorer/",
-        ),
-        SuperGlueConfig(
-            name="rte",
-            description=_RTE_DESCRIPTION,
-            features=["premise", "hypothesis"],
-            label_classes=["entailment", "not_entailment"],
-            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/RTE.zip",
-            citation=_RTE_CITATION,
-            url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
-        ),
-        SuperGlueConfig(
-            name="wic",
-            description=_WIC_DESCRIPTION,
-            # Note that start1, start2, end1, and end2 will be integers stored as
-            # datasets.Value('int32').
-            features=["word", "sentence1", "sentence2", "start1", "start2", "end1", "end2"],
-            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/WiC.zip",
-            citation=_WIC_CITATION,
-            url="https://pilehvar.github.io/wic/",
-        ),
-        SuperGlueConfig(
-            name="wsc",
-            description=_WSC_DESCRIPTION,
-            # Note that span1_index and span2_index will be integers stored as
-            # datasets.Value('int32').
-            features=["text", "span1_index", "span2_index", "span1_text", "span2_text"],
-            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/WSC.zip",
-            citation=_WSC_CITATION,
-            url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
-        ),
-        SuperGlueConfig(
-            name="wsc.fixed",
-            description=(
-                _WSC_DESCRIPTION + "\n\nThis version fixes issues where the spans are not actually "
-                "substrings of the text."
-            ),
-            # Note that span1_index and span2_index will be integers stored as
-            # datasets.Value('int32').
-            features=["text", "span1_index", "span2_index", "span1_text", "span2_text"],
-            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/WSC.zip",
-            citation=_WSC_CITATION,
-            url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
-        ),
-        SuperGlueConfig(
-            name="axb",
-            description=_AXB_DESCRIPTION,
-            features=["sentence1", "sentence2"],
-            label_classes=["entailment", "not_entailment"],
-            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/AX-b.zip",
-            citation="",  # The GLUE citation is sufficient.
-            url="https://gluebenchmark.com/diagnostics",
-        ),
-        SuperGlueConfig(
-            name="axg",
-            description=_AXG_DESCRIPTION,
-            features=["premise", "hypothesis"],
-            label_classes=["entailment", "not_entailment"],
-            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/AX-g.zip",
-            citation=_AXG_CITATION,
-            url="https://github.com/rudinger/winogender-schemas",
-        ),
-    ]
-
-    def _info(self):
-        features = {feature: datasets.Value("string") for feature in self.config.features}
-        if self.config.name.startswith("wsc"):
-            features["span1_index"] = datasets.Value("int32")
-            features["span2_index"] = datasets.Value("int32")
-        if self.config.name == "wic":
-            features["start1"] = datasets.Value("int32")
-            features["start2"] = datasets.Value("int32")
-            features["end1"] = datasets.Value("int32")
-            features["end2"] = datasets.Value("int32")
-        if self.config.name == "multirc":
-            features["idx"] = dict(
-                {
-                    "paragraph": datasets.Value("int32"),
-                    "question": datasets.Value("int32"),
-                    "answer": datasets.Value("int32"),
-                }
-            )
-        elif self.config.name == "record":
-            features["idx"] = dict(
-                {
-                    "passage": datasets.Value("int32"),
-                    "query": datasets.Value("int32"),
-                }
-            )
-        else:
-            features["idx"] = datasets.Value("int32")
-
-        if self.config.name == "record":
-            # Entities are the set of possible choices for the placeholder.
-            features["entities"] = datasets.features.Sequence(datasets.Value("string"))
-            # The start and end indices of paragraph text for each entity.
-            features["entity_spans"] = datasets.features.Sequence(
-                {
-                    "text": datasets.Value("string"),
-                    "start": datasets.Value("int32"),
-                    "end": datasets.Value("int32"),
-                }
-            )
-            # Answers are the subset of entities that are correct.
-            features["answers"] = datasets.features.Sequence(datasets.Value("string"))
-        else:
-            features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
-
-        return datasets.DatasetInfo(
-            description=_GLUE_DESCRIPTION + self.config.description,
-            features=datasets.Features(features),
-            homepage=self.config.url,
-            citation=self.config.citation + "\n" + _SUPER_GLUE_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""
-        task_name = _get_task_name_from_data_url(self.config.data_url)
-        dl_dir = os.path.join(dl_dir, task_name)
-        if self.config.name in ["axb", "axg"]:
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    gen_kwargs={
-                        "data_file": os.path.join(dl_dir, f"{task_name}.jsonl"),
-                        "split": datasets.Split.TEST,
-                    },
-                ),
-            ]
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "data_file": os.path.join(dl_dir, "train.jsonl"),
-                    "split": datasets.Split.TRAIN,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "data_file": os.path.join(dl_dir, "val.jsonl"),
-                    "split": datasets.Split.VALIDATION,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "data_file": os.path.join(dl_dir, "test.jsonl"),
-                    "split": datasets.Split.TEST,
-                },
-            ),
-        ]
-
-    def _generate_examples(self, data_file, split):
-        with open(data_file, encoding="utf-8") as f:
-            for line in f:
-                row = json.loads(line)
-
-                if self.config.name == "multirc":
-                    paragraph = row["passage"]
-                    for question in paragraph["questions"]:
-                        for answer in question["answers"]:
-                            label = answer.get("label")
-                            key = "%s_%s_%s" % (row["idx"], question["idx"], answer["idx"])
-                            yield key, {
-                                "paragraph": paragraph["text"],
-                                "question": question["question"],
-                                "answer": answer["text"],
-                                "label": -1 if label is None else _cast_label(bool(label)),
-                                "idx": {"paragraph": row["idx"], "question": question["idx"], "answer": answer["idx"]},
-                            }
-                elif self.config.name == "record":
-                    passage = row["passage"]
-                    entity_texts, entity_spans = _get_record_entities(passage)
-                    for qa in row["qas"]:
-                        yield qa["idx"], {
-                            "passage": passage["text"],
-                            "query": qa["query"],
-                            "entities": entity_texts,
-                            "entity_spans": entity_spans,
-                            "answers": _get_record_answers(qa),
-                            "idx": {"passage": row["idx"], "query": qa["idx"]},
-                        }
-                else:
-                    if self.config.name.startswith("wsc"):
-                        row.update(row["target"])
-                    example = {feature: row[feature] for feature in self.config.features}
-                    if self.config.name == "wsc.fixed":
-                        example = _fix_wst(example)
-                    example["idx"] = row["idx"]
-
-                    if "label" in row:
-                        if self.config.name == "copa":
-                            example["label"] = "choice2" if row["label"] else "choice1"
-                        else:
-                            example["label"] = _cast_label(row["label"])
-                    else:
-                        assert split == datasets.Split.TEST, row
-                        example["label"] = -1
-                    yield example["idx"], example
-
-
-def _fix_wst(ex):
-    """Fixes most cases where spans are not actually substrings of text."""
-
-    def _fix_span_text(k):
-        """Fixes a single span."""
-        text = ex[k + "_text"]
-        index = ex[k + "_index"]
-
-        if text in ex["text"]:
-            return
-
-        if text in ("Kamenev and Zinoviev", "Kamenev, Zinoviev, and Stalin"):
-            # There is no way to correct these examples since the subjects have
-            # intervening text.
-            return
-
-        if "theyscold" in text:
-            ex["text"].replace("theyscold", "they scold")
-            ex["span2_index"] = 10
-        # Make sure case of the first words match.
-        first_word = ex["text"].split()[index]
-        if first_word[0].islower():
-            text = text[0].lower() + text[1:]
-        else:
-            text = text[0].upper() + text[1:]
-        # Remove punctuation in span.
-        text = text.rstrip(".")
-        # Replace incorrect whitespace character in span.
-        text = text.replace("\n", " ")
-        ex[k + "_text"] = text
-        assert ex[k + "_text"] in ex["text"], ex
-
-    _fix_span_text("span1")
-    _fix_span_text("span2")
-    return ex
-
-
-def _cast_label(label):
-    """Converts the label into the appropriate string version."""
-    if isinstance(label, str):
-        return label
-    elif isinstance(label, bool):
-        return "True" if label else "False"
-    elif isinstance(label, int):
-        assert label in (0, 1)
-        return str(label)
-    else:
-        raise ValueError("Invalid label format.")
-
-
-def _get_record_entities(passage):
-    """Returns the unique set of entities."""
-    text = passage["text"]
-    entity_spans = list()
-    for entity in passage["entities"]:
-        entity_text = text[entity["start"] : entity["end"] + 1]
-        entity_spans.append({"text": entity_text, "start": entity["start"], "end": entity["end"] + 1})
-    entity_spans = sorted(entity_spans, key=lambda e: e["start"])  # sort by start index
-    entity_texts = set(e["text"] for e in entity_spans)  # for backward compatability
-    return entity_texts, entity_spans
-
-
-def _get_record_answers(qa):
-    """Returns the unique set of answers."""
-    if "answers" not in qa:
-        return []
-    answers = set()
-    for answer in qa["answers"]:
-        answers.add(answer["text"])
-    return sorted(answers)
-
-
-def _get_task_name_from_data_url(data_url):
-    return data_url.split("/")[-1].split(".")[0]
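The script can be deleted because everything it did at load time (downloading the task zips, parsing the JSONL files, casting boolean labels to ClassLabel strings) is now frozen into the Parquet shards, while the split layout lives in the card's configs section. A conversion of this kind can be reproduced roughly as below (a sketch, not necessarily how this PR was generated; push_to_hub with config_name requires a recent datasets release):

from datasets import load_dataset

CONFIGS = ["axb", "axg", "boolq", "cb", "copa", "multirc",
           "record", "rte", "wic", "wsc", "wsc.fixed"]

for name in CONFIGS:
    # Run the legacy loading script one last time...
    ds = load_dataset("super_glue", name)
    # ...then write train/validation/test Parquet shards under <config>/<split>-*.
    ds.push_to_hub("super_glue", config_name=name)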
wic/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e77d121835508a9e4c15bfb79bbc8188510d61da06f9d8fb0f892e1762b8469a
+size 126788
wic/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6d2231730f52800dba48219790ee9b76381bbba7c99ea76047081631877ec80
+size 410323
wic/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22cc5df62bf76c8ed47ea0c92b4bb1bce698984ac1c10493250365d3419ac6da
+size 60115
wsc.fixed/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5308e3ca654355a00911fd400e500c4b79c7806856075a83184adcf71b3c5cc3
+size 12162
wsc.fixed/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13086042694cd7550f91f4279fafa8425cead3ed046c6a6cd22d96107f1a1944
+size 27336
wsc.fixed/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa64cc8f27c793b60b6263302c26ae49f768a6b90d050e383fc3f99d2080b201
+size 10208
wsc/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a39c4f576a415fd1139428a1423911c7523090b2da9de9202f82c5b1955e3f1
+size 12164
wsc/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1fb4be43c74cc03e4b519adc63660820f3bde7367a464c39e61dd17b8af0c7a
+size 27222
wsc/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1459bd73672508365680b4b3eb584e6cf33dfa151762863f7eed2e494062634a
+size 10206