rfernand committed
Commit 2e3afdf · verified · Parent(s): cf3c4b5

Update README.md


update with correct dataset info

Files changed (1)
  1. README.md +392 −391
README.md CHANGED
---
configs:
- config_name: "10_shot_rlw"
  data_files:
  - split: dev
    path: "10_shot_rlw/dev.*"
  - split: ood_cons_count_10
    path: "10_shot_rlw/ood_cons_count_10.*"
  - split: ood_cons_count_3
    path: "10_shot_rlw/ood_cons_count_3.*"
  - split: ood_cons_count_5
    path: "10_shot_rlw/ood_cons_count_5.*"
  - split: ood_cons_count_7
    path: "10_shot_rlw/ood_cons_count_7.*"
  - split: ood_cons_len_10
    path: "10_shot_rlw/ood_cons_len_10.*"
  - split: ood_cons_len_3
    path: "10_shot_rlw/ood_cons_len_3.*"
  - split: ood_cons_len_5
    path: "10_shot_rlw/ood_cons_len_5.*"
  - split: ood_cons_len_7
    path: "10_shot_rlw/ood_cons_len_7.*"
  - split: ood_lexical
    path: "10_shot_rlw/ood_lexical.*"
  - split: test
    path: "10_shot_rlw/test.*"
  - split: train
    path: "10_shot_rlw/train.*"
- config_name: "1_shot_eng"
  data_files:
  - split: dev
    path: "1_shot_eng/dev.*"
  - split: ood_cons_count_3
    path: "1_shot_eng/ood_cons_count_3.*"
  - split: ood_cons_count_5
    path: "1_shot_eng/ood_cons_count_5.*"
  - split: ood_cons_len_3
    path: "1_shot_eng/ood_cons_len_3.*"
  - split: ood_cons_len_5
    path: "1_shot_eng/ood_cons_len_5.*"
  - split: ood_lexical
    path: "1_shot_eng/ood_lexical.*"
  - split: other_tasks_id
    path: "1_shot_eng/other_tasks_id.*"
  - split: other_tasks_ood
    path: "1_shot_eng/other_tasks_ood.*"
  - split: test
    path: "1_shot_eng/test.*"
  - split: train
    path: "1_shot_eng/train.*"
- config_name: "1_shot_rlw"
  data_files:
  - split: dev
    path: "1_shot_rlw/dev.*"
  - split: ood_cons_count_10
    path: "1_shot_rlw/ood_cons_count_10.*"
  - split: ood_cons_count_3
    path: "1_shot_rlw/ood_cons_count_3.*"
  - split: ood_cons_count_5
    path: "1_shot_rlw/ood_cons_count_5.*"
  - split: ood_cons_count_7
    path: "1_shot_rlw/ood_cons_count_7.*"
  - split: ood_cons_len_10
    path: "1_shot_rlw/ood_cons_len_10.*"
  - split: ood_cons_len_3
    path: "1_shot_rlw/ood_cons_len_3.*"
  - split: ood_cons_len_5
    path: "1_shot_rlw/ood_cons_len_5.*"
  - split: ood_cons_len_7
    path: "1_shot_rlw/ood_cons_len_7.*"
  - split: ood_lexical
    path: "1_shot_rlw/ood_lexical.*"
  - split: test
    path: "1_shot_rlw/test.*"
  - split: train
    path: "1_shot_rlw/train.*"
- config_name: "1_shot_rlw_10x"
  data_files:
  - split: dev
    path: "1_shot_rlw_10x/dev.*"
  - split: ood_cons_count_10
    path: "1_shot_rlw_10x/ood_cons_count_10.*"
  - split: ood_cons_count_3
    path: "1_shot_rlw_10x/ood_cons_count_3.*"
  - split: ood_cons_count_5
    path: "1_shot_rlw_10x/ood_cons_count_5.*"
  - split: ood_cons_count_7
    path: "1_shot_rlw_10x/ood_cons_count_7.*"
  - split: ood_cons_len_10
    path: "1_shot_rlw_10x/ood_cons_len_10.*"
  - split: ood_cons_len_3
    path: "1_shot_rlw_10x/ood_cons_len_3.*"
  - split: ood_cons_len_5
    path: "1_shot_rlw_10x/ood_cons_len_5.*"
  - split: ood_cons_len_7
    path: "1_shot_rlw_10x/ood_cons_len_7.*"
  - split: ood_lexical
    path: "1_shot_rlw_10x/ood_lexical.*"
  - split: test
    path: "1_shot_rlw_10x/test.*"
  - split: train
    path: "1_shot_rlw_10x/train.*"
- config_name: "2_shot_rlw"
  data_files:
  - split: dev
    path: "2_shot_rlw/dev.*"
  - split: ood_cons_count_10
    path: "2_shot_rlw/ood_cons_count_10.*"
  - split: ood_cons_count_3
    path: "2_shot_rlw/ood_cons_count_3.*"
  - split: ood_cons_count_5
    path: "2_shot_rlw/ood_cons_count_5.*"
  - split: ood_cons_count_7
    path: "2_shot_rlw/ood_cons_count_7.*"
  - split: ood_cons_len_10
    path: "2_shot_rlw/ood_cons_len_10.*"
  - split: ood_cons_len_3
    path: "2_shot_rlw/ood_cons_len_3.*"
  - split: ood_cons_len_5
    path: "2_shot_rlw/ood_cons_len_5.*"
  - split: ood_cons_len_7
    path: "2_shot_rlw/ood_cons_len_7.*"
  - split: ood_lexical
    path: "2_shot_rlw/ood_lexical.*"
  - split: test
    path: "2_shot_rlw/test.*"
  - split: train
    path: "2_shot_rlw/train.*"
- config_name: "3_shot_rlw"
  data_files:
  - split: dev
    path: "3_shot_rlw/dev.*"
  - split: ood_cons_count_10
    path: "3_shot_rlw/ood_cons_count_10.*"
  - split: ood_cons_count_3
    path: "3_shot_rlw/ood_cons_count_3.*"
  - split: ood_cons_count_5
    path: "3_shot_rlw/ood_cons_count_5.*"
  - split: ood_cons_count_7
    path: "3_shot_rlw/ood_cons_count_7.*"
  - split: ood_cons_len_10
    path: "3_shot_rlw/ood_cons_len_10.*"
  - split: ood_cons_len_3
    path: "3_shot_rlw/ood_cons_len_3.*"
  - split: ood_cons_len_5
    path: "3_shot_rlw/ood_cons_len_5.*"
  - split: ood_cons_len_7
    path: "3_shot_rlw/ood_cons_len_7.*"
  - split: ood_lexical
    path: "3_shot_rlw/ood_lexical.*"
  - split: test
    path: "3_shot_rlw/test.*"
  - split: train
    path: "3_shot_rlw/train.*"
- config_name: "5_shot_rlw"
  data_files:
  - split: dev
    path: "5_shot_rlw/dev.*"
  - split: ood_cons_count_10
    path: "5_shot_rlw/ood_cons_count_10.*"
  - split: ood_cons_count_3
    path: "5_shot_rlw/ood_cons_count_3.*"
  - split: ood_cons_count_5
    path: "5_shot_rlw/ood_cons_count_5.*"
  - split: ood_cons_count_7
    path: "5_shot_rlw/ood_cons_count_7.*"
  - split: ood_cons_len_10
    path: "5_shot_rlw/ood_cons_len_10.*"
  - split: ood_cons_len_3
    path: "5_shot_rlw/ood_cons_len_3.*"
  - split: ood_cons_len_5
    path: "5_shot_rlw/ood_cons_len_5.*"
  - split: ood_cons_len_7
    path: "5_shot_rlw/ood_cons_len_7.*"
  - split: ood_lexical
    path: "5_shot_rlw/ood_lexical.*"
  - split: test
    path: "5_shot_rlw/test.*"
  - split: train
    path: "5_shot_rlw/train.*"

annotations_creators:
- machine-generated
language:
- en
language_creators:
- machine-generated
license:
- other
multilinguality:
- monolingual
pretty_name: Templatic Generation Tasks for In-Context Learning Research
size_categories:
- 10K<n<100K
- 1K<n<10K
- n<1K
source_datasets:
- original
tags:
- seq2seq
task_categories:
- text2text-generation
task_ids: []
---
# Dataset Card for Active/Passive/Logical Transforms

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Dataset Subsets (Tasks)](#dataset-subsets-tasks)
  - [Data Splits](#data-splits)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:** [Roland Fernandez](mailto:[email protected])

### Dataset Summary

This is a synthetic dataset containing a set of templatic generation tasks, built from both English words and random 2-letter words.

### Supported Tasks and Leaderboards

[TBD]

### Languages

All data consists of English words or random 2-letter words.

## Dataset Structure

The dataset consists of several subsets, or tasks. Each task contains train, dev, and test splits,
along with multiple out-of-distribution (OOD) splits.

Each sample in a split contains a source string, a target string, and an annotation string (describing the sample).

### Dataset Subsets (Tasks)
The dataset consists of the following tasks:

```
- 1_shot_rlw (1 example input/output pair, a test input, and the gold output, all using random 2-letter words)
- 1_shot_eng (same as 1_shot_rlw, but using English words)
- 1_shot_rlw_10x (same as 1_shot_rlw, but with 10x the training samples)
- 2_shot_rlw (2 example input/output pairs, a test input, and the gold output, all using random 2-letter words)
- 3_shot_rlw (3 example input/output pairs, a test input, and the gold output, all using random 2-letter words)
- 5_shot_rlw (5 example input/output pairs, a test input, and the gold output, all using random 2-letter words)
- 10_shot_rlw (10 example input/output pairs, a test input, and the gold output, all using random 2-letter words)
```

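Each task above is exposed as a dataset config, so a task and split can be loaded by name with the `datasets` library. A minimal sketch; the repository ID `rfernand/nc_tgt_v11` is an assumption based on the dataset's internal name, so substitute the actual Hugging Face path of this dataset:

```python
from datasets import load_dataset

# NOTE: placeholder repository ID (assumed from the dataset's internal
# name, nc_tgt_v11); substitute the actual Hugging Face dataset path.
REPO_ID = "rfernand/nc_tgt_v11"

# Each task above is a config; splits are selected by name.
train = load_dataset(REPO_ID, "1_shot_rlw", split="train")

example = train[0]
print(example["source"])      # the N-shot examples plus the test cue
print(example["target"])      # the gold output
print(example["annotation"])  # template metadata for this sample
```
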
### Data Splits

Most tasks have the following splits:
- train
- dev
- test
- ood_lexical
- ood_cons_count_3
- ood_cons_count_5
- ood_cons_count_7
- ood_cons_count_10
- ood_cons_len_3
- ood_cons_len_5
- ood_cons_len_7
- ood_cons_len_10

Here is a table showing how the number of examples varies by split (for most tasks):

| Dataset Split | Number of Instances in Split |
| ------------- | ---------------------------- |
| train         | 280,000                      |
| dev           | 35,000                       |
| test          | 35,000                       |
| ood_*         | 84,000                       |

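Not every task provides every split (for example, the 1_shot_eng config adds other_tasks_id and other_tasks_ood splits and omits the count-7/10 and len-7/10 splits), so it can help to enumerate the splits a config actually has before loading it. A sketch, again assuming the placeholder repository ID from above:

```python
from datasets import get_dataset_split_names

# Placeholder repository ID, as above.
for name in get_dataset_split_names("rfernand/nc_tgt_v11", "1_shot_rlw"):
    print(name)  # train, dev, test, ood_lexical, ood_cons_count_3, ...
```
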
### Data Instances

Each sample consists of a source, target, and annotation string (all tab-separated).

Here is an example from the *train* split of the *1_shot_eng* task:

```
{
    'raw': 'Q any mouse ) ; bear A any mouse & . Q road ) ; building A road & . {"cons_count": "Q2A1", "cons_len": "Q21.Q11"}',
    'source': 'Q any mouse ) ; bear A any mouse & . Q road ) ; building A',
    'target': 'road & .',
    'annotation': '{"cons_count": "Q2A1", "cons_len": "Q21.Q11"}'
}
```

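The tabs render as spaces in the 'raw' string above; when reading the raw files directly, each line splits into the three fields on tab characters. A minimal sketch, assuming the tab separation described above:

```python
# Rebuild the example's raw line with explicit tabs, then split it into
# its three fields (source, target, annotation).
raw = ("Q any mouse ) ; bear A any mouse & . Q road ) ; building A"
       "\troad & ."
       '\t{"cons_count": "Q2A1", "cons_len": "Q21.Q11"}')

source, target, annotation = raw.split("\t")
assert source.endswith("building A")
assert target == "road & ."
```
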
### Data Fields

- `source`: the string containing the N-shot examples and the test cue
- `target`: the string containing the desired (gold) output
- `annotation`: the string describing the example (as a Python or JSON dictionary)

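Since the annotation is a serialized dictionary, it can be decoded before use. A minimal sketch, using the annotation from the example above:

```python
import ast
import json

annotation = '{"cons_count": "Q2A1", "cons_len": "Q21.Q11"}'

# The example annotation is valid JSON, so json.loads handles it.
meta = json.loads(annotation)
print(meta["cons_count"])  # -> Q2A1

# If an annotation is written as a Python-style dict (single quotes),
# ast.literal_eval parses it safely instead.
meta = ast.literal_eval(annotation)
```
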
## Dataset Creation

### Curation Rationale

We wanted a dataset that would test in-context (and from-scratch) learning of abstract, semantic-free symbolic transformations,
based on a random template for each example. The dataset is designed to test 3 types of out-of-distribution generalization:

- lexical: known words used in new contexts (relative to the train split)
- length: the train split uses constituents of 1, 2, or 4 words; OOD splits use 3, 5, 7, or 10 words
- count: the train split uses 1, 2, or 4 constituents; OOD splits use 3, 5, 7, or 10 constituents

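These three axes map directly onto the ood_* split names, so probing one kind of generalization is just a matter of picking the corresponding split. A sketch (placeholder repository ID as above) that loads one count-generalization split:

```python
from datasets import load_dataset

# Placeholder repository ID, as above. Train uses 1, 2, or 4 constituents;
# this OOD split tests generalization to 7 constituents.
ood = load_dataset("rfernand/nc_tgt_v11", "1_shot_rlw", split="ood_cons_count_7")
print(len(ood))
```
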
### Source Data

[N/A]

#### Initial Data Collection and Normalization

[N/A]

#### Who are the source language producers?

The dataset was generated from templates designed by Paul Smolensky and Roland Fernandez.

### Annotations

Besides the source and target strings, each sample contains an annotation string that describes the sample.

#### Annotation process

The annotation column was generated from each sample's template.

#### Who are the annotators?

[N/A]

### Personal and Sensitive Information

The data contains no names or other sensitive information.

## Considerations for Using the Data

### Social Impact of Dataset

The purpose of this dataset is to research how LLMs and from-scratch models can learn to solve templatic generation tasks.

### Discussion of Biases

[TBD]

### Other Known Limitations

[TBD]

## Additional Information

The internal name of this dataset is `nc_tgt_v11`. Also see the DATASET_INFO.md and GRAMMAR.md files.

### Dataset Curators

The dataset was generated from templates designed by Paul Smolensky and Roland Fernandez.

### Licensing Information

This dataset is released under the [CDLA Permissive 2.0 license](https://cdla.dev/permissive-2-0/).

### Citation Information

[TBD]

### Contributions

Thanks to [The Neurocompositional AI group at Microsoft Research](https://www.microsoft.com/en-us/research/project/neurocompositional-ai/) for creating and adding this dataset.