Modalities: Tabular, Text
Formats: parquet
Languages: English
Libraries: Datasets, Dask
License: odc-by
Commit 8adbd2b (0 parents), committed by guipenedo, loubnabnl and anton-l

Super-squash branch 'main' using huggingface_hub

Co-authored-by: loubnabnl <[email protected]>
Co-authored-by: anton-l <[email protected]>
This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full list.

Files changed (50)
  1. .gitattributes +55 -0
  2. README.md +571 -0
  3. data/CC-MAIN-2013-20/train-00000-of-00058.parquet +3 -0
  4. data/CC-MAIN-2013-20/train-00001-of-00058.parquet +3 -0
  5. data/CC-MAIN-2013-20/train-00002-of-00058.parquet +3 -0
  6. data/CC-MAIN-2013-20/train-00003-of-00058.parquet +3 -0
  7. data/CC-MAIN-2013-20/train-00004-of-00058.parquet +3 -0
  8. data/CC-MAIN-2013-20/train-00005-of-00058.parquet +3 -0
  9. data/CC-MAIN-2013-20/train-00006-of-00058.parquet +3 -0
  10. data/CC-MAIN-2013-20/train-00007-of-00058.parquet +3 -0
  11. data/CC-MAIN-2013-20/train-00008-of-00058.parquet +3 -0
  12. data/CC-MAIN-2013-20/train-00009-of-00058.parquet +3 -0
  13. data/CC-MAIN-2013-20/train-00010-of-00058.parquet +3 -0
  14. data/CC-MAIN-2013-20/train-00011-of-00058.parquet +3 -0
  15. data/CC-MAIN-2013-20/train-00012-of-00058.parquet +3 -0
  16. data/CC-MAIN-2013-20/train-00013-of-00058.parquet +3 -0
  17. data/CC-MAIN-2013-20/train-00014-of-00058.parquet +3 -0
  18. data/CC-MAIN-2013-20/train-00015-of-00058.parquet +3 -0
  19. data/CC-MAIN-2013-20/train-00016-of-00058.parquet +3 -0
  20. data/CC-MAIN-2013-20/train-00017-of-00058.parquet +3 -0
  21. data/CC-MAIN-2013-20/train-00018-of-00058.parquet +3 -0
  22. data/CC-MAIN-2013-20/train-00019-of-00058.parquet +3 -0
  23. data/CC-MAIN-2013-20/train-00020-of-00058.parquet +3 -0
  24. data/CC-MAIN-2013-20/train-00021-of-00058.parquet +3 -0
  25. data/CC-MAIN-2013-20/train-00022-of-00058.parquet +3 -0
  26. data/CC-MAIN-2013-20/train-00023-of-00058.parquet +3 -0
  27. data/CC-MAIN-2013-20/train-00024-of-00058.parquet +3 -0
  28. data/CC-MAIN-2013-20/train-00025-of-00058.parquet +3 -0
  29. data/CC-MAIN-2013-20/train-00026-of-00058.parquet +3 -0
  30. data/CC-MAIN-2013-20/train-00027-of-00058.parquet +3 -0
  31. data/CC-MAIN-2013-20/train-00028-of-00058.parquet +3 -0
  32. data/CC-MAIN-2013-20/train-00029-of-00058.parquet +3 -0
  33. data/CC-MAIN-2013-20/train-00030-of-00058.parquet +3 -0
  34. data/CC-MAIN-2013-20/train-00031-of-00058.parquet +3 -0
  35. data/CC-MAIN-2013-20/train-00032-of-00058.parquet +3 -0
  36. data/CC-MAIN-2013-20/train-00033-of-00058.parquet +3 -0
  37. data/CC-MAIN-2013-20/train-00034-of-00058.parquet +3 -0
  38. data/CC-MAIN-2013-20/train-00035-of-00058.parquet +3 -0
  39. data/CC-MAIN-2013-20/train-00036-of-00058.parquet +3 -0
  40. data/CC-MAIN-2013-20/train-00037-of-00058.parquet +3 -0
  41. data/CC-MAIN-2013-20/train-00038-of-00058.parquet +3 -0
  42. data/CC-MAIN-2013-20/train-00039-of-00058.parquet +3 -0
  43. data/CC-MAIN-2013-20/train-00040-of-00058.parquet +3 -0
  44. data/CC-MAIN-2013-20/train-00041-of-00058.parquet +3 -0
  45. data/CC-MAIN-2013-20/train-00042-of-00058.parquet +3 -0
  46. data/CC-MAIN-2013-20/train-00043-of-00058.parquet +3 -0
  47. data/CC-MAIN-2013-20/train-00044-of-00058.parquet +3 -0
  48. data/CC-MAIN-2013-20/train-00045-of-00058.parquet +3 -0
  49. data/CC-MAIN-2013-20/train-00046-of-00058.parquet +3 -0
  50. data/CC-MAIN-2013-20/train-00047-of-00058.parquet +3 -0
.gitattributes ADDED
@@ -0,0 +1,55 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,571 @@
1
+ ---
2
+ license: odc-by
3
+ task_categories:
4
+ - text-generation
5
+ language:
6
+ - en
7
+ pretty_name: FineWeb-Edu (score >= 2)
8
+ size_categories:
9
+ - n>1T
10
+ configs:
11
+ - config_name: default
12
+ data_files:
13
+ - split: train
14
+ path: data/*/*
15
+ - config_name: CC-MAIN-2024-46
16
+ data_files:
17
+ - split: train
18
+ path: data/CC-MAIN-2024-46/*
19
+ - config_name: CC-MAIN-2024-42
20
+ data_files:
21
+ - split: train
22
+ path: data/CC-MAIN-2024-42/*
23
+ - config_name: CC-MAIN-2024-38
24
+ data_files:
25
+ - split: train
26
+ path: data/CC-MAIN-2024-38/*
27
+ - config_name: CC-MAIN-2024-33
28
+ data_files:
29
+ - split: train
30
+ path: data/CC-MAIN-2024-33/*
31
+ - config_name: CC-MAIN-2024-30
32
+ data_files:
33
+ - split: train
34
+ path: data/CC-MAIN-2024-30/*
35
+ - config_name: CC-MAIN-2024-26
36
+ data_files:
37
+ - split: train
38
+ path: data/CC-MAIN-2024-26/*
39
+ - config_name: CC-MAIN-2024-22
40
+ data_files:
41
+ - split: train
42
+ path: data/CC-MAIN-2024-22/*
43
+ - config_name: CC-MAIN-2024-18
44
+ data_files:
45
+ - split: train
46
+ path: data/CC-MAIN-2024-18/*
47
+ - config_name: CC-MAIN-2024-10
48
+ data_files:
49
+ - split: train
50
+ path: data/CC-MAIN-2024-10/*
51
+ - config_name: CC-MAIN-2023-50
52
+ data_files:
53
+ - split: train
54
+ path: data/CC-MAIN-2023-50/*
55
+ - config_name: CC-MAIN-2023-40
56
+ data_files:
57
+ - split: train
58
+ path: data/CC-MAIN-2023-40/*
59
+ - config_name: CC-MAIN-2023-23
60
+ data_files:
61
+ - split: train
62
+ path: data/CC-MAIN-2023-23/*
63
+ - config_name: CC-MAIN-2023-14
64
+ data_files:
65
+ - split: train
66
+ path: data/CC-MAIN-2023-14/*
67
+ - config_name: CC-MAIN-2023-06
68
+ data_files:
69
+ - split: train
70
+ path: data/CC-MAIN-2023-06/*
71
+ - config_name: CC-MAIN-2022-49
72
+ data_files:
73
+ - split: train
74
+ path: data/CC-MAIN-2022-49/*
75
+ - config_name: CC-MAIN-2022-40
76
+ data_files:
77
+ - split: train
78
+ path: data/CC-MAIN-2022-40/*
79
+ - config_name: CC-MAIN-2022-33
80
+ data_files:
81
+ - split: train
82
+ path: data/CC-MAIN-2022-33/*
83
+ - config_name: CC-MAIN-2022-27
84
+ data_files:
85
+ - split: train
86
+ path: data/CC-MAIN-2022-27/*
87
+ - config_name: CC-MAIN-2022-21
88
+ data_files:
89
+ - split: train
90
+ path: data/CC-MAIN-2022-21/*
91
+ - config_name: CC-MAIN-2022-05
92
+ data_files:
93
+ - split: train
94
+ path: data/CC-MAIN-2022-05/*
95
+ - config_name: CC-MAIN-2021-49
96
+ data_files:
97
+ - split: train
98
+ path: data/CC-MAIN-2021-49/*
99
+ - config_name: CC-MAIN-2021-43
100
+ data_files:
101
+ - split: train
102
+ path: data/CC-MAIN-2021-43/*
103
+ - config_name: CC-MAIN-2021-39
104
+ data_files:
105
+ - split: train
106
+ path: data/CC-MAIN-2021-39/*
107
+ - config_name: CC-MAIN-2021-31
108
+ data_files:
109
+ - split: train
110
+ path: data/CC-MAIN-2021-31/*
111
+ - config_name: CC-MAIN-2021-25
112
+ data_files:
113
+ - split: train
114
+ path: data/CC-MAIN-2021-25/*
115
+ - config_name: CC-MAIN-2021-21
116
+ data_files:
117
+ - split: train
118
+ path: data/CC-MAIN-2021-21/*
119
+ - config_name: CC-MAIN-2021-17
120
+ data_files:
121
+ - split: train
122
+ path: data/CC-MAIN-2021-17/*
123
+ - config_name: CC-MAIN-2021-10
124
+ data_files:
125
+ - split: train
126
+ path: data/CC-MAIN-2021-10/*
127
+ - config_name: CC-MAIN-2021-04
128
+ data_files:
129
+ - split: train
130
+ path: data/CC-MAIN-2021-04/*
131
+ - config_name: CC-MAIN-2020-50
132
+ data_files:
133
+ - split: train
134
+ path: data/CC-MAIN-2020-50/*
135
+ - config_name: CC-MAIN-2020-45
136
+ data_files:
137
+ - split: train
138
+ path: data/CC-MAIN-2020-45/*
139
+ - config_name: CC-MAIN-2020-40
140
+ data_files:
141
+ - split: train
142
+ path: data/CC-MAIN-2020-40/*
143
+ - config_name: CC-MAIN-2020-34
144
+ data_files:
145
+ - split: train
146
+ path: data/CC-MAIN-2020-34/*
147
+ - config_name: CC-MAIN-2020-29
148
+ data_files:
149
+ - split: train
150
+ path: data/CC-MAIN-2020-29/*
151
+ - config_name: CC-MAIN-2020-24
152
+ data_files:
153
+ - split: train
154
+ path: data/CC-MAIN-2020-24/*
155
+ - config_name: CC-MAIN-2020-16
156
+ data_files:
157
+ - split: train
158
+ path: data/CC-MAIN-2020-16/*
159
+ - config_name: CC-MAIN-2020-10
160
+ data_files:
161
+ - split: train
162
+ path: data/CC-MAIN-2020-10/*
163
+ - config_name: CC-MAIN-2020-05
164
+ data_files:
165
+ - split: train
166
+ path: data/CC-MAIN-2020-05/*
167
+ - config_name: CC-MAIN-2019-51
168
+ data_files:
169
+ - split: train
170
+ path: data/CC-MAIN-2019-51/*
171
+ - config_name: CC-MAIN-2019-47
172
+ data_files:
173
+ - split: train
174
+ path: data/CC-MAIN-2019-47/*
175
+ - config_name: CC-MAIN-2019-43
176
+ data_files:
177
+ - split: train
178
+ path: data/CC-MAIN-2019-43/*
179
+ - config_name: CC-MAIN-2019-39
180
+ data_files:
181
+ - split: train
182
+ path: data/CC-MAIN-2019-39/*
183
+ - config_name: CC-MAIN-2019-35
184
+ data_files:
185
+ - split: train
186
+ path: data/CC-MAIN-2019-35/*
187
+ - config_name: CC-MAIN-2019-30
188
+ data_files:
189
+ - split: train
190
+ path: data/CC-MAIN-2019-30/*
191
+ - config_name: CC-MAIN-2019-26
192
+ data_files:
193
+ - split: train
194
+ path: data/CC-MAIN-2019-26/*
195
+ - config_name: CC-MAIN-2019-22
196
+ data_files:
197
+ - split: train
198
+ path: data/CC-MAIN-2019-22/*
199
+ - config_name: CC-MAIN-2019-18
200
+ data_files:
201
+ - split: train
202
+ path: data/CC-MAIN-2019-18/*
203
+ - config_name: CC-MAIN-2019-13
204
+ data_files:
205
+ - split: train
206
+ path: data/CC-MAIN-2019-13/*
207
+ - config_name: CC-MAIN-2019-09
208
+ data_files:
209
+ - split: train
210
+ path: data/CC-MAIN-2019-09/*
211
+ - config_name: CC-MAIN-2019-04
212
+ data_files:
213
+ - split: train
214
+ path: data/CC-MAIN-2019-04/*
215
+ - config_name: CC-MAIN-2018-51
216
+ data_files:
217
+ - split: train
218
+ path: data/CC-MAIN-2018-51/*
219
+ - config_name: CC-MAIN-2018-47
220
+ data_files:
221
+ - split: train
222
+ path: data/CC-MAIN-2018-47/*
223
+ - config_name: CC-MAIN-2018-43
224
+ data_files:
225
+ - split: train
226
+ path: data/CC-MAIN-2018-43/*
227
+ - config_name: CC-MAIN-2018-39
228
+ data_files:
229
+ - split: train
230
+ path: data/CC-MAIN-2018-39/*
231
+ - config_name: CC-MAIN-2018-34
232
+ data_files:
233
+ - split: train
234
+ path: data/CC-MAIN-2018-34/*
235
+ - config_name: CC-MAIN-2018-30
236
+ data_files:
237
+ - split: train
238
+ path: data/CC-MAIN-2018-30/*
239
+ - config_name: CC-MAIN-2018-26
240
+ data_files:
241
+ - split: train
242
+ path: data/CC-MAIN-2018-26/*
243
+ - config_name: CC-MAIN-2018-22
244
+ data_files:
245
+ - split: train
246
+ path: data/CC-MAIN-2018-22/*
247
+ - config_name: CC-MAIN-2018-17
248
+ data_files:
249
+ - split: train
250
+ path: data/CC-MAIN-2018-17/*
251
+ - config_name: CC-MAIN-2018-13
252
+ data_files:
253
+ - split: train
254
+ path: data/CC-MAIN-2018-13/*
255
+ - config_name: CC-MAIN-2018-09
256
+ data_files:
257
+ - split: train
258
+ path: data/CC-MAIN-2018-09/*
259
+ - config_name: CC-MAIN-2018-05
260
+ data_files:
261
+ - split: train
262
+ path: data/CC-MAIN-2018-05/*
263
+ - config_name: CC-MAIN-2017-51
264
+ data_files:
265
+ - split: train
266
+ path: data/CC-MAIN-2017-51/*
267
+ - config_name: CC-MAIN-2017-47
268
+ data_files:
269
+ - split: train
270
+ path: data/CC-MAIN-2017-47/*
271
+ - config_name: CC-MAIN-2017-43
272
+ data_files:
273
+ - split: train
274
+ path: data/CC-MAIN-2017-43/*
275
+ - config_name: CC-MAIN-2017-39
276
+ data_files:
277
+ - split: train
278
+ path: data/CC-MAIN-2017-39/*
279
+ - config_name: CC-MAIN-2017-34
280
+ data_files:
281
+ - split: train
282
+ path: data/CC-MAIN-2017-34/*
283
+ - config_name: CC-MAIN-2017-30
284
+ data_files:
285
+ - split: train
286
+ path: data/CC-MAIN-2017-30/*
287
+ - config_name: CC-MAIN-2017-26
288
+ data_files:
289
+ - split: train
290
+ path: data/CC-MAIN-2017-26/*
291
+ - config_name: CC-MAIN-2017-22
292
+ data_files:
293
+ - split: train
294
+ path: data/CC-MAIN-2017-22/*
295
+ - config_name: CC-MAIN-2017-17
296
+ data_files:
297
+ - split: train
298
+ path: data/CC-MAIN-2017-17/*
299
+ - config_name: CC-MAIN-2017-13
300
+ data_files:
301
+ - split: train
302
+ path: data/CC-MAIN-2017-13/*
303
+ - config_name: CC-MAIN-2017-09
304
+ data_files:
305
+ - split: train
306
+ path: data/CC-MAIN-2017-09/*
307
+ - config_name: CC-MAIN-2017-04
308
+ data_files:
309
+ - split: train
310
+ path: data/CC-MAIN-2017-04/*
311
+ - config_name: CC-MAIN-2016-50
312
+ data_files:
313
+ - split: train
314
+ path: data/CC-MAIN-2016-50/*
315
+ - config_name: CC-MAIN-2016-44
316
+ data_files:
317
+ - split: train
318
+ path: data/CC-MAIN-2016-44/*
319
+ - config_name: CC-MAIN-2016-40
320
+ data_files:
321
+ - split: train
322
+ path: data/CC-MAIN-2016-40/*
323
+ - config_name: CC-MAIN-2016-36
324
+ data_files:
325
+ - split: train
326
+ path: data/CC-MAIN-2016-36/*
327
+ - config_name: CC-MAIN-2016-30
328
+ data_files:
329
+ - split: train
330
+ path: data/CC-MAIN-2016-30/*
331
+ - config_name: CC-MAIN-2016-26
332
+ data_files:
333
+ - split: train
334
+ path: data/CC-MAIN-2016-26/*
335
+ - config_name: CC-MAIN-2016-22
336
+ data_files:
337
+ - split: train
338
+ path: data/CC-MAIN-2016-22/*
339
+ - config_name: CC-MAIN-2016-18
340
+ data_files:
341
+ - split: train
342
+ path: data/CC-MAIN-2016-18/*
343
+ - config_name: CC-MAIN-2016-07
344
+ data_files:
345
+ - split: train
346
+ path: data/CC-MAIN-2016-07/*
347
+ - config_name: CC-MAIN-2015-48
348
+ data_files:
349
+ - split: train
350
+ path: data/CC-MAIN-2015-48/*
351
+ - config_name: CC-MAIN-2015-40
352
+ data_files:
353
+ - split: train
354
+ path: data/CC-MAIN-2015-40/*
355
+ - config_name: CC-MAIN-2015-35
356
+ data_files:
357
+ - split: train
358
+ path: data/CC-MAIN-2015-35/*
359
+ - config_name: CC-MAIN-2015-32
360
+ data_files:
361
+ - split: train
362
+ path: data/CC-MAIN-2015-32/*
363
+ - config_name: CC-MAIN-2015-27
364
+ data_files:
365
+ - split: train
366
+ path: data/CC-MAIN-2015-27/*
367
+ - config_name: CC-MAIN-2015-22
368
+ data_files:
369
+ - split: train
370
+ path: data/CC-MAIN-2015-22/*
371
+ - config_name: CC-MAIN-2015-18
372
+ data_files:
373
+ - split: train
374
+ path: data/CC-MAIN-2015-18/*
375
+ - config_name: CC-MAIN-2015-14
376
+ data_files:
377
+ - split: train
378
+ path: data/CC-MAIN-2015-14/*
379
+ - config_name: CC-MAIN-2015-11
380
+ data_files:
381
+ - split: train
382
+ path: data/CC-MAIN-2015-11/*
383
+ - config_name: CC-MAIN-2015-06
384
+ data_files:
385
+ - split: train
386
+ path: data/CC-MAIN-2015-06/*
387
+ - config_name: CC-MAIN-2014-52
388
+ data_files:
389
+ - split: train
390
+ path: data/CC-MAIN-2014-52/*
391
+ - config_name: CC-MAIN-2014-49
392
+ data_files:
393
+ - split: train
394
+ path: data/CC-MAIN-2014-49/*
395
+ - config_name: CC-MAIN-2014-42
396
+ data_files:
397
+ - split: train
398
+ path: data/CC-MAIN-2014-42/*
399
+ - config_name: CC-MAIN-2014-41
400
+ data_files:
401
+ - split: train
402
+ path: data/CC-MAIN-2014-41/*
403
+ - config_name: CC-MAIN-2014-35
404
+ data_files:
405
+ - split: train
406
+ path: data/CC-MAIN-2014-35/*
407
+ - config_name: CC-MAIN-2014-23
408
+ data_files:
409
+ - split: train
410
+ path: data/CC-MAIN-2014-23/*
411
+ - config_name: CC-MAIN-2014-15
412
+ data_files:
413
+ - split: train
414
+ path: data/CC-MAIN-2014-15/*
415
+ - config_name: CC-MAIN-2014-10
416
+ data_files:
417
+ - split: train
418
+ path: data/CC-MAIN-2014-10/*
419
+ - config_name: CC-MAIN-2013-48
420
+ data_files:
421
+ - split: train
422
+ path: data/CC-MAIN-2013-48/*
423
+ - config_name: CC-MAIN-2013-20
424
+ data_files:
425
+ - split: train
426
+ path: data/CC-MAIN-2013-20/*
427
+ ---
+
+ # 📚 FineWeb-Edu-score-2
+ <center>
+ <img src="https://cdn-uploads.huggingface.co/production/uploads/61c141342aac764ce1654e43/wwRnEQydH9qdRtFofIE-A.png" alt="FineWeb-Edu: The finest collection of educational content the web has to offer">
+ </center>
+
+ > 1.3 trillion tokens of the finest educational data the 🌐 web has to offer
+
+ ## What is it?
+
+ The 📚 FineWeb-Edu dataset consists of **1.3T tokens** ([FineWeb-Edu](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu)) and **5.4T tokens** of educational web pages filtered from the 🍷 FineWeb dataset. This repository contains the 5.4T-token version.
+
+ ### Note: this version uses a lower educational score threshold (2 instead of 3), which yields more documents but lower overall quality than the 1.3T version. For more details check the FineWeb [blog post](https://huggingface.co/spaces/HuggingFaceFW/blogpost-fineweb-v1).
+
+ To enhance FineWeb's quality, we developed an [educational quality classifier](https://huggingface.co/HuggingFaceFW/fineweb-edu-classifier) using annotations generated by Llama3-70B-Instruct. We then used this classifier to retain only the most educational web pages. FineWeb-Edu outperforms FineWeb on popular benchmarks and shows the power of classifiers trained on synthetic data.
+
+ The [Dataset Curation](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu#dataset-curation) section details the process for creating the dataset.
+
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/61c141342aac764ce1654e43/QqXOM8h_ZjjhuCv71xmV7.png)
+
+ ## What is being released?
+
+ Along with the dataset, which includes all filtered CommonCrawl dumps since 2013, we also release the educational classifier used for the filtering as well as the code for training it and running inference at: https://github.com/huggingface/cosmopedia/tree/main/classification.
+
+ ## Changelog
+ _Previous versions remain available in a branch named after the version._
+
+ - **v1.2.0 (27-12-2024):** Added 8 new dumps: `CC-MAIN-2024-18`, `CC-MAIN-2024-22`, `CC-MAIN-2024-26`, `CC-MAIN-2024-30`, `CC-MAIN-2024-33`, `CC-MAIN-2024-38`, `CC-MAIN-2024-42`, `CC-MAIN-2024-46`, covering May to November 2024.
+ - **v1.0.0 (02-06-2024):** Initial version
+
+ ## How to load the dataset
+ Similarly to FineWeb, you can load the full dataset or a specific crawl/dump. Dumps have the format `CC-MAIN-(year)-(week number)`.
+
+ ### Using 🏭 [`datatrove`](https://github.com/huggingface/datatrove/)
+
+ ```python
+ from datatrove.pipeline.readers import ParquetReader
+
+ # limit determines how many documents will be streamed (remove for all)
+ data_reader = ParquetReader("hf://datasets/HuggingFaceFW/fineweb-edu-score-2", glob_pattern="data/*/*.parquet", limit=1000)
+ # or read a single dump instead:
+ data_reader = ParquetReader("hf://datasets/HuggingFaceFW/fineweb-edu-score-2/CC-MAIN-2024-10", limit=1000)
+ for document in data_reader():
+     # do something with document
+     print(document)
+
+ ###############################
+ # OR for a processing pipeline:
+ ###############################
+
+ from datatrove.executor import LocalPipelineExecutor
+ from datatrove.pipeline.readers import ParquetReader
+ from datatrove.pipeline.filters import LambdaFilter
+ from datatrove.pipeline.writers import JsonlWriter
+
+ pipeline_exec = LocalPipelineExecutor(
+     pipeline=[
+         ParquetReader("hf://datasets/HuggingFaceFW/fineweb-edu-score-2/CC-MAIN-2024-10", limit=1000),
+         LambdaFilter(lambda doc: "hugging" in doc.text),
+         JsonlWriter("some-output-path")
+     ],
+     tasks=10
+ )
+ pipeline_exec.run()
+ ```
+
+ ### Using `datasets`
+
+ ```python
+ from datasets import load_dataset
+
+ fw = load_dataset("HuggingFaceFW/fineweb-edu-score-2", name="CC-MAIN-2024-10", split="train", streaming=True)
+ ```
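+
+ If you would rather materialize a single dump on disk than stream it, the standard `load_dataset` arguments apply. A minimal sketch (the dump name and `num_proc` value below are only examples):
+
+ ```python
+ from datasets import load_dataset
+
+ # download and prepare one dump locally instead of streaming (requires enough disk space)
+ fw = load_dataset("HuggingFaceFW/fineweb-edu-score-2", name="CC-MAIN-2013-20", split="train", num_proc=8)
+ print(fw)
+ ```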
+
+ ## Dataset curation
+ A new approach has recently emerged for filtering LLM training datasets: using synthetic data to develop classifiers for identifying educational content. This technique was used in the training of [Llama3](https://ai.meta.com/blog/meta-llama-3-meta-ai-responsibility/), [Claude3](https://www-cdn.anthropic.com/de8ba9b01c9ab7cbabf5c33b80b7bbc618857627/Model_Card_Claude_3.pdf) and [Phi3](https://arxiv.org/abs/2404.14219), but its large-scale impact on web data filtering hasn't been fully explored or published.
+
+ The highly popular Phi3 models were trained on 3.3 and 4.8 trillion tokens, with the paper stating: "Our training data consists of heavily filtered publicly available web data (according to the 'educational level') from various open internet sources, as well as synthetic LLM-generated data." Similarly, the Llama3 blog post notes: "We found that previous generations of Llama are good at identifying high-quality data, so we used Llama 2 to help build the text-quality classifiers that are powering Llama 3." However, these classifiers and filtered datasets are not publicly available. To enhance FineWeb's quality, we developed an educational quality classifier using annotations generated by [Llama3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) to create FineWeb-Edu.
+
+ ### Annotation
+ We used [Llama3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) to score 500k FineWeb samples for their educational quality on a scale from 0 to 5.
+
+ We explored various prompts and found that the additive scale by [Yuan et al.](https://arxiv.org/pdf/2401.10020) worked best. To avoid the LLM favoring highly technical pages like arXiv abstracts and submissions, we focused on grade-school and middle-school level knowledge. By setting a threshold of 3 (on a scale of 0 to 5) during the filtering process, we were able to also retain some high-level educational pages. The final prompt can be found in the FineWeb [blog post](https://huggingface.co/spaces/HuggingFaceFW/blogpost-fineweb-v1).
+
+ We also experimented with different LLMs: Llama3-70B-Instruct, Mixtral-8x-7B-Instruct, and Mixtral-8x22B-Instruct. Llama3 and Mixtral-8x22B produced similar scores, while Mixtral-8x7B tended to be more generous, not fully adhering to the score scale. Verga et al. suggest using multiple LLMs as juries. We tried averaging the scores from the three models, but this shifted the distribution to the right due to the higher scores from Mixtral-8x7B. Training on a dataset filtered with a classifier using jury annotations performed worse than using a classifier based on Llama3 annotations. We hypothesize that the jury-based approach retains more low-quality samples.
+
+ ### Classifier training
+ We fine-tuned a BERT-like regression model using these annotations, based on [Snowflake-arctic-embed](https://huggingface.co/Snowflake/snowflake-arctic-embed-m). When converted to a binary classification using a score of 3 as the threshold for keeping or removing documents, the model achieved an F1 score of 82%. Classifying the 15T tokens of FineWeb took 6k H100 GPU hours.
+
+ The classifier is available at: [https://huggingface.co/HuggingFaceFW/fineweb-edu-classifier/](https://huggingface.co/HuggingFaceFW/fineweb-edu-classifier/)
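+
+ As an illustration, the sketch below scores a piece of text with the released classifier through the `transformers` sequence-classification interface; it assumes the model returns a single regression logit on the 0-5 annotation scale (see the classifier's model card for the recommended usage).
+
+ ```python
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+ tokenizer = AutoTokenizer.from_pretrained("HuggingFaceFW/fineweb-edu-classifier")
+ model = AutoModelForSequenceClassification.from_pretrained("HuggingFaceFW/fineweb-edu-classifier")
+
+ text = "Photosynthesis is the process by which plants convert sunlight into chemical energy."
+ inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="longest")
+ # single regression logit -> educational score, clamped and rounded to an integer
+ score = model(**inputs).logits.squeeze(-1).item()
+ int_score = int(round(max(0, min(score, 5))))
+ print(score, int_score)  # documents in this dataset have an integer score >= 2
+ ```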
+
+ ### Filtering and results
+ **Note**: You can find more details about the ablations and results in the FineWeb [blog post](https://huggingface.co/spaces/HuggingFaceFW/blogpost-fineweb-v1).
+
+ We investigated the impact of using different thresholds for the filtering and found that threshold 3 gave the best overall results. Although using a threshold higher than 3 improves performance on knowledge- and reasoning-intensive benchmarks, it significantly degrades performance on HellaSwag and PIQA.
+
+ We then built 📚 FineWeb-Edu by filtering out samples with scores lower than 3. This removed 92% of the dataset, leaving us with 1.3T educational tokens. Our ablation demonstrated that this refined dataset surpasses 🍷 FineWeb and all other open web datasets, with remarkable improvements on educational benchmarks such as MMLU, ARC, and OpenBookQA. The plot below compares FineWeb-Edu to other web datasets:
+
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/61c141342aac764ce1654e43/hJlyTgDzZpYuxO9LUm0PF.png)
+
+ To retain more tokens, we also experimented with a less strict threshold of 2 instead of 3. While less performant than the threshold-3 filtering, it still outperforms FineWeb and preserves 5.4T tokens. We release these two datasets as [FineWeb-Edu](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu) and [FineWeb-Edu-score-2](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu-score-2) along with the [classifier](https://huggingface.co/HuggingFaceFW/fineweb-edu-classifier).
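+
+ Because this release keeps every document the classifier scored at least 2, a stricter subset (approximating the threshold-3 FineWeb-Edu filtering) can be recovered by filtering on the per-document score. A minimal sketch, assuming the `int_score` column from the parent FineWeb-Edu schema is also present here:
+
+ ```python
+ from itertools import islice
+ from datasets import load_dataset
+
+ fw = load_dataset("HuggingFaceFW/fineweb-edu-score-2", name="CC-MAIN-2013-20", split="train", streaming=True)
+ # keep only documents rated >= 3 by the educational classifier
+ edu_subset = fw.filter(lambda doc: doc["int_score"] >= 3)
+ for doc in islice(edu_subset, 3):
+     print(doc["int_score"], doc["text"][:80])
+ ```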
+
+ You will find all the ablation models in [this collection](https://huggingface.co/collections/HuggingFaceFW/ablation-models-662457b0d213e8c14fe47f32). The FineWeb-Edu ablation model (trained on 350B tokens) is available at [https://huggingface.co/HuggingFaceFW/ablation-model-fineweb-edu](https://huggingface.co/HuggingFaceFW/ablation-model-fineweb-edu).
+
+ ## Considerations for Using the Data
+ This section is copied from the parent dataset: [FineWeb](https://huggingface.co/datasets/HuggingFaceFW/fineweb).
+
+ ### Social Impact of Dataset
+
+ With the release of this dataset we aim to make model training more accessible to the machine learning community at large.
+
+ While multiple open-weights models with strong performance have been publicly released in the past, more often than not these releases are not accompanied by the corresponding training dataset. This is unfortunate, as the dataset's specificities and characteristics have been demonstrated to have a very large impact on model performance. As the creation of a high-quality training dataset is a fundamental requirement for training an LLM capable of excelling at downstream tasks, with 🍷 FineWeb we (a) make the dataset creation process more transparent, by sharing our entire processing setup including the codebase used, and (b) help alleviate the costs of dataset curation, both in time and in compute, for model creators by publicly releasing our dataset with the community.
+
+ ### Discussion of Biases
+
+ Efforts were made to minimize the amount of NSFW and toxic content present in the dataset by employing filtering on the URL level. However, there are still a significant number of documents present in the final dataset that could be considered toxic or contain harmful content. As 🍷 FineWeb was sourced from the web as a whole, any harmful biases typically present in it may be reproduced in our dataset.
+
+ We deliberately avoided using machine learning filtering methods that define text quality based on the similarity to a "gold" source such as Wikipedia or toxicity classifiers, as these methods have been known to [disproportionately remove content in specific dialects](https://aclanthology.org/D16-1120/) and [overclassify as toxic text related to specific social identities](https://arxiv.org/pdf/2109.07445.pdf), respectively.
+
+ ### Other Known Limitations
+
+ As a consequence of some of the filtering steps applied, it is likely that code content is not prevalent in our dataset. If you are training a model that should also perform code tasks, we recommend you combine 🍷 FineWeb with a code dataset, such as [The Stack v2](https://huggingface.co/datasets/bigcode/the-stack-v2). You should also probably consider complementing 🍷 FineWeb with specialized curated sources (such as Wikipedia, for example) as they will likely have better formatting than the Wikipedia content included in 🍷 FineWeb (we did not tailor the processing to individual websites).
+
+ ## Additional Information
+
+ ### Licensing Information
+
+ The dataset is released under the **Open Data Commons Attribution License (ODC-By) v1.0** [license](https://opendatacommons.org/licenses/by/1-0/). The use of this dataset is also subject to [CommonCrawl's Terms of Use](https://commoncrawl.org/terms-of-use).
+
+ ### Future work
+
+ We plan to work on a better educational classifier to improve the quality of FineWeb-Edu.
+
+ ### Citation Information
+
+ ```
+ @software{lozhkov2024fineweb-edu,
+   author = {Lozhkov, Anton and Ben Allal, Loubna and von Werra, Leandro and Wolf, Thomas},
+   title = {FineWeb-Edu},
+   month = May,
+   year = 2024,
+   url = {https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu}
+ }
+ ```
data/CC-MAIN-2013-20/train-00000-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a466452ece5d65e971889ffc01f4fe6d78cd6e9b22e9ccf2fefd200b4e52e4f5
3
+ size 2421484584
data/CC-MAIN-2013-20/train-00001-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:867f9bb8fecca7a5742508e4559081806156644f8a57ac125ae36fbd209a79cb
3
+ size 2421150141
data/CC-MAIN-2013-20/train-00002-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b60086ecfa077d03354271375b5e95ef13d7a9f49d3aebed26a6958fa87ff1a9
3
+ size 2420700748
data/CC-MAIN-2013-20/train-00003-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ebb459eb33f46684f201cdeffa9c20f874cba314850af392a984de6fe563e8a7
3
+ size 2424690795
data/CC-MAIN-2013-20/train-00004-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:134c5f9150b917e7aec2cf05cf3ed88d4db29864bbfaf9ba72424bf14ddd696d
3
+ size 2427775529
data/CC-MAIN-2013-20/train-00005-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f1e5c5dd85b4b00320473c32007f987c7c2114e874d57fc5deb891c60e9f600
3
+ size 2420834487
data/CC-MAIN-2013-20/train-00006-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ed54e8d0e61898bf7c375c02f4f3f8b6ab3a7c83e1f909e5141ecdf1cbe6561
3
+ size 2421861912
data/CC-MAIN-2013-20/train-00007-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ced39ad5beaa226e55d3cba2152609f15534a0ebab8146d9703e7b9277d10519
3
+ size 2418569229
data/CC-MAIN-2013-20/train-00008-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c7849d1785ce6e411f33c016453f040acc23747f697073f5d9731102123cc8f7
3
+ size 2412895767
data/CC-MAIN-2013-20/train-00009-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36776feba09f038f6d509511c30f7bb2c7c632753b3e33e72f5e4bc0c9d03198
3
+ size 2416837741
data/CC-MAIN-2013-20/train-00010-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:374dec8eaad8b2ec5da4b6ec68df0104de4446a6a0d02d4d22e246a288864fb6
3
+ size 2419107801
data/CC-MAIN-2013-20/train-00011-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:baf50895214909efa82c417189f05b12ee8fe23bf5e49cde8a50df1129aac01b
3
+ size 2412444373
data/CC-MAIN-2013-20/train-00012-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1970f832815559b0f3eee8ef9d3125a1a94d87e4e83d893868afc9af66afe436
3
+ size 2413406631
data/CC-MAIN-2013-20/train-00013-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cb082f7bb9dc0cfeccdc95b089cc33111f6349763eb2b6d3dbaa2a45b2223149
3
+ size 2417609529
data/CC-MAIN-2013-20/train-00014-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1066ac31d6e315b0727162bbfeaccce03bba4b1c62c9166ee412d140e03222b3
3
+ size 2403227738
data/CC-MAIN-2013-20/train-00015-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3fb23f0e22b8c65c225f76e47dd1f4c4ef0b0780768c0420784f574e88ef5cc
3
+ size 2405210316
data/CC-MAIN-2013-20/train-00016-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e58448a17c7a21a29b2d990d98b2d97e0856f467cfd371b21beba93e4fe51bd6
3
+ size 2409064010
data/CC-MAIN-2013-20/train-00017-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:881be89c63151c5c828cc3cda8a305f3fc1988933606c1dee244b7691982add6
3
+ size 2398760783
data/CC-MAIN-2013-20/train-00018-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e2aa2163bfaa9bafb7385451cdf2304fd3cb53f9ea60e132a0c3f2560fe05c9d
3
+ size 2402145567
data/CC-MAIN-2013-20/train-00019-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7ba42dc621a9b54f5a22b6f94e36679647d9c6e6d14f640cf3e242051f7984b
3
+ size 2390604122
data/CC-MAIN-2013-20/train-00020-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73304c24f2fab908498e2faed6b45496c764e104125725ebfc856d09013d6286
3
+ size 2400968451
data/CC-MAIN-2013-20/train-00021-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be446c459f90d9c3d11692ff1ccea0c8e516f2acf084a4ab593fe46bf2490e50
3
+ size 2401116646
data/CC-MAIN-2013-20/train-00022-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d4949ad7ad4a4cb310d7a36843eaa689a0cc647a52099de939174212fdf72257
3
+ size 2400718009
data/CC-MAIN-2013-20/train-00023-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:351a9b57de3b0b358165c35aee6a71793cd11aeff0ba5a2edb48e578a56e80dd
3
+ size 2394189333
data/CC-MAIN-2013-20/train-00024-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93a8302dfe649290c64dbe7621d6683a25829398d07588fd9be69f7d11940d56
3
+ size 2392674183
data/CC-MAIN-2013-20/train-00025-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e69788c63c57da28e24f894181a5f7c5129c691087764fdcc5f2870f2c72bbe
3
+ size 2403664611
data/CC-MAIN-2013-20/train-00026-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:776b0222f8b43649267e2018a433051db6ad656e0524cb79e6fc502b635501c9
3
+ size 2396437547
data/CC-MAIN-2013-20/train-00027-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56c43d082f9f20e3d538657bb1b8eac2397aad589d72af572187aab49eeac16e
3
+ size 2388614584
data/CC-MAIN-2013-20/train-00028-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:22107f94278d3375ebeb828a34fb4a6e8fef604181b194e08e66a68cb53eb71a
3
+ size 2385954799
data/CC-MAIN-2013-20/train-00029-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3680034ad2fd3a758cf0bd96af669da0e9c6f5946ec84f00b4e9b729826441b
3
+ size 2389247824
data/CC-MAIN-2013-20/train-00030-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cb44d4b712fa90af8195eb3e8f67ceb004013b8813785659f86397199268ea2d
3
+ size 2382578955
data/CC-MAIN-2013-20/train-00031-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9a22f9957f8667e2c99f7fe9f239dd0eee31f0bb076660f9683ab968b9f590b
3
+ size 2384852952
data/CC-MAIN-2013-20/train-00032-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b491ed5ab3eaf4970f570f40ecbaa1622e7749965662d9b6440c0b3c29cf20b
3
+ size 2375615515
data/CC-MAIN-2013-20/train-00033-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be2f8e46c5324f6ce21db2485c6b75d8392d9c0deef4064a915d2c49886fc04a
3
+ size 2379065571
data/CC-MAIN-2013-20/train-00034-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dbefdfcbbbbeecfa699d786d54ee7fdb578ecd13dce793f69b108d3f524fe80c
3
+ size 2386214675
data/CC-MAIN-2013-20/train-00035-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4758a944346597794c76aa788db112b0f3d1d60c8ef673ffe1905fdee724c4c2
3
+ size 2374185636
data/CC-MAIN-2013-20/train-00036-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2891d2116d4f57627bb70c50a3557c26d61cafc0a79c2d4a44bbee7b8327dfdd
3
+ size 2377595520
data/CC-MAIN-2013-20/train-00037-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b2b5ca55739149b231680867698b29089aa527511d2e46daadaba2e0aa12a459
3
+ size 2378234484
data/CC-MAIN-2013-20/train-00038-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:492c07787661c4899395eb105bdbe8110e070caf3029715058e55d99c677a44a
3
+ size 2372490030
data/CC-MAIN-2013-20/train-00039-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:33a249cd43133ed240ae3e118d27c608ca1f5c17ec72e6fb6a28a567f3a8bf10
3
+ size 2373555561
data/CC-MAIN-2013-20/train-00040-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:39b5e0aa8bea1a23fb9f06c95f67c35fe342d6e318d5e79449fc09c0c5ec01d9
3
+ size 2372244429
data/CC-MAIN-2013-20/train-00041-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6b9b92445c3978a4125217066664eeb44fa6ea7a411a6c68179f221c7dbf627
3
+ size 2371266303
data/CC-MAIN-2013-20/train-00042-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5129cb18c225dde5c88195e6c3fe307f3bbf78ba53359a1afe3b26d1829561c0
3
+ size 2362438842
data/CC-MAIN-2013-20/train-00043-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac5747744e331271e65f50d7cd934339ffd7407c9d97ff5cfc95e7a4d3f769ba
3
+ size 2369544718
data/CC-MAIN-2013-20/train-00044-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1322e5927c5c27508e31ac9d9a86213c7b8c3a1de1bdec009a8d7e3ac7b6852c
3
+ size 2371398812
data/CC-MAIN-2013-20/train-00045-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e834486fd3ff044ca75c39ab61e8750a2f2c3a37b26675ad501ce1a60a3a434
3
+ size 2359852280
data/CC-MAIN-2013-20/train-00046-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46074de146107c02eda7121f4bbafce18b613f66cd0a56f71afedf6ce3a306cc
3
+ size 2370689089
data/CC-MAIN-2013-20/train-00047-of-00058.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:edd97e004b63532330dad15e3b22eba34fb2a7afd0f9f82b8b13361e0e39bd33
3
+ size 2364438354