Datasets: HuggingFaceFW/fineweb-edu

Modalities: Tabular, Text
Formats: parquet
Languages: English
ArXiv: 2406.17557
DOI: 10.57967/hf/2497
Libraries: Datasets, Dask
License: odc-by
guipenedo, lhoestq, anton-l, loubnabnl, thomwolf committed
Commit 4863ab0 · verified · 0 Parent(s)

Super-squash branch 'v1.3.0' using huggingface_hub


Co-authored-by: lhoestq <[email protected]>
Co-authored-by: anton-l <[email protected]>
Co-authored-by: loubnabnl <[email protected]>
Co-authored-by: thomwolf <[email protected]>

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. .gitattributes +55 -0
  2. README.md +626 -0
  3. data/CC-MAIN-2013-20/train-00000-of-00014.parquet +3 -0
  4. data/CC-MAIN-2013-20/train-00001-of-00014.parquet +3 -0
  5. data/CC-MAIN-2013-20/train-00002-of-00014.parquet +3 -0
  6. data/CC-MAIN-2013-20/train-00003-of-00014.parquet +3 -0
  7. data/CC-MAIN-2013-20/train-00004-of-00014.parquet +3 -0
  8. data/CC-MAIN-2013-20/train-00005-of-00014.parquet +3 -0
  9. data/CC-MAIN-2013-20/train-00006-of-00014.parquet +3 -0
  10. data/CC-MAIN-2013-20/train-00007-of-00014.parquet +3 -0
  11. data/CC-MAIN-2013-20/train-00008-of-00014.parquet +3 -0
  12. data/CC-MAIN-2013-20/train-00009-of-00014.parquet +3 -0
  13. data/CC-MAIN-2013-20/train-00010-of-00014.parquet +3 -0
  14. data/CC-MAIN-2013-20/train-00011-of-00014.parquet +3 -0
  15. data/CC-MAIN-2013-20/train-00012-of-00014.parquet +3 -0
  16. data/CC-MAIN-2013-20/train-00013-of-00014.parquet +3 -0
  17. data/CC-MAIN-2013-48/train-00000-of-00014.parquet +3 -0
  18. data/CC-MAIN-2013-48/train-00001-of-00014.parquet +3 -0
  19. data/CC-MAIN-2013-48/train-00002-of-00014.parquet +3 -0
  20. data/CC-MAIN-2013-48/train-00003-of-00014.parquet +3 -0
  21. data/CC-MAIN-2013-48/train-00004-of-00014.parquet +3 -0
  22. data/CC-MAIN-2013-48/train-00005-of-00014.parquet +3 -0
  23. data/CC-MAIN-2013-48/train-00006-of-00014.parquet +3 -0
  24. data/CC-MAIN-2013-48/train-00007-of-00014.parquet +3 -0
  25. data/CC-MAIN-2013-48/train-00008-of-00014.parquet +3 -0
  26. data/CC-MAIN-2013-48/train-00009-of-00014.parquet +3 -0
  27. data/CC-MAIN-2013-48/train-00010-of-00014.parquet +3 -0
  28. data/CC-MAIN-2013-48/train-00011-of-00014.parquet +3 -0
  29. data/CC-MAIN-2013-48/train-00012-of-00014.parquet +3 -0
  30. data/CC-MAIN-2013-48/train-00013-of-00014.parquet +3 -0
  31. data/CC-MAIN-2014-10/train-00000-of-00014.parquet +3 -0
  32. data/CC-MAIN-2014-10/train-00001-of-00014.parquet +3 -0
  33. data/CC-MAIN-2014-10/train-00002-of-00014.parquet +3 -0
  34. data/CC-MAIN-2014-10/train-00003-of-00014.parquet +3 -0
  35. data/CC-MAIN-2014-10/train-00004-of-00014.parquet +3 -0
  36. data/CC-MAIN-2014-10/train-00005-of-00014.parquet +3 -0
  37. data/CC-MAIN-2014-10/train-00006-of-00014.parquet +3 -0
  38. data/CC-MAIN-2014-10/train-00007-of-00014.parquet +3 -0
  39. data/CC-MAIN-2014-10/train-00008-of-00014.parquet +3 -0
  40. data/CC-MAIN-2014-10/train-00009-of-00014.parquet +3 -0
  41. data/CC-MAIN-2014-10/train-00010-of-00014.parquet +3 -0
  42. data/CC-MAIN-2014-10/train-00011-of-00014.parquet +3 -0
  43. data/CC-MAIN-2014-10/train-00012-of-00014.parquet +3 -0
  44. data/CC-MAIN-2014-10/train-00013-of-00014.parquet +3 -0
  45. data/CC-MAIN-2014-15/train-00000-of-00014.parquet +3 -0
  46. data/CC-MAIN-2014-15/train-00001-of-00014.parquet +3 -0
  47. data/CC-MAIN-2014-15/train-00002-of-00014.parquet +3 -0
  48. data/CC-MAIN-2014-15/train-00003-of-00014.parquet +3 -0
  49. data/CC-MAIN-2014-15/train-00004-of-00014.parquet +3 -0
  50. data/CC-MAIN-2014-15/train-00005-of-00014.parquet +3 -0
.gitattributes ADDED
@@ -0,0 +1,55 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,626 @@
+ ---
+ license: odc-by
+ task_categories:
+ - text-generation
+ language:
+ - en
+ pretty_name: FineWeb-Edu
+ size_categories:
+ - n>1T
+ configs:
+ - config_name: default
+   data_files:
+   - split: train
+     path: data/*/*
+   features:
+   - name: text
+     dtype: string
+   - name: id
+     dtype: string
+   - name: dump
+     dtype: string
+   - name: url
+     dtype: string
+   - name: date
+     dtype: string
+   - name: file_path
+     dtype: string
+   - name: language
+     dtype: string
+   - name: language_score
+     dtype: float64
+   - name: token_count
+     dtype: int64
+   - name: score
+     dtype: float64
+   - name: int_score
+     dtype: int64
+ - config_name: sample-10BT
+   data_files:
+   - split: train
+     path: sample/10BT/*
+ - config_name: sample-100BT
+   data_files:
+   - split: train
+     path: sample/100BT/*
+ - config_name: sample-350BT
+   data_files:
+   - split: train
+     path: sample/350BT/*
+ - config_name: CC-MAIN-2024-51
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2024-51/*
+ - config_name: CC-MAIN-2024-46
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2024-46/*
+ - config_name: CC-MAIN-2024-42
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2024-42/*
+ - config_name: CC-MAIN-2024-38
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2024-38/*
+ - config_name: CC-MAIN-2024-33
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2024-33/*
+ - config_name: CC-MAIN-2024-30
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2024-30/*
+ - config_name: CC-MAIN-2024-26
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2024-26/*
+ - config_name: CC-MAIN-2024-22
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2024-22/*
+ - config_name: CC-MAIN-2024-18
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2024-18/*
+ - config_name: CC-MAIN-2024-10
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2024-10/*
+ - config_name: CC-MAIN-2023-50
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2023-50/*
+ - config_name: CC-MAIN-2023-40
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2023-40/*
+ - config_name: CC-MAIN-2023-23
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2023-23/*
+ - config_name: CC-MAIN-2023-14
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2023-14/*
+ - config_name: CC-MAIN-2023-06
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2023-06/*
+ - config_name: CC-MAIN-2022-49
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2022-49/*
+ - config_name: CC-MAIN-2022-40
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2022-40/*
+ - config_name: CC-MAIN-2022-33
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2022-33/*
+ - config_name: CC-MAIN-2022-27
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2022-27/*
+ - config_name: CC-MAIN-2022-21
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2022-21/*
+ - config_name: CC-MAIN-2022-05
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2022-05/*
+ - config_name: CC-MAIN-2021-49
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2021-49/*
+ - config_name: CC-MAIN-2021-43
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2021-43/*
+ - config_name: CC-MAIN-2021-39
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2021-39/*
+ - config_name: CC-MAIN-2021-31
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2021-31/*
+ - config_name: CC-MAIN-2021-25
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2021-25/*
+ - config_name: CC-MAIN-2021-21
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2021-21/*
+ - config_name: CC-MAIN-2021-17
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2021-17/*
+ - config_name: CC-MAIN-2021-10
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2021-10/*
+ - config_name: CC-MAIN-2021-04
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2021-04/*
+ - config_name: CC-MAIN-2020-50
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2020-50/*
+ - config_name: CC-MAIN-2020-45
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2020-45/*
+ - config_name: CC-MAIN-2020-40
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2020-40/*
+ - config_name: CC-MAIN-2020-34
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2020-34/*
+ - config_name: CC-MAIN-2020-29
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2020-29/*
+ - config_name: CC-MAIN-2020-24
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2020-24/*
+ - config_name: CC-MAIN-2020-16
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2020-16/*
+ - config_name: CC-MAIN-2020-10
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2020-10/*
+ - config_name: CC-MAIN-2020-05
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2020-05/*
+ - config_name: CC-MAIN-2019-51
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2019-51/*
+ - config_name: CC-MAIN-2019-47
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2019-47/*
+ - config_name: CC-MAIN-2019-43
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2019-43/*
+ - config_name: CC-MAIN-2019-39
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2019-39/*
+ - config_name: CC-MAIN-2019-35
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2019-35/*
+ - config_name: CC-MAIN-2019-30
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2019-30/*
+ - config_name: CC-MAIN-2019-26
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2019-26/*
+ - config_name: CC-MAIN-2019-22
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2019-22/*
+ - config_name: CC-MAIN-2019-18
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2019-18/*
+ - config_name: CC-MAIN-2019-13
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2019-13/*
+ - config_name: CC-MAIN-2019-09
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2019-09/*
+ - config_name: CC-MAIN-2019-04
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2019-04/*
+ - config_name: CC-MAIN-2018-51
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2018-51/*
+ - config_name: CC-MAIN-2018-47
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2018-47/*
+ - config_name: CC-MAIN-2018-43
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2018-43/*
+ - config_name: CC-MAIN-2018-39
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2018-39/*
+ - config_name: CC-MAIN-2018-34
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2018-34/*
+ - config_name: CC-MAIN-2018-30
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2018-30/*
+ - config_name: CC-MAIN-2018-26
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2018-26/*
+ - config_name: CC-MAIN-2018-22
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2018-22/*
+ - config_name: CC-MAIN-2018-17
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2018-17/*
+ - config_name: CC-MAIN-2018-13
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2018-13/*
+ - config_name: CC-MAIN-2018-09
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2018-09/*
+ - config_name: CC-MAIN-2018-05
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2018-05/*
+ - config_name: CC-MAIN-2017-51
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2017-51/*
+ - config_name: CC-MAIN-2017-47
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2017-47/*
+ - config_name: CC-MAIN-2017-43
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2017-43/*
+ - config_name: CC-MAIN-2017-39
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2017-39/*
+ - config_name: CC-MAIN-2017-34
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2017-34/*
+ - config_name: CC-MAIN-2017-30
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2017-30/*
+ - config_name: CC-MAIN-2017-26
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2017-26/*
+ - config_name: CC-MAIN-2017-22
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2017-22/*
+ - config_name: CC-MAIN-2017-17
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2017-17/*
+ - config_name: CC-MAIN-2017-13
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2017-13/*
+ - config_name: CC-MAIN-2017-09
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2017-09/*
+ - config_name: CC-MAIN-2017-04
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2017-04/*
+ - config_name: CC-MAIN-2016-50
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2016-50/*
+ - config_name: CC-MAIN-2016-44
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2016-44/*
+ - config_name: CC-MAIN-2016-40
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2016-40/*
+ - config_name: CC-MAIN-2016-36
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2016-36/*
+ - config_name: CC-MAIN-2016-30
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2016-30/*
+ - config_name: CC-MAIN-2016-26
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2016-26/*
+ - config_name: CC-MAIN-2016-22
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2016-22/*
+ - config_name: CC-MAIN-2016-18
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2016-18/*
+ - config_name: CC-MAIN-2016-07
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2016-07/*
+ - config_name: CC-MAIN-2015-48
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2015-48/*
+ - config_name: CC-MAIN-2015-40
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2015-40/*
+ - config_name: CC-MAIN-2015-35
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2015-35/*
+ - config_name: CC-MAIN-2015-32
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2015-32/*
+ - config_name: CC-MAIN-2015-27
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2015-27/*
+ - config_name: CC-MAIN-2015-22
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2015-22/*
+ - config_name: CC-MAIN-2015-18
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2015-18/*
+ - config_name: CC-MAIN-2015-14
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2015-14/*
+ - config_name: CC-MAIN-2015-11
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2015-11/*
+ - config_name: CC-MAIN-2015-06
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2015-06/*
+ - config_name: CC-MAIN-2014-52
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2014-52/*
+ - config_name: CC-MAIN-2014-49
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2014-49/*
+ - config_name: CC-MAIN-2014-42
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2014-42/*
+ - config_name: CC-MAIN-2014-41
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2014-41/*
+ - config_name: CC-MAIN-2014-35
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2014-35/*
+ - config_name: CC-MAIN-2014-23
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2014-23/*
+ - config_name: CC-MAIN-2014-15
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2014-15/*
+ - config_name: CC-MAIN-2014-10
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2014-10/*
+ - config_name: CC-MAIN-2013-48
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2013-48/*
+ - config_name: CC-MAIN-2013-20
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2013-20/*
+ ---
+
+ # 📚 FineWeb-Edu
+ <center>
+ <img src="https://cdn-uploads.huggingface.co/production/uploads/61c141342aac764ce1654e43/wwRnEQydH9qdRtFofIE-A.png" alt="FineWeb-Edu: The finest collection of educational content the web has to offer">
+ </center>
+
+ > 1.3 trillion tokens of the finest educational data the 🌐 web has to offer
+
+ **Paper:** https://arxiv.org/abs/2406.17557
+
+ ## What is it?
+
+ 📚 FineWeb-Edu consists of **1.3T tokens** of educational web pages filtered from the 🍷 FineWeb dataset; a less strictly filtered **5.4T-token** variant is available as [FineWeb-Edu-score-2](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu-score-2). This repository contains the 1.3 trillion token version.
+
+ To enhance FineWeb's quality, we developed an [educational quality classifier](https://huggingface.co/HuggingFaceFW/fineweb-edu-classifier) using annotations generated by Llama3-70B-Instruct. We then used this classifier to retain only the most educational web pages. FineWeb-Edu outperforms FineWeb on popular benchmarks and shows the power of classifiers trained on synthetic data.
+
+ The [Dataset Curation](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu#dataset-curation) section details the process for creating the dataset.
+
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/61c141342aac764ce1654e43/QqXOM8h_ZjjhuCv71xmV7.png)
+
+ You can find a deduplicated version of FineWeb-Edu in [SmolLM-Corpus](https://huggingface.co/datasets/HuggingFaceTB/smollm-corpus). We find that deduplicating this dataset has no impact on model performance in our ablation setup (a 1.8B model trained on 350B tokens).
+
+ ## What is being released?
+
+ Along with the dataset, which includes all filtered CommonCrawl dumps since 2013, we also release the educational classifier used for the filtering, as well as the code for training it and running inference, at: https://github.com/huggingface/cosmopedia/tree/main/classification
+
+ ## Changelog
+ _Previous versions remain available in the branch `version name`._
+
+ - **v1.3.0 (31-01-2025):** Fixed an issue with some dumps where some documents hadn't been processed: `CC-MAIN-2024-10`, `CC-MAIN-2024-18`, `CC-MAIN-2024-22`, `CC-MAIN-2024-26`, `CC-MAIN-2024-30`, `CC-MAIN-2024-33`, `CC-MAIN-2024-38`, `CC-MAIN-2024-42`, `CC-MAIN-2024-46` -- they now contain more data (~35B additional tokens).
+ - **v1.2.0 (03-01-2025):** Added 9 new snapshots: `CC-MAIN-2024-18`, `CC-MAIN-2024-22`, `CC-MAIN-2024-26`, `CC-MAIN-2024-30`, `CC-MAIN-2024-33`, `CC-MAIN-2024-38`, `CC-MAIN-2024-42`, `CC-MAIN-2024-46`, `CC-MAIN-2024-51`, covering April to December 2024.
+ - **v1.0.0 (02-06-2024):** Initial version
+
+
+ ## How to load the dataset
+ Similarly to FineWeb, you can load the full dataset or a specific crawl/dump. Dumps have the format `CC-MAIN-(year)-(week number)`.
+
+ ### (Smaller) sample versions
+ Along with the `default` config (all the data) and the configs for each individual dump, you can also download the following configs:
+ - `sample-350BT`: a subset randomly sampled from the whole dataset of around 350B gpt2 tokens
+ - `sample-100BT`: a subset randomly sampled from the whole dataset of around 100B gpt2 tokens
+ - `sample-10BT`: a subset randomly sampled from the whole dataset of around 10B gpt2 tokens
+
+ `sample-10BT` was sampled from `sample-100BT`, which in turn was sampled from `sample-350BT`.
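+
+ If you just want the raw parquet files of a sample (or of a single dump) on disk rather than streaming them, `huggingface_hub` can download only the matching paths. A minimal sketch, not from the original card; the `allow_patterns` value mirrors the `sample/10BT/*` path used in the configs above:
+
+ ```python
+ from huggingface_hub import snapshot_download
+
+ # Fetch only the parquet shards of the 10BT sample;
+ # use e.g. "data/CC-MAIN-2024-10/*" for a single dump instead.
+ folder = snapshot_download(
+     "HuggingFaceFW/fineweb-edu",
+     repo_type="dataset",
+     local_dir="./fineweb-edu-10BT",
+     allow_patterns="sample/10BT/*",
+ )
+ print(folder)
+ ```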
+
+ ### Using 🏭 [`datatrove`](https://github.com/huggingface/datatrove/)
+
+ ```python
+ from datatrove.pipeline.readers import ParquetReader
+
+ # limit determines how many documents will be streamed (remove for all)
+ data_reader = ParquetReader("hf://datasets/HuggingFaceFW/fineweb-edu", glob_pattern="data/*/*.parquet", limit=1000)
+ # or to fetch a specific dump CC-MAIN-2024-10; replace "CC-MAIN-2024-10" with "sample/100BT" to use the 100BT sample
+ data_reader = ParquetReader("hf://datasets/HuggingFaceFW/fineweb-edu/CC-MAIN-2024-10", limit=1000)
+ for document in data_reader():
+     # do something with document
+     print(document)
+
+ ###############################
+ # OR for a processing pipeline:
+ ###############################
+
+ from datatrove.executor import LocalPipelineExecutor
+ from datatrove.pipeline.readers import ParquetReader
+ from datatrove.pipeline.filters import LambdaFilter
+ from datatrove.pipeline.writers import JsonlWriter
+
+ pipeline_exec = LocalPipelineExecutor(
+     pipeline=[
+         # replace "CC-MAIN-2024-10" with "sample/100BT" to use the 100BT sample
+         ParquetReader("hf://datasets/HuggingFaceFW/fineweb-edu/CC-MAIN-2024-10", limit=1000),
+         LambdaFilter(lambda doc: "hugging" in doc.text),
+         JsonlWriter("some-output-path")
+     ],
+     tasks=10
+ )
+ pipeline_exec.run()
+ ```
+
+ ### Using `datasets`
+
+ ```python
+ from datasets import load_dataset
+ # use name="sample-10BT" to use the 10BT sample
+ fw = load_dataset("HuggingFaceFW/fineweb-edu", name="CC-MAIN-2024-10", split="train", streaming=True)
+ ```
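+
+ Since every shard is a plain parquet file, you can also read a single file directly, without either library. A hedged sketch using pandas over the `hf://` filesystem provided by `huggingface_hub` (the shard path is one of the files added in this commit; note that each shard is roughly 2.3 GB):
+
+ ```python
+ import pandas as pd  # reading hf:// paths requires huggingface_hub to be installed
+
+ # Read a few useful columns of one shard (column names come from the features list above).
+ df = pd.read_parquet(
+     "hf://datasets/HuggingFaceFW/fineweb-edu/data/CC-MAIN-2013-20/train-00000-of-00014.parquet",
+     columns=["text", "url", "score", "int_score"],
+ )
+ print(df.head())
+ ```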
+
+ ## Dataset curation
+ A new approach has recently emerged for filtering LLM training datasets: using synthetic data to develop classifiers for identifying educational content. This technique was used in the training of [Llama3](https://ai.meta.com/blog/meta-llama-3-meta-ai-responsibility/) and [Phi3](https://arxiv.org/abs/2404.14219), but its large-scale impact on web data filtering hasn't been fully explored or published.
+
+ The highly popular Phi3 models were trained on 3.3 and 4.8 trillion tokens, with the paper stating: “Our training data consists of heavily filtered publicly available web data (according to the 'educational level') from various open internet sources, as well as synthetic LLM-generated data". Similarly, the Llama3 blog post notes: “We found that previous generations of Llama are good at identifying high-quality data, so we used Llama 2 to help build the text-quality classifiers that are powering Llama 3.” However, these classifiers and filtered datasets are not publicly available. To enhance FineWeb's quality, we developed an educational quality classifier using annotations generated by [Llama3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) to create FineWeb-Edu.
+
+ ### Annotation
+ We used [Llama3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) to score 500k FineWeb samples for their educational quality on a scale from 0 to 5.
+
+ We explored various prompts and found that the additive scale by [Yuan et al.](https://arxiv.org/pdf/2401.10020) worked best. To avoid the LLM favoring highly technical pages like arXiv abstracts and submissions, we focused on grade-school and middle-school level knowledge. By setting a threshold of 3 (on a scale of 0 to 5) during the filtering process, we were able to also retain some high-level educational pages. The final prompt can be found [here](https://huggingface.co/HuggingFaceFW/fineweb-edu-classifier/blob/main/utils/prompt.txt).
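+
+ To make the thresholding concrete, here is a minimal sketch (the helper name is ours, not from the original pipeline) of how a 0-5 educational score turns into a binary keep/drop decision:
+
+ ```python
+ def keep_document(educational_score: float, threshold: float = 3.0) -> bool:
+     """Keep pages scored at or above the threshold on the 0-5 additive scale.
+
+     At threshold 3, solid grade-school/middle-school material passes, and
+     some high-level educational pages are retained as well.
+     """
+     return educational_score >= threshold
+
+ assert keep_document(3.2) is True
+ assert keep_document(2.7) is False
+ ```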
+
+ We also experimented with different LLMs: Llama3-70B-Instruct, Mixtral-8x-7B-Instruct, and Mixtral-8x22B-Instruct. Llama 3 and Mixtral-8x22B produced similar scores, while Mixtral-8x7B tended to be more generous, not fully adhering to the score scale. Verga et al. suggest using multiple LLMs as juries. We tried averaging the scores from the three models, but this shifted the distribution to the right due to the higher scores from Mixtral-8x7B. Training on a dataset filtered with a classifier using jury annotations performed worse than using a classifier based on Llama3 annotations. We hypothesize that the jury-based approach retains more low-quality samples.
+
+ ### Classifier training
+ We fine-tuned a BERT-like regression model on these annotations, based on [Snowflake-arctic-embed](https://huggingface.co/Snowflake/snowflake-arctic-embed-m). When converted to a binary classifier, with a score of 3 as the threshold for keeping or removing documents, the model achieved an F1 score of 82%. Classifying FineWeb's 15T tokens took 6k H100 GPU hours.
+
+ The classifier is available at: [HuggingFaceFW/fineweb-edu-classifier](https://huggingface.co/HuggingFaceFW/fineweb-edu-classifier/)
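+
+ To score your own text with the released classifier, a minimal inference sketch with `transformers` follows (an assumed usage pattern: since the model is a regression head with a single output, we read the raw logit as the 0-5 score; see the classifier's model card for the authoritative snippet):
+
+ ```python
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+ tokenizer = AutoTokenizer.from_pretrained("HuggingFaceFW/fineweb-edu-classifier")
+ model = AutoModelForSequenceClassification.from_pretrained("HuggingFaceFW/fineweb-edu-classifier")
+
+ text = "Photosynthesis is the process by which plants convert sunlight into chemical energy."
+ inputs = tokenizer(text, return_tensors="pt", padding="longest", truncation=True)
+ score = model(**inputs).logits.squeeze(-1).item()  # regression output on the 0-5 scale
+ int_score = int(round(max(0, min(score, 5))))      # clipped integer, like the dataset's int_score column
+ print(score, int_score)
+ ```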
+
+ ### Filtering and results
+ **Note**: You can find more details about the ablations and results in the FineWeb [blog post](https://huggingface.co/spaces/HuggingFaceFW/blogpost-fineweb-v1).
+
+ We investigated the impact of using different thresholds for the filtering and found that threshold 3 gave the best overall results. Although using a threshold higher than 3 improves performance on knowledge- and reasoning-intensive benchmarks, it significantly degrades performance on HellaSwag and PIQA.
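+
+ Because each document ships with its classifier score (the `score` and rounded `int_score` columns), you can re-filter the released data at a stricter threshold yourself. A sketch (ours, not from the original card) using streaming `datasets`:
+
+ ```python
+ from datasets import load_dataset
+
+ fw = load_dataset("HuggingFaceFW/fineweb-edu", name="sample-10BT", split="train", streaming=True)
+ # keep only documents scored 4 or above, stricter than the release threshold of 3
+ very_educational = fw.filter(lambda doc: doc["int_score"] >= 4)
+ ```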
+
+ We then built 📚 FineWeb-Edu by filtering out samples with scores lower than 3. This removed 92% of the dataset, leaving us with 1.3T educational tokens. Our ablation demonstrated that this refined dataset surpasses 🍷 FineWeb and all other open web datasets, with remarkable improvements on educational benchmarks such as MMLU, ARC, and OpenBookQA. The plot below compares FineWeb-Edu to other web datasets:
+
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/61c141342aac764ce1654e43/hJlyTgDzZpYuxO9LUm0PF.png)
+
+ To retain more tokens, we also experimented with a less strict threshold of 2 instead of 3. While less performant than threshold 3, it still outperformed FineWeb and preserved 5.4T tokens. We release these two datasets as [FineWeb-Edu](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu) and [FineWeb-Edu-score-2](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu-score-2), along with the [classifier](https://huggingface.co/HuggingFaceFW/fineweb-edu-classifier).
+
+ You will find all the ablation models in [this collection](https://huggingface.co/collections/HuggingFaceFW/ablation-models-662457b0d213e8c14fe47f32). The FineWeb-Edu ablation model (trained on 350B tokens) is available at [https://huggingface.co/HuggingFaceFW/ablation-model-fineweb-edu](https://huggingface.co/HuggingFaceFW/ablation-model-fineweb-edu).
+
+ ## Considerations for Using the Data
+ This section is copied from the parent dataset: [FineWeb](https://huggingface.co/datasets/HuggingFaceFW/fineweb).
+
+ ### Social Impact of Dataset
+
+ With the release of this dataset we aim to make model training more accessible to the machine learning community at large.
+
+ While multiple open-weights models with strong performance have been publicly released in the past, more often than not these releases are not accompanied by the corresponding training dataset. This is unfortunate, as a dataset's specificities and characteristics have been demonstrated to have a very large impact on a model's performance. Since the creation of a high-quality training dataset is a fundamental requirement for training an LLM capable of excelling at downstream tasks, with 🍷 FineWeb we (a) make the dataset creation process more transparent by sharing our entire processing setup, including the codebase used, and (b) help alleviate the costs of dataset curation, in both time and compute, for model creators by publicly releasing our dataset to the community.
+
+ ### Discussion of Biases
+
+ Efforts were made to minimize the amount of NSFW and toxic content present in the dataset by employing filtering at the URL level. However, there are still a significant number of documents present in the final dataset that could be considered toxic or contain harmful content. As 🍷 FineWeb was sourced from the web as a whole, any harmful biases typically present in it may be reproduced in our dataset.
+
+ We deliberately avoided using machine learning filtering methods that define text quality based on the similarity to a “gold” source such as Wikipedia, or toxicity classifiers, as these methods have been known to [disproportionately remove content in specific dialects](https://aclanthology.org/D16-1120/) and [overclassify as toxic text related to specific social identities](https://arxiv.org/pdf/2109.07445.pdf), respectively.
+
+ ### Other Known Limitations
+
+ As a consequence of some of the filtering steps applied, it is likely that code content is not prevalent in our dataset. If you are training a model that should also perform code tasks, we recommend you combine 🍷 FineWeb with a code dataset, such as [The Stack v2](https://huggingface.co/datasets/bigcode/the-stack-v2). You should also probably consider complementing 🍷 FineWeb with specialized curated sources (such as Wikipedia), as they will likely have better formatting than the Wikipedia content included in 🍷 FineWeb (we did not tailor the processing to individual websites).
+
+ ## Additional Information
+
+ ### Licensing Information
+
+ The dataset is released under the **Open Data Commons Attribution License (ODC-By) v1.0** [license](https://opendatacommons.org/licenses/by/1-0/). The use of this dataset is also subject to [CommonCrawl's Terms of Use](https://commoncrawl.org/terms-of-use).
+
+ ### Future work
+
+ We plan to work on a better educational classifier to improve the quality of FineWeb-Edu.
+
+ ### Citation Information
+
+ You can cite our paper https://arxiv.org/abs/2406.17557 or this dataset:
+
+ ```
+ @misc{lozhkov2024fineweb-edu,
+   author = { Lozhkov, Anton and Ben Allal, Loubna and von Werra, Leandro and Wolf, Thomas },
+   title = { FineWeb-Edu: the Finest Collection of Educational Content },
+   year = 2024,
+   url = { https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu },
+   doi = { 10.57967/hf/2497 },
+   publisher = { Hugging Face }
+ }
+ ```
data/CC-MAIN-2013-20/train-00000-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb989c566f6fba00ab61decc5f7aa1538a07d9b142e58a52ff790154528ffd03
+ size 2369456837
data/CC-MAIN-2013-20/train-00001-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90105b41a509da93651a4364127c83451f97de61e438074f86ece09dfd86961e
+ size 2376360695
data/CC-MAIN-2013-20/train-00002-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:14fc9ec030a8f3a6dfee9a3286ab8eb979ec4cab3df1ad503779dc8dac631261
+ size 2366714674
data/CC-MAIN-2013-20/train-00003-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:145d0a0d25d7f441febc08abc46fcff2c95bab8ebafbc1f96f343f9ca2b83285
+ size 2349721791
data/CC-MAIN-2013-20/train-00004-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1dce281f2924512c3f4c7ef9f39a6521bc04ca30b7fdcf7121ea9825316ff6de
+ size 2347567335
data/CC-MAIN-2013-20/train-00005-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea40c90b2ef3565c3382c9954f3d80584ff81daf6ad65606b54265d9125962ec
+ size 2344454381
data/CC-MAIN-2013-20/train-00006-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0acdafc24aa73112ee78f1c5f136e924b490a5d88f36677c1c444caa8507142b
+ size 2336964549
data/CC-MAIN-2013-20/train-00007-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68852a8748a3707e2eb5cb1bee0e4cb243588fc82d0552cfebf5741d42bbda92
+ size 2320625373
data/CC-MAIN-2013-20/train-00008-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11748c3f363cfa4aa456e59608222dd8fd6ee301ff9bdeb7eac57a46ed7bd5ab
+ size 2307363915
data/CC-MAIN-2013-20/train-00009-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b02eb37467007501a9bb06284023222d4991e0a5f7960778bcca24f6a7be2cb
+ size 2310654336
data/CC-MAIN-2013-20/train-00010-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f0fb9098157edb573d38a0cf9d27d6bf7718b13b0fb8a99e4bd8564434ba69c
+ size 2294070592
data/CC-MAIN-2013-20/train-00011-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e719a6f913aa3753e490f5170b1da5d82b5c82c8c82de2887530f2d82a9c874
+ size 2294546063
data/CC-MAIN-2013-20/train-00012-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7b0850b29f91c0bdfb0f3668379cd541d5c334bc23045e8aa89d31eeaacd7fc
+ size 2295986307
data/CC-MAIN-2013-20/train-00013-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71c607b1a2bc3cd304009b5601ac662041af1e0879506fd360c47d32de967d56
+ size 2284902248
data/CC-MAIN-2013-48/train-00000-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29db96009ac1de5a42336ec0ae4ec6fee3e8dd8aec4de1d1b50aa2b38478caec
+ size 2329592314
data/CC-MAIN-2013-48/train-00001-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db5fce1225d1b17bde6d3cd4f0148ae04e8580b7b8cd048c65f8d6414503e493
+ size 2332353351
data/CC-MAIN-2013-48/train-00002-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c16a52b1c352d4a70aef14721f72a3886ab3a4a6a1c252ff285dc9fa5ade402
+ size 2327303340
data/CC-MAIN-2013-48/train-00003-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:424a7bee659a160dc2d3a80e703e36439589850c126ebd1a4af8d010b840dd0f
+ size 2316277195
data/CC-MAIN-2013-48/train-00004-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1137697278aa528361f2e6fe241f299738949cabba15ce7e74990eeff682f0e5
+ size 2295811884
data/CC-MAIN-2013-48/train-00005-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aac083d0117e2b4e78d2b69b73c764020f26d6087d8a92bccb4a7a5c1ac2bbfd
+ size 2299104720
data/CC-MAIN-2013-48/train-00006-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a15ec6b424d336be9a4c704fa85e8fe46407123bc6ccea6053057c4634ef165
+ size 2278623295
data/CC-MAIN-2013-48/train-00007-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c0787aa05d46cc647796246b0fc93e2501bf0b53d9da45539d93c9184c20460
+ size 2274629249
data/CC-MAIN-2013-48/train-00008-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:008397de104732df8a08417815510d27d9daa5ddea5d1f4409c39f527b442a10
+ size 2273053146
data/CC-MAIN-2013-48/train-00009-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a994524b8aba4f1423b17cd1552c5c08fa77f5c086129c5ba7a86d72210c0738
+ size 2269555721
data/CC-MAIN-2013-48/train-00010-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:126a0f67008354d253786f887e6dd65accf99b5414821d7454f893d4c187a36c
+ size 2255912359
data/CC-MAIN-2013-48/train-00011-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d355beaec4969f4de01cc5c250ca6fe6f474b4e527d6e64219cb15fc2d885875
+ size 2258788899
data/CC-MAIN-2013-48/train-00012-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d26c7bb689d9f5581baabd33d440e30096a68811f6e95358b858bbbad405541
+ size 2252159861
data/CC-MAIN-2013-48/train-00013-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b4b9e8f331117c7210f967ddbc10decdebb3bd5267a5fbca9129ebe25be463e
+ size 2251108466
data/CC-MAIN-2014-10/train-00000-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:301b3805b7b505ccb899f1ffab25f7d06e1d19d8ae221f4177044e04a1943e2b
+ size 2378402603
data/CC-MAIN-2014-10/train-00001-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73ccd40928affce688bcc23e19a9288eda67f20762850d7ed08fb8848657aad3
+ size 2374581082
data/CC-MAIN-2014-10/train-00002-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10d45b5276fd3f0b984bcb8e8079892aa169a11bdb0cccc7b92eed2b7bf1419d
+ size 2370839030
data/CC-MAIN-2014-10/train-00003-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03d6f43cae56c3747c49480167122782466a879b724cd2e473f2c736dcc41f2f
+ size 2367174733
data/CC-MAIN-2014-10/train-00004-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2356a49dc4bae1c726b3bc27f2e1fb93955f5393b12e987faf79702d72c1168
+ size 2355201629
data/CC-MAIN-2014-10/train-00005-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48bd01b21e42d698e77a7cd025b3ce63298e686a00786968d68dca743fd9bba5
+ size 2353220675
data/CC-MAIN-2014-10/train-00006-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f091b04c2d26f7d39eb8ac5fef323798a2280276bd1a43d295038a35945935d
+ size 2344324541
data/CC-MAIN-2014-10/train-00007-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3be423a243289bc7afcaaa3566d9ceea9c027fd9daec6713040cae02ed3f6d8f
+ size 2339577028
data/CC-MAIN-2014-10/train-00008-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef5afa503b1606af25c5e9d83a4a86974e27a0f58ef0db6f1a0ae493dfaee373
+ size 2341770146
data/CC-MAIN-2014-10/train-00009-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6a9cabfbbbb018ada4c4f33d9bd00e0f82faf70b9c8c9c607f0d4f8671be3fa
+ size 2329802974
data/CC-MAIN-2014-10/train-00010-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9fc7c2d1cca1cb60cde18fe6c647edee6e69a5e0d187297430d43ae06752a241
+ size 2325948344
data/CC-MAIN-2014-10/train-00011-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3d3a1ff655881b616b5bea1ccd120cbaffd5d0b0e1c5f587b86c24207a7a84a
+ size 2324674557
data/CC-MAIN-2014-10/train-00012-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67c4321416ea340f0aa9f42dbdbec5a002e17b67790d5d70f7483297b9bb0978
+ size 2326385593
data/CC-MAIN-2014-10/train-00013-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0820fe329bc52566cdaf591499a5432bbfd932bdd972bfe1018d7e39b03f668
+ size 2317814766
data/CC-MAIN-2014-15/train-00000-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9da4ada954d1f1d0c8be46717bc59ed9364fb611768fa4b20423495d9672408a
+ size 2271082635
data/CC-MAIN-2014-15/train-00001-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86df6b7ab75933cf935cc86fba6e05ddc6862251a106a9a6c561a81b865b7230
+ size 2276397299
data/CC-MAIN-2014-15/train-00002-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed1c7b21827aea13b8a66112e3e7ab82ca70793a2ad26418fd61ba26a3a2ad1b
+ size 2276871097
data/CC-MAIN-2014-15/train-00003-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a73bdfb43bae173832151f71441e8702c9bd99a4810a20e71c974f2e8fd8a098
+ size 2274158040
data/CC-MAIN-2014-15/train-00004-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:510e3c1cfc6b96efc4e5e89c9ea9103bc0dccc2ba45fbbff056507e6191d26af
+ size 2263202434
data/CC-MAIN-2014-15/train-00005-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17fc24a67cc070942015ca26a71660265f70ab223f8e2fc7a1d289e60e883959
+ size 2265126394