BramVanroy committed
Commit ffddd15 · Parent(s): 490daab
Update README.md

README.md CHANGED
@@ -16,7 +16,7 @@ dataset_info:
     num_bytes: 3717538.0422429685
     num_examples: 2375
   download_size: 224184711
-  dataset_size: 371875896
+  dataset_size: 371875896
 - config_name: 100k
   features:
   - name: text
@@ -33,7 +33,7 @@ dataset_info:
     num_bytes: 3654.5346534653463
     num_examples: 3
   download_size: 212072
-  dataset_size: 369108
+  dataset_size: 369108
 - config_name: 10B
   features:
   - name: text
@@ -50,7 +50,7 @@ dataset_info:
     num_bytes: 103964370.23416495
     num_examples: 64000
   download_size: 25249998174
-  dataset_size: 40008285333
+  dataset_size: 40008285333
 - config_name: 10M
   features:
   - name: text
@@ -67,7 +67,7 @@ dataset_info:
     num_bytes: 373730.00803400803
     num_examples: 256
   download_size: 22486785
-  dataset_size: 37432856
+  dataset_size: 37432856
 - config_name: 10k
   features:
   - name: text
@@ -101,7 +101,7 @@ dataset_info:
     num_bytes: 104972711.86121707
     num_examples: 64000
   download_size: 37966833792
-  dataset_size: 60119681222
+  dataset_size: 60119681222
 - config_name: 1B
   features:
   - name: text
@@ -118,7 +118,7 @@ dataset_info:
     num_bytes: 38437701.880162396
     num_examples: 28692
   download_size: 2346974411
-  dataset_size: 3843814397
+  dataset_size: 3843814397
 - config_name: 1M
   features:
   - name: text
@@ -135,7 +135,7 @@ dataset_info:
     num_bytes: 37019.21197648787
     num_examples: 27
   download_size: 2183019
-  dataset_size: 3732085
+  dataset_size: 3732085
 - config_name: 20B
   features:
   - name: text
@@ -152,7 +152,7 @@ dataset_info:
     num_bytes: 105482877.0574707
     num_examples: 64000
   download_size: 50682523292
-  dataset_size: 80231072356
+  dataset_size: 80231072356
 - config_name: 25B
   features:
   - name: text
@@ -169,7 +169,7 @@ dataset_info:
     num_bytes: 105790923.98284689
     num_examples: 64000
   download_size: 63397565382
-  dataset_size: 100342468245
+  dataset_size: 100342468245
 - config_name: 30B
   features:
   - name: text
@@ -186,7 +186,7 @@ dataset_info:
     num_bytes: 105997103.53253783
     num_examples: 64000
   download_size: 76111936677
-  dataset_size: 120453859676
+  dataset_size: 120453859676
 - config_name: 5B
   features:
   - name: text
@@ -203,7 +203,7 @@ dataset_info:
     num_bytes: 101031980.90819068
     num_examples: 64000
   download_size: 12526141470
-  dataset_size: 19896889444
+  dataset_size: 19896889444
 configs:
 - config_name: 100M
   data_files:
@@ -277,11 +277,22 @@ configs:
     path: 5B/train-*
   - split: test
     path: 5B/test-*
+task_categories:
+- text-generation
+- text2text-generation
 ---

+# Filtered CulturaX + Wikipedia for Dutch
+
+This is a combined and filtered version of [CulturaX](https://huggingface.co/datasets/uonlp/CulturaX) and [Wikipedia](https://huggingface.co/datasets/wikimedia/wikipedia), only including Dutch. It is intended for the training of LLMs.
+
+Different configs are available based on the number of tokens (see the overview in a section below). This is useful if you want to know exactly how many tokens you are training on, and it also makes the dataset well suited for streaming. Tokenization was done with the large vocabulary of the `google/gemma-2b` tokenizer, so the exact counts may differ for other tokenizers.
+

 ## Filtering

+While CulturaX has already been filtered extensively, some additional filtering can be done to further improve the quality of the corpus. These filters are described below.
+
 The baseline ratios (punctuation, uppercase, digits) were calculated on the SONAR-500 corpus (excluding WRPEA, WRPED, WRUEA, WRUED, WRUEB).

 **CulturaX**:
@@ -531,4 +542,4 @@ BAD_PHRASES_DOC_LEVEL = {
 - test_num_tokens: 26,520,069
 - total_num_samples: 84,753,828
 - train_num_samples: 84,689,828
-- test_num_samples: 64,000
+- test_num_samples: 64,000
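To make the token-capped configs above concrete: a minimal sketch of loading one of them in streaming mode with the `datasets` library. The repository ID `BramVanroy/wikipedia_culturax_dutch` is an assumption for illustration; substitute the actual ID of this dataset.

```python
from datasets import load_dataset

# Assumed repository ID for this dataset; substitute the real one.
REPO_ID = "BramVanroy/wikipedia_culturax_dutch"

# Stream the 10B-token config: no up-front download, examples arrive lazily.
ds = load_dataset(REPO_ID, "10B", split="train", streaming=True)

for example in ds.take(3):
    print(example["text"][:100])
```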
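Since the per-config token counts were measured with the `google/gemma-2b` tokenizer, recounting with a tokenizer of your choice could look like the sketch below. This is an assumption about the counting procedure, not the script that produced the numbers in the README.

```python
from datasets import load_dataset
from transformers import AutoTokenizer

REPO_ID = "BramVanroy/wikipedia_culturax_dutch"  # assumed ID, see above

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
ds = load_dataset(REPO_ID, "10k", split="train", streaming=True)

total_tokens = 0
for example in ds:
    # Count raw text tokens only; special tokens would inflate the total.
    total_tokens += len(tokenizer(example["text"], add_special_tokens=False)["input_ids"])

print(f"train tokens (gemma-2b vocabulary): {total_tokens:,}")
```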
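As for the filtering itself: a minimal sketch of how punctuation, uppercase, and digit ratios can be computed per document and checked against corpus-level baselines. The thresholds below are placeholders, not the values derived from SONAR-500.

```python
import string

def char_ratios(text: str) -> dict[str, float]:
    """Fraction of punctuation, uppercase, and digit characters in a document."""
    n = len(text)
    if n == 0:
        return {"punct": 0.0, "upper": 0.0, "digit": 0.0}
    return {
        "punct": sum(c in string.punctuation for c in text) / n,
        "upper": sum(c.isupper() for c in text) / n,
        "digit": sum(c.isdigit() for c in text) / n,
    }

# Placeholder thresholds; the real ones come from the SONAR-500 baseline ratios.
MAX_RATIOS = {"punct": 0.15, "upper": 0.10, "digit": 0.05}

def keep_document(text: str) -> bool:
    """Keep a document only if all character ratios stay within the baselines."""
    ratios = char_ratios(text)
    return all(ratios[key] <= MAX_RATIOS[key] for key in MAX_RATIOS)

print(keep_document("Een gewone Nederlandse zin zonder veel cijfers of hoofdletters."))  # True
```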