---
dataset_info:
- config_name: 100M
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 368158357.95775706
    num_examples: 235203
  - name: test
    num_bytes: 3717538.0422429685
    num_examples: 2375
  download_size: 224184711
  dataset_size: 371875896
- config_name: 100k
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 365453.4653465347
    num_examples: 300
  - name: test
    num_bytes: 3654.5346534653463
    num_examples: 3
  download_size: 212072
  dataset_size: 369108
- config_name: 10B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 39904320962.76584
    num_examples: 24564921
  - name: test
    num_bytes: 103964370.23416495
    num_examples: 64000
  download_size: 25249998174
  dataset_size: 40008285333
- config_name: 10M
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 37059125.991965994
    num_examples: 25385
  - name: test
    num_bytes: 373730.00803400803
    num_examples: 256
  download_size: 22486785
  dataset_size: 37432856
- config_name: 10k
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 37658.21052631579
    num_examples: 37
  - name: test
    num_bytes: 472
    num_examples: 1
  download_size: 30893
  dataset_size: 38130.21052631579
- config_name: 15B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 60014708510.13879
    num_examples: 36589903
  - name: test
    num_bytes: 104972711.86121707
    num_examples: 64000
  download_size: 37966833792
  dataset_size: 60119681222
- config_name: 1B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 3805376695.1198378
    num_examples: 2840541
  - name: test
    num_bytes: 38437701.880162396
    num_examples: 28692
  download_size: 2346974411
  dataset_size: 3843814397
- config_name: 1M
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 3695065.7880235123
    num_examples: 2695
  - name: test
    num_bytes: 37019.21197648787
    num_examples: 27
  download_size: 2183019
  dataset_size: 3732085
- config_name: 20B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 80125589478.94254
    num_examples: 48614883
  - name: test
    num_bytes: 105482877.0574707
    num_examples: 64000
  download_size: 50682523292
  dataset_size: 80231072356
- config_name: 25B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 100236677321.01715
    num_examples: 60639865
  - name: test
    num_bytes: 105790923.98284689
    num_examples: 64000
  download_size: 63397565382
  dataset_size: 100342468245
- config_name: 30B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 120347862572.46747
    num_examples: 72664846
  - name: test
    num_bytes: 105997103.53253783
    num_examples: 64000
  download_size: 76111936677
  dataset_size: 120453859676
- config_name: 5B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 19795857463.09181
    num_examples: 12539939
  - name: test
    num_bytes: 101031980.90819068
    num_examples: 64000
  download_size: 12526141470
  dataset_size: 19896889444
configs:
- config_name: 100M
  data_files:
  - split: train
    path: 100M/train-*
  - split: test
    path: 100M/test-*
- config_name: 100k
  data_files:
  - split: train
    path: 100k/train-*
  - split: test
    path: 100k/test-*
- config_name: 10B
  data_files:
  - split: train
    path: 10B/train-*
  - split: test
    path: 10B/test-*
- config_name: 10M
  data_files:
  - split: train
    path: 10M/train-*
  - split: test
    path: 10M/test-*
- config_name: 10k
  data_files:
  - split: train
    path: 10k/train-*
  - split: test
    path: 10k/test-*
- config_name: 15B
  data_files:
  - split: train
    path: 15B/train-*
  - split: test
    path: 15B/test-*
- config_name: 1B
  data_files:
  - split: train
    path: 1B/train-*
  - split: test
    path: 1B/test-*
- config_name: 1M
  data_files:
  - split: train
    path: 1M/train-*
  - split: test
    path: 1M/test-*
- config_name: 20B
  data_files:
  - split: train
    path: 20B/train-*
  - split: test
    path: 20B/test-*
- config_name: 25B
  data_files:
  - split: train
    path: 25B/train-*
  - split: test
    path: 25B/test-*
- config_name: 30B
  data_files:
  - split: train
    path: 30B/train-*
  - split: test
    path: 30B/test-*
- config_name: 5B
  data_files:
  - split: train
    path: 5B/train-*
  - split: test
    path: 5B/test-*
task_categories:
- text-generation
- text2text-generation
---

# Filtered CulturaX + Wikipedia for Dutch

This is a combined and filtered version of [CulturaX](https://huggingface.co/datasets/uonlp/CulturaX) and [Wikipedia](https://huggingface.co/datasets/wikimedia/wikipedia), only including Dutch. It is intended for the training of LLMs.

Different configs are available based on the number of tokens (see the overview in a later section). This is useful if you want to know exactly how many tokens you are training on, and it also makes the dataset convenient to use in streaming mode. Token counts were computed with the large vocabulary of the `google/gemma-2b` tokenizer, so the exact numbers may differ for your own tokenizer.
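
For example, a specific token-budget config can be loaded (optionally as a streaming dataset) as follows. The repository id below is a placeholder; substitute the actual Hub id of this dataset:

```python
from datasets import load_dataset

# NOTE: "your-org/culturax-wiki-dutch" is a placeholder repository id;
# replace it with the actual Hub id of this dataset.
ds = load_dataset("your-org/culturax-wiki-dutch", "10B", split="train", streaming=True)

# Peek at a few samples; each sample has "text", "url" and "source" fields.
for sample in ds.take(3):
    print(sample["source"], sample["url"])
    print(sample["text"][:200])
```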

## Filtering

While CulturaX has already been filtered quite extensively, additional filtering was applied to further improve the quality of the corpus. These filters are described below.

The baseline ratios (punctuation, uppercase, digits) were calculated on the SONAR-500 corpus (excluding WRPEA, WRPED, WRUEA, WRUED and WRUEB).

**CulturaX**:
- removed documents that contain the text "rechten voorbehouden" or "rights reserved"
- removed documents whose URL contains "wikipedia.org" (because we include a cleaned version of Wikipedia ourselves)
- removed documents that contain a "bad word" (see the section below)
- removed documents that contain any non-Latin characters. The idea is that "knowledge"-based information (e.g. the original spelling of a name) is allowed when the data comes from Wikipedia, but not from any other web crawl, to avoid unsolicited noise.

**CulturaX + Wikipedia**:
- removed documents where the ratio of punctuation marks to non-whitespace characters is higher than 0.2
- removed documents where the ratio of uppercase to non-whitespace characters is higher than 0.22
- removed documents where the ratio of digits to non-whitespace characters is higher than 0.16
- removed documents where the average token length is < 2 or > 20
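
The sketch below shows what these document-level heuristics boil down to. It is an illustrative reimplementation rather than the exact code used to build this dataset, and it assumes the average token length is measured over whitespace-split tokens:

```python
import string

PUNCT = set(string.punctuation)

def passes_heuristics(text: str) -> bool:
    """Return True if a document passes the ratio and token-length filters."""
    non_ws = [ch for ch in text if not ch.isspace()]
    if not non_ws:
        return False

    # Character-level ratios over non-whitespace characters.
    punct_ratio = sum(ch in PUNCT for ch in non_ws) / len(non_ws)
    upper_ratio = sum(ch.isupper() for ch in non_ws) / len(non_ws)
    digit_ratio = sum(ch.isdigit() for ch in non_ws) / len(non_ws)

    # Average length of whitespace-separated tokens (assumption).
    tokens = text.split()
    avg_tok_len = sum(len(tok) for tok in tokens) / len(tokens) if tokens else 0.0

    return (
        punct_ratio <= 0.2
        and upper_ratio <= 0.22
        and digit_ratio <= 0.16
        and 2 <= avg_tok_len <= 20
    )
```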

## Bad words

```python
BAD_PHRASES_DOC_LEVEL = {
    # https://en.wikipedia.org/wiki/Dutch_profanity
    "achterlijk",
    "debiel",
    "downie",
    "idioot",
    "kankerlijer",
    "klere",
    "kolere",
    "minkukel",
    "pestkop",
    "pleuris",
    "pleuritis",
    "teringlijer",
    "tyfuslijer",
    "gadver",
    "getver",
    "godver",
    "godskolere",
    "godverork",
    "graftak",
    "kopvod",
    "verdomme",
    "anaalgeneraal",
    "bitch",
    "dikzak",
    "flikker",
    "fok",
    "fuck",
    "hoer",
    "klootzak",
    "klote",
    "kreng",
    "kringspiermusketier",
    "kut",
    "lamzak",
    "lul",
    "manwijf",
    "matennaai",
    "neuken",
    "neuker",
    "ouwehoer",
    "reet",
    "reetkever",
    "reetridder",
    "rotzak",
    "schijt",
    "shit",
    "slet",
    "slijmbal",
    "slons",
    "sodemieter",
    "stoephoer",
    "swaffel",
    "teef",
    "trut",
    "tut",
    "zak",
    "uilskuiken",
    "zeik",
    "bamivreter",
    "bosneger",
    "neger",
    "fransoos",
    "geitenneuker",
    "kaaskop",
    "kakker",
    "koelie",
    "lijp",
    "medelander",
    "mocro",
    "mof",
    "nikker",
    "poepchinees",
    "roetmop",
    "spaghettivreter",
    "loempiavouwer",
    "spanjool",
    "spleetoog",
    "tatta",
    "tokkie",
    "zandneger",
    "zwartzak",
    "halvezool",
    "kenau",
    "klootviool",
    "knuppel",
    "koekert",
    "koekwaus",
    "oelewapper",
    "smeerlap",
    "sukkel",
    "sul",
    "wappie",
    "wijf",
    "zooi",
    # xxx (a.o. https://gitlab.com/yhavinga/c4nlpreproc/-/blob/master/clean/badwords_ennl.py?ref_type=heads)
    "xxx",
    "anal",
    "blowjob",
    "buttplug",
    "cock",
    "cunt",
    "geil",
    "sex",  # Standaardnederlands = seks, maybe we catch some porn or social media sites with this misspelling
    "porn",
    # extra
    "nigger",
    "nigga",
    "hoerig",
    "klojo",
}
```
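
As a rough illustration, document-level filtering with this list could look like the snippet below. This is a sketch that assumes lowercased, word-boundary matching; the exact matching strategy used to produce the dataset may differ:

```python
import re

# One pattern that matches any bad phrase as a whole word (case-insensitive).
# Word-boundary matching is an assumption made for this sketch.
BAD_PATTERN = re.compile(
    r"\b(?:" + "|".join(re.escape(word) for word in sorted(BAD_PHRASES_DOC_LEVEL)) + r")\b",
    flags=re.IGNORECASE,
)

def contains_bad_phrase(text: str) -> bool:
    """Return True if the document should be dropped."""
    return BAD_PATTERN.search(text) is not None
```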

## Config details

`10k`
- ratio_wikipedia: 100.00%
- total_num_tokens: 10,078
- train_num_tokens: 9,957
- test_num_tokens: 121
- total_num_samples: 38
- train_num_samples: 37
- test_num_samples: 1

`100k`
- ratio_wikipedia: 100.00%
- total_num_tokens: 100,099
- train_num_tokens: 99,537
- test_num_tokens: 562
- total_num_samples: 303
- train_num_samples: 300
- test_num_samples: 3

`1M`
- ratio_wikipedia: 100.00%
- total_num_tokens: 1,000,104
- train_num_tokens: 987,432
- test_num_tokens: 12,672
- total_num_samples: 2,722
- train_num_samples: 2,695
- test_num_samples: 27

`10M`
- ratio_wikipedia: 100.00%
- total_num_tokens: 10,000,692
- train_num_tokens: 9,905,387
- test_num_tokens: 95,305
- total_num_samples: 25,641
- train_num_samples: 25,385
- test_num_samples: 256

`100M`
- ratio_wikipedia: 100.00%
- total_num_tokens: 100,000,049
- train_num_tokens: 99,022,731
- test_num_tokens: 977,318
- total_num_samples: 237,578
- train_num_samples: 235,203
- test_num_samples: 2,375

`1B`
- ratio_wikipedia: 82.38%
- total_num_tokens: 1,000,000,003
- train_num_tokens: 990,064,856
- test_num_tokens: 9,935,147
- total_num_samples: 2,869,233
- train_num_samples: 2,840,541
- test_num_samples: 28,692

`5B`
- ratio_wikipedia: 35.62%
- total_num_tokens: 5,000,000,224
- train_num_tokens: 4,974,586,006
- test_num_tokens: 25,414,218
- total_num_samples: 12,603,939
- train_num_samples: 12,539,939
- test_num_samples: 64,000

`10B`
- ratio_wikipedia: 26.86%
- total_num_tokens: 10,000,000,658
- train_num_tokens: 9,973,803,589
- test_num_tokens: 26,197,069
- total_num_samples: 24,628,921
- train_num_samples: 24,564,921
- test_num_samples: 64,000

`15B`
- ratio_wikipedia: 23.85%
- total_num_tokens: 15,000,001,092
- train_num_tokens: 14,973,654,717
- test_num_tokens: 26,346,375
- total_num_samples: 36,653,903
- train_num_samples: 36,589,903
- test_num_samples: 64,000

`20B`
- ratio_wikipedia: 22.32%
- total_num_tokens: 20,000,000,303
- train_num_tokens: 19,973,764,973
- test_num_tokens: 26,235,330
- total_num_samples: 48,678,883
- train_num_samples: 48,614,883
- test_num_samples: 64,000

`25B`
- ratio_wikipedia: 21.40%
- total_num_tokens: 25,000,000,737
- train_num_tokens: 24,973,747,815
- test_num_tokens: 26,252,922
- total_num_samples: 60,703,865
- train_num_samples: 60,639,865
- test_num_samples: 64,000

`30B`
- ratio_wikipedia: 20.79%
- total_num_tokens: 30,000,000,034
- train_num_tokens: 29,973,830,841
- test_num_tokens: 26,169,193
- total_num_samples: 72,728,846
- train_num_samples: 72,664,846
- test_num_samples: 64,000

`35B`
- ratio_wikipedia: 20.35%
- total_num_tokens: 35,000,000,468
- train_num_tokens: 34,973,480,399
- test_num_tokens: 26,520,069
- total_num_samples: 84,753,828
- train_num_samples: 84,689,828
- test_num_samples: 64,000
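
Since the counts above were computed with the `google/gemma-2b` tokenizer, a figure such as test_num_tokens can be re-derived roughly as sketched below. The repository id is again a placeholder, and small deviations are possible depending on how special tokens are handled:

```python
from datasets import load_dataset
from transformers import AutoTokenizer

# Placeholder repository id; replace with the actual Hub id of this dataset.
ds = load_dataset("your-org/culturax-wiki-dutch", "10M", split="test")
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")

# Sum tokens over all documents in the split (special tokens excluded here,
# which may cause small differences from the reported numbers).
num_tokens = sum(
    len(tokenizer(sample["text"], add_special_tokens=False)["input_ids"])
    for sample in ds
)
print(f"test tokens: {num_tokens:,}")
```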