---
language:
- nl
size_categories:
- 10B<n<100B
---

## Bad words

The following list of profane and offensive phrases was used for document-level filtering:

```python
BAD_PHRASES_DOC_LEVEL = {
    # https://en.wikipedia.org/wiki/Dutch_profanity
    "achterlijk", "debiel", "downie", "idioot", "kankerlijer", "klere",
    "kolere", "minkukel", "pestkop", "pleuris", "pleuritis", "teringlijer",
    "tyfuslijer", "gadver", "getver", "godver", "godskolere", "godverork",
    "graftak", "kopvod", "verdomme", "anaalgeneraal", "bitch", "dikzak",
    "flikker", "fok", "fuck", "hoer", "klootzak", "klote", "kreng",
    "kringspiermusketier", "kut", "lamzak", "lul", "manwijf", "matennaai",
    "neuken", "neuker", "ouwehoer", "reet", "reetkever", "reetridder",
    "rotzak", "schijt", "shit", "slet", "slijmbal", "slons", "sodemieter",
    "stoephoer", "swaffel", "teef", "trut", "tut", "zak", "uilskuiken",
    "zeik", "bamivreter", "bosneger", "neger", "fransoos", "geitenneuker",
    "kaaskop", "kakker", "koelie", "lijp", "medelander", "mocro", "mof",
    "nikker", "poepchinees", "roetmop", "spaghettivreter", "loempiavouwer",
    "spanjool", "spleetoog", "tatta", "tokkie", "zandneger", "zwartzak",
    "halvezool", "kenau", "klootviool", "knuppel", "koekert", "koekwaus",
    "oelewapper", "smeerlap", "sukkel", "sul", "wappie", "wijf", "zooi",
    # xxx (among others: https://gitlab.com/yhavinga/c4nlpreproc/-/blob/master/clean/badwords_ennl.py?ref_type=heads)
    "xxx", "anal", "blowjob", "buttplug", "cock", "cunt", "geil",
    "sex",  # Standard Dutch is "seks"; this misspelling may catch porn or social-media sites
    "porn",
    # extra
    "nigger", "nigga", "hoerig", "klojo",
}
```
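The card does not show the filter itself, so below is a minimal sketch of how a document-level filter over `BAD_PHRASES_DOC_LEVEL` could look. The `contains_bad_phrase` helper and the whole-word, case-insensitive regex are illustrative assumptions, not the exact preprocessing used for this dataset:

```python
import re

# Illustrative assumption: match any listed phrase as a whole word,
# case-insensitively. Not necessarily the exact filter used for this dataset.
_BAD_PHRASES_RE = re.compile(
    r"\b(?:" + "|".join(re.escape(p) for p in sorted(BAD_PHRASES_DOC_LEVEL)) + r")\b",
    flags=re.IGNORECASE,
)

def contains_bad_phrase(document: str) -> bool:
    """Return True if the document contains any listed phrase as a whole word."""
    return _BAD_PHRASES_RE.search(document) is not None

# Example: keep only documents that contain none of the phrases.
docs = ["Dit is een keurige zin.", "Wat een klote voorbeeld."]
clean = [d for d in docs if not contains_bad_phrase(d)]  # keeps only the first document
```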
## Config details

`10k`

- ratio_wikipedia: 100.00%
- total_num_tokens: 10,078
- train_num_tokens: 9,957
- test_num_tokens: 121
- total_num_samples: 38
- train_num_samples: 37
- test_num_samples: 1

`100k`

- ratio_wikipedia: 100.00%
- total_num_tokens: 100,099
- train_num_tokens: 99,537
- test_num_tokens: 562
- total_num_samples: 303
- train_num_samples: 300
- test_num_samples: 3

`1M`

- ratio_wikipedia: 100.00%
- total_num_tokens: 1,000,104
- train_num_tokens: 987,432
- test_num_tokens: 12,672
- total_num_samples: 2,722
- train_num_samples: 2,695
- test_num_samples: 27

`10M`

- ratio_wikipedia: 100.00%
- total_num_tokens: 10,000,692
- train_num_tokens: 9,905,387
- test_num_tokens: 95,305
- total_num_samples: 25,641
- train_num_samples: 25,385
- test_num_samples: 256

`100M`

- ratio_wikipedia: 100.00%
- total_num_tokens: 100,000,049
- train_num_tokens: 99,022,731
- test_num_tokens: 977,318
- total_num_samples: 237,578
- train_num_samples: 235,203
- test_num_samples: 2,375

`1B`

- ratio_wikipedia: 82.38%
- total_num_tokens: 1,000,000,003
- train_num_tokens: 990,064,856
- test_num_tokens: 9,935,147
- total_num_samples: 2,869,233
- train_num_samples: 2,840,541
- test_num_samples: 28,692

`5B`

- ratio_wikipedia: 35.62%
- total_num_tokens: 5,000,000,224
- train_num_tokens: 4,974,586,006
- test_num_tokens: 25,414,218
- total_num_samples: 12,603,939
- train_num_samples: 12,539,939
- test_num_samples: 64,000

`10B`

- ratio_wikipedia: 26.86%
- total_num_tokens: 10,000,000,658
- train_num_tokens: 9,973,803,589
- test_num_tokens: 26,197,069
- total_num_samples: 24,628,921
- train_num_samples: 24,564,921
- test_num_samples: 64,000

`15B`

- ratio_wikipedia: 23.85%
- total_num_tokens: 15,000,001,092
- train_num_tokens: 14,973,654,717
- test_num_tokens: 26,346,375
- total_num_samples: 36,653,903
- train_num_samples: 36,589,903
- test_num_samples: 64,000

`20B`

- ratio_wikipedia: 22.32%
- total_num_tokens: 20,000,000,303
- train_num_tokens: 19,973,764,973
- test_num_tokens: 26,235,330
- total_num_samples: 48,678,883
- train_num_samples: 48,614,883
- test_num_samples: 64,000

`25B`

- ratio_wikipedia: 21.40%
- total_num_tokens: 25,000,000,737
- train_num_tokens: 24,973,747,815
- test_num_tokens: 26,252,922
- total_num_samples: 60,703,865
- train_num_samples: 60,639,865
- test_num_samples: 64,000

`30B`

- ratio_wikipedia: 20.79%
- total_num_tokens: 30,000,000,034
- train_num_tokens: 29,973,830,841
- test_num_tokens: 26,169,193
- total_num_samples: 72,728,846
- train_num_samples: 72,664,846
- test_num_samples: 64,000

`35B`

- ratio_wikipedia: 20.35%
- total_num_tokens: 35,000,000,468
- train_num_tokens: 34,973,480,399
- test_num_tokens: 26,520,069
- total_num_samples: 84,753,828
- train_num_samples: 84,689,828
- test_num_samples: 64,000

## License information

For CulturaX: https://huggingface.co/datasets/uonlp/CulturaX#license-information

For Wikipedia: https://huggingface.co/datasets/wikimedia/wikipedia#licensing-information
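For reference, each config above can be loaded by name with the `datasets` library. A minimal sketch, where `"user/dataset-name"` is a placeholder that must be replaced with this dataset's actual Hugging Face Hub id:

```python
from datasets import load_dataset

# "user/dataset-name" is a placeholder for this dataset's actual Hub id.
# Config names correspond to the token budgets above ("10k" ... "35B").
train = load_dataset("user/dataset-name", "10B", split="train")
test = load_dataset("user/dataset-name", "10B", split="test")
print(train)  # ~9.97B training tokens across ~24.6M samples, per the stats above
```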