{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "YxH4gsO3zB3E" }, "source": [ "# streaming dataset sampler\n", "\n", "for https://hf.co/datasets/LLM360/TxT360\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "KyXvK3HQ4MYr", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "a2ef8c9e-c6d8-44eb-814b-2b1079b5e29b" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "96\n", " total used free shared buff/cache available\n", "Mem: 334Gi 1.9Gi 315Gi 2.0Mi 16Gi 330Gi\n", "Swap: 0B 0B 0B\n" ] } ], "source": [ "!nproc\n", "!free -h" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "lIYdn1woOS1n", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "e5acae68-95dc-46f8-dc91-c9ba673a570b" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m307.2/307.2 kB\u001b[0m \u001b[31m4.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m44.4/44.4 kB\u001b[0m \u001b[31m1.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m52.8/52.8 kB\u001b[0m \u001b[31m2.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m9.9/9.9 MB\u001b[0m \u001b[31m77.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m471.6/471.6 kB\u001b[0m \u001b[31m22.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m330.9/330.9 kB\u001b[0m \u001b[31m19.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m116.3/116.3 kB\u001b[0m \u001b[31m8.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m177.6/177.6 kB\u001b[0m \u001b[31m14.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.2/1.2 MB\u001b[0m \u001b[31m53.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.9/2.9 MB\u001b[0m \u001b[31m72.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m134.8/134.8 kB\u001b[0m \u001b[31m9.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m194.1/194.1 kB\u001b[0m \u001b[31m13.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m239.5/239.5 kB\u001b[0m \u001b[31m16.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m124.6/124.6 kB\u001b[0m \u001b[31m8.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m309.6/309.6 kB\u001b[0m \u001b[31m18.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m208.9/208.9 kB\u001b[0m \u001b[31m13.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", 
"\u001b[?25h" ] } ], "source": [ "!pip install -q ninja\n", "!pip install -U -q transformers datasets accelerate sentencepiece\n", "from huggingface_hub import notebook_login\n", "# notebook_login()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "8WAZVp76gIGi", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "3db2896f-f541-44fa-e071-0a390325d0a2" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m73.4/73.4 kB\u001b[0m \u001b[31m2.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n", " Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.2/1.2 MB\u001b[0m \u001b[31m18.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Building wheel for fasttext-langdetect (setup.py) ... \u001b[?25l\u001b[?25hdone\n", " Building wheel for fasttext (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n" ] } ], "source": [ "!pip install fasttext-langdetect tiktoken -q" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "sUKyBuj-ssql" }, "outputs": [], "source": [ "import tiktoken\n", "enc = tiktoken.get_encoding(\"cl100k_base\")\n", "assert enc.decode(enc.encode(\"hello world\")) == \"hello world\"\n", "\n", "# To get the tokeniser corresponding to a specific model in the OpenAI API:\n", "enc = tiktoken.encoding_for_model(\"gpt-4\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "TTrcHl3reJfw", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "0d33b152-b01b-4a65-eaa6-1258d3ece3bb" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "/usr/local/lib/python3.10/dist-packages/torch_xla/__init__.py:202: UserWarning: `tensorflow` can conflict with `torch-xla`. Prefer `tensorflow-cpu` when using PyTorch/XLA. To silence this warning, `pip uninstall -y tensorflow && pip install tensorflow-cpu`. 
If you are in a notebook environment such as Colab or Kaggle, restart your notebook runtime afterwards.\n", " warnings.warn(\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "min_length_tokens: 256\n", "max_length_tokens: 8192\n" ] } ], "source": [
"from pathlib import Path\n",
"import os\n",
"import random\n",
"import re\n",
"\n",
"from datasets import Dataset, DownloadConfig, load_dataset\n",
"from ftlangdetect import detect\n",
"from tqdm.auto import tqdm\n",
"from transformers import AutoTokenizer\n",
"\n",
"MIN_LENGTH_TOKENS = 256\n",
"MAX_LENGTH_TOKENS = 8192\n",
"\n",
"cfg = DownloadConfig(max_retries=20, extract_on_the_fly=True)\n",
"\n",
"print(f\"min_length_tokens: {MIN_LENGTH_TOKENS}\")\n",
"print(f\"max_length_tokens: {MAX_LENGTH_TOKENS}\")\n",
"\n",
"# @title define core fns\n",
"\n",
"\n",
"def get_workers():\n",
"    \"\"\"Return half of the available CPU cores (at least one).\"\"\"\n",
"    return max(1, os.cpu_count() // 2)\n",
"\n",
"\n",
"def normalize_text(text):\n",
"    \"\"\"\n",
"    Collapse text to a single whitespace-normalized line.\n",
"\n",
"    Example:\n",
"        original_text = \"This is a\\nsample text with\\n\\nmultiple spaces.\"\n",
"        normalized_text = normalize_text(original_text)\n",
"        print(normalized_text)\n",
"\n",
"    Args:\n",
"        text (str): Text to normalize.\n",
"\n",
"    Returns:\n",
"        str: Text with newlines removed and runs of whitespace collapsed.\n",
"    \"\"\"\n",
"    text = str(text)\n",
"    # Remove newlines\n",
"    text = text.replace(\"\\n\", \" \")\n",
"\n",
"    # Normalize whitespace\n",
"    text = re.sub(r\"\\s+\", \" \", text)\n",
"\n",
"    return text\n",
"\n",
"\n",
"def get_lang(text):\n",
"    \"\"\"fasttext-langdetect wrapper.\n",
"\n",
"    Args:\n",
"        text (str): Text to classify.\n",
"\n",
"    Returns:\n",
"        str: Detected language code, or \"unknown\"/\"unknown-error\" on failure.\n",
"    \"\"\"\n",
"    text = str(text)\n",
"\n",
"    if len(text) < 3:\n",
"        return \"unknown\"\n",
"\n",
"    try:\n",
"        return detect(text=normalize_text(text)[:5000])[\"lang\"]\n",
"    except Exception:\n",
"        return \"unknown-error\"\n",
"\n",
"\n",
"def is_en(text: str):\n",
"    \"\"\"Return True if the detected language is English.\"\"\"\n",
"    return get_lang(text) == \"en\"\n",
"\n",
"\n",
"def is_length_appropriate(\n",
"    text,\n",
"    tokenizer,\n",
"    min_length: int = MIN_LENGTH_TOKENS,\n",
"    max_length: int = MAX_LENGTH_TOKENS,\n",
"):\n",
"    \"\"\"\n",
"    Check if the tokenized text length falls within the specified bounds.\n",
"\n",
"    Note: token counts come from the tiktoken encoder `enc` defined above;\n",
"    the `tokenizer` argument is accepted for compatibility but not used.\n",
"\n",
"    Args:\n",
"        text (str): The text to be tokenized.\n",
"        tokenizer: Tokenizer object (currently unused).\n",
"        min_length (int): Minimum token length.\n",
"        max_length (int): Maximum token length.\n",
"\n",
"    Returns:\n",
"        bool: True if the text length is appropriate, False otherwise.\n",
"    \"\"\"\n",
"    tokenized = enc.encode(text)\n",
"    token_length = len(tokenized)\n",
"    return min_length < token_length < max_length\n",
"\n",
"\n",
"def save_dataset(dataset, save_path):\n",
"    \"\"\"\n",
"    Save the dataset to the specified path.\n",
"\n",
"    Args:\n",
"        dataset: The dataset to save.\n",
"        save_path (str): Path to save the dataset.\n",
"    \"\"\"\n",
"    save_path = Path(save_path)\n",
"    dataset.save_to_disk(str(save_path), num_proc=get_workers())\n",
"    print(f\"Dataset saved to {save_path}\")\n",
"\n",
"\n",
"def main(\n",
"    dataset_name,\n",
"    config_name=\"default\",\n",
"    text_column=\"text\",\n",
"    save_path: str = None,\n",
"    tokenizer_name=\"meta-llama/Meta-Llama-3-8B\",\n",
"    check_lang: bool = True,\n",
"    max_samples=500,\n",
"    min_length_chars: int = None,\n",
"):\n",
"    \"\"\"\n",
"    Stream the dataset, filter samples by length, and save the result.\n",
"\n",
"    Args:\n",
"        dataset_name (str): Name of the dataset on the Hugging Face Hub.\n",
"        config_name (str, optional): Dataset configuration (subset) name.\n",
"        text_column (str, optional): Name of the column containing the text.\n",
"        save_path (str, optional): Path to save the processed dataset.\n",
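"        check_lang (bool, optional): Intended switch for English-only filtering (see `is_en`); not applied in the current loop.\n",
"        min_length_chars (int, optional): Fast character-count pre-filter applied before token counting; defaults to MIN_LENGTH_TOKENS.\n",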
" tokenizer_name (str): Name of the tokenizer.\n", " max_samples (int): Maximum number of samples to save.\n", " \"\"\"\n", " ds_short_name = dataset_name.split(\"/\")[-1]\n", " min_length_chars = min_length_chars or MIN_LENGTH_TOKENS\n", "\n", " # Load the tokenizer\n", " tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n", "\n", " # Load the dataset in streaming mode\n", " streaming_dataset = load_dataset(\n", " dataset_name, config_name, split=\"train\", streaming=True,\n", " download_config=cfg\n", " ).shuffle(seed=random.SystemRandom().randint(69, 2**32) - 1)\n", "\n", " # Initialize variables for filtered data\n", " filtered_texts = []\n", "\n", " # Process the dataset\n", " for sample in tqdm(streaming_dataset, desc=\"Processing Dataset\"):\n", " try:\n", " if min_length_chars:\n", " # fast check first\n", " if len(sample[text_column]) < min_length_chars:\n", " continue\n", "\n", " # check length\n", " if is_length_appropriate(sample[text_column], tokenizer):\n", "\n", " filtered_texts.append(sample)\n", " except Exception as e:\n", " _txt = str(sample[text_column])[:200]\n", " print(f\"Error checking sample: {_txt} ...:\\t{e}\")\n", " # Stop after collecting max_samples\n", " if len(filtered_texts) == max_samples:\n", " print(f\"hit max samples: {max_samples}\")\n", " break\n", "\n", " # Create a dataset from the filtered texts\n", " # filtered_dataset = Dataset.from_dict({\"text\": filtered_texts})\n", " filtered_dataset = Dataset.from_list(filtered_texts)\n", "\n", " # Save the dataset\n", " max_samples = len(filtered_texts)\n", " save_path = (\n", " save_path\n", " or Path(\"exported-datasets\") / f\"{ds_short_name}_filtered-{max_samples}-samples\"\n", " )\n", " save_path = Path(save_path)\n", " save_path.mkdir(parents=True, exist_ok=True)\n", " print(f\"Saving dataset to {save_path}...\")\n", " save_dataset(filtered_dataset, save_path)\n", "\n", " print(\"done!\")\n", "\n", " return save_path" ] }, { "cell_type": "markdown", "metadata": { "id": "ubPxNmo3sHPq" }, "source": [ "# run sampling" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "5cDeP2nYe-Q5", "colab": { "base_uri": "https://localhost:8080/", "height": 345, "referenced_widgets": [ "e65bf163bd494c4ebde30cfb24f6270f", "5e4028ca94b64a6897c5762532c543c6", "e7628b124ba74bbeaa6200f6d8564e3d", "0099125bba2d4b78ad8932e73138bf48", "ff192c0b9abf4e4fb6c884ca029dff83", "d4d484c94020400e940dbb213b8c96d3", "5c5dbf2547f34b71ae9ca5b5f4bba5aa", "1dbe0ca39261450da1a8924dd97e2670", "a0e1adb07b5e47a7afb4d9f8904693c2", "34a7da6a99074af9bbe35c9f01fa88f1", "25b69783e632402192924b012a3ac03d", "554b8cf5195f41e7bd397b79dba7f0c7", "b5a289fd31a94370a80ffe159b6efbd8", "a337a4b27d854ac5be83046f85106d47", "533b9aceb3834bfa8033b202d5569d0d", "55a8f2ee652d4eb08612791f2abaf5fc", "18e6b5fcb3c24d5a990756c1b77deec7", "d963f0d7247b4caaa5a2cd0f1212e10d", "356052b46f434c5eb53afa2d5650b135", "e93b025271534d43ba1d79056dcf1a48", "0ce0f41c3bcf4a05a2185a32d9f65f7e", "6419ea5c41ef4e63b52b6966acf2674f", "35e69a88a77142538dece16f1ae2dc16", "ccf4ab8e79e747b3980243139a067d88", "4396c4ff088049ac9111d4f1da83d461", "d0b034f9f8d54067bcc7aea301f4cca0", "06adecf0d9b541ae9c49c928a63cc16f", "16b7fbe0319a4afebc06a992d9643814", "410f4540c0204ef0b96ced900241a0a6", "21cbac56ad87437a8da484c199ca19c0", "a7b5bb74e0de43b5ae50ab9a5b5de387", "3fd11272672a45bea8271d37cb60c0c6", "6fe61adcb8a1495684d7f71b8d28a47e", "49c1fef053114ea997583e79ea0b7da5", "4d02dcd774b0484c8e440852736e774c", "09ccaa11ef4946c9a29d06b8376f7e1c", 
"ccdae2b8553546199eeaef6bc051fa2d", "a42be9aa7217455daf8b76ec04f6fa3d", "590cbfe9abec4774bfa5c1d7f71a9321", "982f5b8bee6d467b84eea3e2776961a3", "74bf08964b7243c394f9d39b6f5144dd", "346a3259ae2d4631806ff2c61e4197a8", "0b0e243e9270472299b40e229ac4fb26", "030c9a42da32430da2983799aff25a55", "fc6a4a69fe6144608694e825f51680ae", "bb02721a6101409b9492c4a41b6c0973", "c0c20d972a3d49638b3f49718bb62b04", "e12ddd614c0f44199e35b75f6e185a24", "cd5b04d59bf5443db676954e79e5afcd", "81ece27d77f74b8b8887d50f48cf9ebb", "a473808588e241278d779a55cb393034", "405e8862f54240d3a1f05a8bba575b79", "984767ce63ff42589490d7cf051be8d6", "9cea423c79814af5a3ffeb08b48a9a9b", "fbb75ddb51b042ef9f449eb2d4553956", "dc476ee1bc0940c785ee4447b65003f3", "569e74add7044d3883d75290a15d8288", "1d03a7016a38473e8f0635e91333738e", "005805ad12ce4edf9c28700cce32da24", "15a0d42a86104357863e0083484efbe8", "cf20dc975c284145892d0191188d86b3", "1f22648fc9d74728ada7650325087d48", "d322188029e54a439e86c466a6236d03", "3e00f1119c30433ab20b0c02c01bcae0", "aa58fa50dd1546fe92dcdd9c01140959", "fbc3962919634448ba36b95e95095def", "862a11184e684616bbd8200f6f6c9202", "2da7f362e8284615bc215debe8fadcb9", "9ea553041ae64f828def949e6a904cd9", "fbc10d5d22b24f9bbf664147be9ef215", "8bbe009e86154b2da876d6ee83d270d5", "1b596784bc3d48fab72ee24be88822b4", "22159405b2124f96bf3aaa34379cac5b", "9a41cb658d284fc7950d352e52b3f095", "e4acddde09e14f3195342606a2a1e1dc", "82b572779e5b4513ab6a6cee530ce882", "ece3a2c2a2a24505ab4baab77e9898ae" ] }, "outputId": "cd04b94e-e56a-45c1-de9c-3da55228034a" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "LLM360/TxT360\n", "max_samples:\t1000000\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "tokenizer_config.json: 0%| | 0.00/50.6k [00:00