{ "cells": [ { "cell_type": "code", "source": [], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "kLmXfHXy80BY", "outputId": "d99480a2-f2a2-43ec-fe12-fce0a740bae3" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "/content\n" ] } ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "id": "VjYy0F2gZIPR", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "ecb00576-852b-4494-cbb9-7d0909a3b738" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "The following additional packages will be installed:\n", " libaria2-0 libc-ares2\n", "The following NEW packages will be installed:\n", " aria2 libaria2-0 libc-ares2\n", "0 upgraded, 3 newly installed, 0 to remove and 18 not upgraded.\n", "Need to get 1,513 kB of archives.\n", "After this operation, 5,441 kB of additional disk space will be used.\n", "Selecting previously unselected package libc-ares2:amd64.\n", "(Reading database ... 124950 files and directories currently installed.)\n", "Preparing to unpack .../libc-ares2_1.18.1-1ubuntu0.22.04.3_amd64.deb ...\n", "Unpacking libc-ares2:amd64 (1.18.1-1ubuntu0.22.04.3) ...\n", "Selecting previously unselected package libaria2-0:amd64.\n", "Preparing to unpack .../libaria2-0_1.36.0-1_amd64.deb ...\n", "Unpacking libaria2-0:amd64 (1.36.0-1) ...\n", "Selecting previously unselected package aria2.\n", "Preparing to unpack .../aria2_1.36.0-1_amd64.deb ...\n", "Unpacking aria2 (1.36.0-1) ...\n", "Setting up libc-ares2:amd64 (1.18.1-1ubuntu0.22.04.3) ...\n", "Setting up libaria2-0:amd64 (1.36.0-1) ...\n", "Setting up aria2 (1.36.0-1) ...\n", "Processing triggers for man-db (2.10.2-1) ...\n", "Processing triggers for libc-bin (2.35-0ubuntu3.8) ...\n", "/sbin/ldconfig.real: /usr/local/lib/libtbb.so.12 is not a symbolic link\n", "\n", "/sbin/ldconfig.real: /usr/local/lib/libhwloc.so.15 is not a symbolic link\n", "\n", "/sbin/ldconfig.real: /usr/local/lib/libur_adapter_opencl.so.0 is not a symbolic link\n", "\n", "/sbin/ldconfig.real: /usr/local/lib/libtbbmalloc_proxy.so.2 is not a symbolic link\n", "\n", "/sbin/ldconfig.real: /usr/local/lib/libtbbbind_2_5.so.3 is not a symbolic link\n", "\n", "/sbin/ldconfig.real: /usr/local/lib/libtbbmalloc.so.2 is not a symbolic link\n", "\n", "/sbin/ldconfig.real: /usr/local/lib/libur_adapter_level_zero.so.0 is not a symbolic link\n", "\n", "/sbin/ldconfig.real: /usr/local/lib/libtbbbind_2_0.so.3 is not a symbolic link\n", "\n", "/sbin/ldconfig.real: /usr/local/lib/libur_loader.so.0 is not a symbolic link\n", "\n", "/sbin/ldconfig.real: /usr/local/lib/libumf.so.0 is not a symbolic link\n", "\n", "/sbin/ldconfig.real: /usr/local/lib/libtcm.so.1 is not a symbolic link\n", "\n", "/sbin/ldconfig.real: /usr/local/lib/libtbbbind.so.3 is not a symbolic link\n", "\n", "/sbin/ldconfig.real: /usr/local/lib/libtcm_debug.so.1 is not a symbolic link\n", "\n", "\n", "Download Results:\n", "gid |stat|avg speed |path/URI\n", "======+====+===========+=======================================================\n", "55e009|\u001b[1;32mOK\u001b[0m | 91KiB/s|/content/joy/text_model/adapter_config.json\n", "\n", "Status Legend:\n", "(OK):download completed.\n", "\u001b[0m\n", "Download Results:\n", "gid |stat|avg speed |path/URI\n", "======+====+===========+=======================================================\n", "293d1a|\u001b[1;32mOK\u001b[0m | 77MiB/s|/content/joy/text_model/adapter_model.safetensors\n", "\n", "Status Legend:\n", "(OK):download completed.\n", "\u001b[0m\n", 
"Download Results:\n", "gid |stat|avg speed |path/URI\n", "======+====+===========+=======================================================\n", "dd9439|\u001b[1;32mOK\u001b[0m | 85MiB/s|/content/joy/clip_model.pt\n", "\n", "Status Legend:\n", "(OK):download completed.\n", "\n", "Download Results:\n", "gid |stat|avg speed |path/URI\n", "======+====+===========+=======================================================\n", "e49830|\u001b[1;32mOK\u001b[0m | 53KiB/s|/content/joy/config.yaml\n", "\n", "Status Legend:\n", "(OK):download completed.\n", "\u001b[0m\n", "Download Results:\n", "gid |stat|avg speed |path/URI\n", "======+====+===========+=======================================================\n", "f76217|\u001b[1;32mOK\u001b[0m | 46MiB/s|/content/joy/image_adapter.pt\n", "\n", "Status Legend:\n", "(OK):download completed.\n", "Requirement already satisfied: peft in /usr/local/lib/python3.11/dist-packages (0.14.0)\n", "Collecting bitsandbytes\n", " Downloading bitsandbytes-0.45.1-py3-none-manylinux_2_24_x86_64.whl.metadata (5.8 kB)\n", "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.11/dist-packages (from peft) (1.26.4)\n", "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.11/dist-packages (from peft) (24.2)\n", "Requirement already satisfied: psutil in /usr/local/lib/python3.11/dist-packages (from peft) (5.9.5)\n", "Requirement already satisfied: pyyaml in /usr/local/lib/python3.11/dist-packages (from peft) (6.0.2)\n", "Requirement already satisfied: torch>=1.13.0 in /usr/local/lib/python3.11/dist-packages (from peft) (2.5.1+cu124)\n", "Requirement already satisfied: transformers in /usr/local/lib/python3.11/dist-packages (from peft) (4.47.1)\n", "Requirement already satisfied: tqdm in /usr/local/lib/python3.11/dist-packages (from peft) (4.67.1)\n", "Requirement already satisfied: accelerate>=0.21.0 in /usr/local/lib/python3.11/dist-packages (from peft) (1.2.1)\n", "Requirement already satisfied: safetensors in /usr/local/lib/python3.11/dist-packages (from peft) (0.5.2)\n", "Requirement already satisfied: huggingface-hub>=0.25.0 in /usr/local/lib/python3.11/dist-packages (from peft) (0.27.1)\n", "Requirement already satisfied: filelock in /usr/local/lib/python3.11/dist-packages (from huggingface-hub>=0.25.0->peft) (3.17.0)\n", "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.11/dist-packages (from huggingface-hub>=0.25.0->peft) (2024.10.0)\n", "Requirement already satisfied: requests in /usr/local/lib/python3.11/dist-packages (from huggingface-hub>=0.25.0->peft) (2.32.3)\n", "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.11/dist-packages (from huggingface-hub>=0.25.0->peft) (4.12.2)\n", "Requirement already satisfied: networkx in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (3.4.2)\n", "Requirement already satisfied: jinja2 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (3.1.5)\n", "Collecting nvidia-cuda-nvrtc-cu12==12.4.127 (from torch>=1.13.0->peft)\n", " Downloading nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl.metadata (1.5 kB)\n", "Collecting nvidia-cuda-runtime-cu12==12.4.127 (from torch>=1.13.0->peft)\n", " Downloading nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl.metadata (1.5 kB)\n", "Collecting nvidia-cuda-cupti-cu12==12.4.127 (from torch>=1.13.0->peft)\n", " Downloading nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl.metadata (1.6 kB)\n", "Collecting 
nvidia-cudnn-cu12==9.1.0.70 (from torch>=1.13.0->peft)\n", " Downloading nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl.metadata (1.6 kB)\n", "Collecting nvidia-cublas-cu12==12.4.5.8 (from torch>=1.13.0->peft)\n", " Downloading nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_x86_64.whl.metadata (1.5 kB)\n", "Collecting nvidia-cufft-cu12==11.2.1.3 (from torch>=1.13.0->peft)\n", " Downloading nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_x86_64.whl.metadata (1.5 kB)\n", "Collecting nvidia-curand-cu12==10.3.5.147 (from torch>=1.13.0->peft)\n", " Downloading nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_x86_64.whl.metadata (1.5 kB)\n", "Collecting nvidia-cusolver-cu12==11.6.1.9 (from torch>=1.13.0->peft)\n", " Downloading nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_x86_64.whl.metadata (1.6 kB)\n", "Collecting nvidia-cusparse-cu12==12.3.1.170 (from torch>=1.13.0->peft)\n", " Downloading nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_x86_64.whl.metadata (1.6 kB)\n", "Requirement already satisfied: nvidia-nccl-cu12==2.21.5 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (2.21.5)\n", "Requirement already satisfied: nvidia-nvtx-cu12==12.4.127 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (12.4.127)\n", "Collecting nvidia-nvjitlink-cu12==12.4.127 (from torch>=1.13.0->peft)\n", " Downloading nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl.metadata (1.5 kB)\n", "Requirement already satisfied: triton==3.1.0 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (3.1.0)\n", "Requirement already satisfied: sympy==1.13.1 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (1.13.1)\n", "Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.11/dist-packages (from sympy==1.13.1->torch>=1.13.0->peft) (1.3.0)\n", "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.11/dist-packages (from transformers->peft) (2024.11.6)\n", "Requirement already satisfied: tokenizers<0.22,>=0.21 in /usr/local/lib/python3.11/dist-packages (from transformers->peft) (0.21.0)\n", "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.11/dist-packages (from jinja2->torch>=1.13.0->peft) (3.0.2)\n", "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.11/dist-packages (from requests->huggingface-hub>=0.25.0->peft) (3.4.1)\n", "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.11/dist-packages (from requests->huggingface-hub>=0.25.0->peft) (3.10)\n", "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.11/dist-packages (from requests->huggingface-hub>=0.25.0->peft) (2.3.0)\n", "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.11/dist-packages (from requests->huggingface-hub>=0.25.0->peft) (2024.12.14)\n", "Downloading bitsandbytes-0.45.1-py3-none-manylinux_2_24_x86_64.whl (69.7 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m69.7/69.7 MB\u001b[0m \u001b[31m14.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_x86_64.whl (363.4 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m363.4/363.4 MB\u001b[0m \u001b[31m4.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl (13.8 MB)\n", "\u001b[2K 
\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m13.8/13.8 MB\u001b[0m \u001b[31m18.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl (24.6 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m24.6/24.6 MB\u001b[0m \u001b[31m34.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl (883 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m883.7/883.7 kB\u001b[0m \u001b[31m51.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl (664.8 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m664.8/664.8 MB\u001b[0m \u001b[31m2.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_x86_64.whl (211.5 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m211.5/211.5 MB\u001b[0m \u001b[31m5.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_x86_64.whl (56.3 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m56.3/56.3 MB\u001b[0m \u001b[31m20.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_x86_64.whl (127.9 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m127.9/127.9 MB\u001b[0m \u001b[31m7.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_x86_64.whl (207.5 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m207.5/207.5 MB\u001b[0m \u001b[31m6.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl (21.1 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m21.1/21.1 MB\u001b[0m \u001b[31m86.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hInstalling collected packages: nvidia-nvjitlink-cu12, nvidia-curand-cu12, nvidia-cufft-cu12, nvidia-cuda-runtime-cu12, nvidia-cuda-nvrtc-cu12, nvidia-cuda-cupti-cu12, nvidia-cublas-cu12, nvidia-cusparse-cu12, nvidia-cudnn-cu12, nvidia-cusolver-cu12, bitsandbytes\n", " Attempting uninstall: nvidia-nvjitlink-cu12\n", " Found existing installation: nvidia-nvjitlink-cu12 12.5.82\n", " Uninstalling nvidia-nvjitlink-cu12-12.5.82:\n", " Successfully uninstalled nvidia-nvjitlink-cu12-12.5.82\n", " Attempting uninstall: nvidia-curand-cu12\n", " Found existing installation: nvidia-curand-cu12 10.3.6.82\n", " Uninstalling nvidia-curand-cu12-10.3.6.82:\n", " Successfully uninstalled nvidia-curand-cu12-10.3.6.82\n", " Attempting uninstall: nvidia-cufft-cu12\n", " Found existing installation: nvidia-cufft-cu12 11.2.3.61\n", " Uninstalling nvidia-cufft-cu12-11.2.3.61:\n", " Successfully uninstalled nvidia-cufft-cu12-11.2.3.61\n", " Attempting uninstall: nvidia-cuda-runtime-cu12\n", " Found existing installation: nvidia-cuda-runtime-cu12 12.5.82\n", " Uninstalling nvidia-cuda-runtime-cu12-12.5.82:\n", " Successfully uninstalled nvidia-cuda-runtime-cu12-12.5.82\n", " Attempting uninstall: nvidia-cuda-nvrtc-cu12\n", " 
Found existing installation: nvidia-cuda-nvrtc-cu12 12.5.82\n", " Uninstalling nvidia-cuda-nvrtc-cu12-12.5.82:\n", " Successfully uninstalled nvidia-cuda-nvrtc-cu12-12.5.82\n", " Attempting uninstall: nvidia-cuda-cupti-cu12\n", " Found existing installation: nvidia-cuda-cupti-cu12 12.5.82\n", " Uninstalling nvidia-cuda-cupti-cu12-12.5.82:\n", " Successfully uninstalled nvidia-cuda-cupti-cu12-12.5.82\n", " Attempting uninstall: nvidia-cublas-cu12\n", " Found existing installation: nvidia-cublas-cu12 12.5.3.2\n", " Uninstalling nvidia-cublas-cu12-12.5.3.2:\n", " Successfully uninstalled nvidia-cublas-cu12-12.5.3.2\n", " Attempting uninstall: nvidia-cusparse-cu12\n", " Found existing installation: nvidia-cusparse-cu12 12.5.1.3\n", " Uninstalling nvidia-cusparse-cu12-12.5.1.3:\n", " Successfully uninstalled nvidia-cusparse-cu12-12.5.1.3\n", " Attempting uninstall: nvidia-cudnn-cu12\n", " Found existing installation: nvidia-cudnn-cu12 9.3.0.75\n", " Uninstalling nvidia-cudnn-cu12-9.3.0.75:\n", " Successfully uninstalled nvidia-cudnn-cu12-9.3.0.75\n", " Attempting uninstall: nvidia-cusolver-cu12\n", " Found existing installation: nvidia-cusolver-cu12 11.6.3.83\n", " Uninstalling nvidia-cusolver-cu12-11.6.3.83:\n", " Successfully uninstalled nvidia-cusolver-cu12-11.6.3.83\n", "Successfully installed bitsandbytes-0.45.1 nvidia-cublas-cu12-12.4.5.8 nvidia-cuda-cupti-cu12-12.4.127 nvidia-cuda-nvrtc-cu12-12.4.127 nvidia-cuda-runtime-cu12-12.4.127 nvidia-cudnn-cu12-9.1.0.70 nvidia-cufft-cu12-11.2.1.3 nvidia-curand-cu12-10.3.5.147 nvidia-cusolver-cu12-11.6.1.9 nvidia-cusparse-cu12-12.3.1.170 nvidia-nvjitlink-cu12-12.4.127\n" ] } ], "source": [ "!apt -y install -qq aria2\n", "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n", "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n", "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n", "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n", "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n", "\n", "!pip install peft bitsandbytes" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "id": "EBNKXBwIkJLk", "outputId": "dd28d7b0-064c-4edd-fcbb-b070352970e1", "colab": { "base_uri": "https://localhost:8080/", "height": 836, "referenced_widgets": [ "aa6bcb20909c4dabb4e50cbe669d2e59", "76bc7cdb2b104e45a4003d6c22984ede", "1e061aa7debb41d79ebef10055081c12", "66675c37ea4b49149ead2b347ebaa537", "c3051c4febeb4037bc69d05d43b8cda0", "cf5ab98a09c84fe099dcd0712a6d06cb", "e8b290c91e404cc0bc06b8750f4bd56e", "e6e03ff76c504707be4eb27f162769a1", "9b7e280a64bd401dbbac84654eaee07b", "daea442d834348259866caf9ede23031", "85ae3126bd264e28a168a69b5843f56e", "eaad4156f51542809864313ae0ca6d4b", "9e7f0858f3674014893a375ce0dba0c4", "610c1bad13154c8cbb724a86da4a8abd", "1a2748ed8b0843158e487295cd277a37", "ce84c5b61fe0437588958bba178fb421", "9c94abf070634379bf79a850189a7e20", "4eb4542532f14ea785ef664dcc22f212", 
"9cec881e50da4f88a730d9408a02068b", "6d934ad9eb41426abcedcdbc24e52045", "68e9cabe2c2b4dfda49e527d7897dba6", "28bd944a6eb34cbd897ca24df9376aed", "a39c5b146c084e83ab577dce631369b7", "88d0b081a21243eb8d4054596e1c7ef9", "53b67df9d4ae4ac9ab48e747d8dcd033", "42cc71df9faf4659aefd3ba24dc9e670", "0d474dbef7d4443e8acedf7a7b79c86b", "c7b17b22a78e4809b4cab83a86ed2965", "e3e5117f373740889a529e673e0ac2c4", "c228a09ca0c44e97bb84fe87f25ec165", "2a374055a6bb4412a493c35958c7e860", "b8ba56c2bd4346379da6d7628a1a6fc9", "07ae6aed5cbd439b990a18e934fbcf2f", "20f386c28e3b4b5c8b2a1d473694f184", "5061bd227d7a4c55823789ccc14c1bb7", "b6130cbb281e4ef5b703fda57fd3722f", "96235cd1bd0e4d689ffed199a5f7961b", "ccd64ee52a114f31b60183be8dd4dd44", "1fb43523676d4bf0933755e0cb378776", "1f32165a5e9e47f4b438e2a7dd7b2127", "56e3d9a26b00431da1b86e5054a9dc70", "bd84808c448e440c9808a8ee13105018", "97a24237256a417f80fc9565b23444b7", "23ee2c81346d4e119a748bb4fc99f9d5", "b10c589bfcd9473fbbe32a165d9984a9", "f66816c9b3a64f52b2711a1d4d704b69", "6fe65841b8604e58a6e838b59e554c70", "124b40be3d5b4e1b891ba2a84f4c1086", "c4f45da24d18425687b09ff05e4a4148", "fb784b2bcf344e48bebb14e5d1abe0a8", "63820c8a4cfc418493387853b399ace9", "575379db79eb46bdae141858567937a4", "04d5767da95c4393b6f632de9de6a8e4", "c7da57ea831a4ff38868e0d90efe88ef", "eabdf3b2e02d49e99e6f90b7c0d651ed", "8cc627f0f2c44d6aac739435d84d18e0", "590e2d1c32f744a9bd2eca814f24930d", "c4caa616ef0c4fb2b27b7b0dc8007694", "b3baa81e3b92443ca101369ee61cdc59", "d526edfee33d4107b3b9bf8ce2a6a5de", "1800d33557ac4f31877d8aa9ccd2843c", "e28e5e0deec042599f3a098f7de344df", "d7cda865776142ddb5fa0e6199f9ece4", "cdf884e8d5874cf39b6c4e36d75e357f", "d0010cba64424a30ae74afbe449f495c", "bb90845af9ef4c7a910e9ed5153960b4", "a5855c924dc743c39b0a60c155fdda96", "aa9ff6b5be1948b9a59bf677607598f3", "00c024acefd9448cb973a520329b5bda", "75ecc2bc46b443ddb3d5b0317c89ab62", "f9db21e92be04beca4ab5f6b7f0d289d", "7816f3b7270d42cd95a69cda53288dca", "d59b61a44a1d4e6cb846d9768903a987", "692282b87f4740deaa4dbdc740497541", "843b7f3e0f5b4403b69e5c694b79aed6", "81727e5eab5149d2a24007f9d59b517b", "39585b15c40b4476889dc98ed110f6ff", "9b2cb8292d53406d97fd89bb0d53aaaa", "e5b077fdb79d4461819413f2d75b00f4", "2cd0959597d84b0184fc9e4b9c5a119e", "36a8d82b88374c72aa6b5eb35d22b1f6", "03262042969345e78d0c722934844214", "bad4cb7eb51042c5b683953ac4ab373b", "bcb524c0e5084a5f8a0eac92b33e386e", "8227b4e0fa924ed6bb3f91e72163004f", "863a287788ea48558e72e15259db9650", "865df9bf621647e7ab51044919c5ac83", "df3450c5249441c2960ce40b72177127", "2900e012356645278c77b9d2c2a1f7ff", "43d676701db24db88dabb37927f005a6", "0696d2c6681c4c719f35d8c70818b8d8", "a1f237be5f9a47f18adbecaedcde48a4", "7666f625e2db42759540ed9b18cb04d6", "aebaa183831e4ef2ab3bcd2fdc06d8fa", "9bb60a5ea23b47d08369a002648068ab", "06fb48a30c3e46609179628588d2bd0d", "48b9353cd83144b4a5b41c7f1a3c4f67", "d19f08ad784c4614a7612f81f5a55706", "2dd3c04db9904878a2eb6d92878e6beb", "8451f18d92e44e6f97b2293b6aa39d49", "a63f7abc9c984796a3e3ce274f7fccef", "2ff9281e97f34af1992af1052b66f611", "6092b8069adf4b9690be177472ba245e", "f2a8f7e2fd8b415c9d9c8c3ddcbfe771", "9d7d3f44d62f4d10866fd7fdd7901eac", "8e3c110936d04d4aa3275c41820e3c9b", "6182c5b765e049a49bc2383cd0e90c9c", "27699bde1e9844a4823d1b0398c42a5f", "31aa337ae12545219135eddc6a0b3ce5", "ceb9bc86a4d049f7a06708f045a07ecd", "3556f5edde894b63ae368fd1fb816592", "2a810a1a7bfd4afa93602fe9c5929b6c", "194660582f7e4f48b280bc5665003a6b", "06e6a7d2a72a4413a9db87976518404c", "3b29bb51812f4013ac53fbf757809157", "2456f731f00b4a00ae179c246b27eef4", 
"6d925927c88e40a1a0c11ebff9c18c82", "10e8fcc6749940ddb404e1962ec7602f", "5d543ccbcdb1439697a67f9d8dfc9962", "89f6a4183d8b4a09bfdf6d4468ce60f9", "4318346385f543128617b4cdb566e552", "8cc27d8c393f4d42949f45eff03460d7", "41bc31ff01644dd9bc628a74e5986b37", "99e07ad14b9c485c9cf758df4e2d3bc6", "850fdb775c0646bf98ece317e754dbfe", "90662b18c91749898cd7ed15274133be", "ff2485421e1548039c2493796a208eff", "4037f21950c04c96b60248cd62220a25", "2afd5fd4f6a244afbbeb7500ffb8ee67", "962f80ff5b0f451eabeb4a8349071714", "54c42da0e3ff44ed8a9f22e388db2341", "c8a1352e5488458bbad1ecaba23dd1a8", "bab4df97ae5f45a19630bb99c2fc612a", "c5b10f286a2443a48ff82b5b93a70d62", "fe2fe435941848c5b5250b7260f486ff", "fbf9b94ec50342a8ad5d640b61576a07", "638e3190a8f6457d8c6f775032afbfd0", "dd2f5cd6d9a34c1b81ba2eb0ac5b5d0f", "614e827d94b7467d93ff7f9a096e9e58", "d6b8165336f647b09a433784e97e0e73", "7c1724d20c424a008b54058ebb2076ec", "0b4f0424b16a4d408addc971df483f8c", "883d9e3cb1a240d68e7e21d22de0da7d", "907e2db2f8a443af81ad31a4e6b802dc", "655c34951ec446e9ad915a0693ba453d", "9fff9a20614045f99377169971092f53", "e3a80619d7f74905bf71b38ffdef3dca", "5e4de08259254b90a1833e4a52bdef07", "2ed3276064c44a07a5881c478670d8e2", "8de84515a5f8407eb2652bcf0a896d67", "33063916070240c68df4d533ca4d2af6", "9fce9a73f16f4a448857490ca3e0cbe8", "c183c90ab9b74fb3980f93fd3e49fbcf", "ad4c28a5a14c41969c6243bfa5a7f5f5", "b64252230d0a49ba8d7a619b9a4dc2db", "12dea66ec5fc4149a6d76861dbf2a515", "e6c207bcb0a74d26aa41f39067bdab7d", "2e6fc14164ff480f8a0533846ba2e649", "1bea6a2714054136bc5600bf2ad64138", "ac0d3b7bc9f047fdaeb06cccbc9173df", "075ff72eab074d86b90e1666f8b9d67e", "a5c8f180001040e7be951244e2a2f294", "53c8cb09101b49cc8cd72d303e5fcd80", "c0fbd2d628e84234beb4bf658348e52b", "b24f46989aa249da951d75e64d5d4ee4", "815e9798057a4479b207785f98139e21", "306f9a5f25d94c798ccb371d1b397063", "5d808c053d5c4f37b5b165d0de680ce8", "ac0bb5a8d881483eb65563b9243ad3a4", "7be8f121c2814719981c893f9b74016e", "020fcf08f95e4eb78f8a955af164adff", "23534a68fbb143599b0bb028f09ac074", "79f5f27010b44a6a88f90fa9df939f48", "58c80fb3ad92451ba3851af1b1fad0e7", "24442baa68df453386b79acd74878c1d", "65cd68dad7b14ee3b5532fb83968d3cd", "cdfb0aeb78ae45b7a8371d55117eddcf", "71406407dcd447d5b22b7dfb3cdbd439", "70f977ee2a044b75bd8e248aced3096f", "48d4a67ea29d4269ac014c04a7be5215", "46644d29bb62462ab429581d1e8abe66", "ce16728b11b44418adc5e382a3384ee8", "88df5fc3ade9434f9a716f770b02b6ca", "6e69d6a4135741748ad7b0ca0db36c4d", "9680d371e3e84a3a97e1bb7b46d56601", "057a932a422f40a1b1d5d1355382809b", "c6fc4e45e2fd4e02ac86af15ceb0360f", "43cbb633025845e7a6fec832cde23d1a", "ec6aa86b63454691899a09e8da6a2b41", "81607d81cc39426bbc8923a5ffa8975b", "b03ead37155e43ab868eed2e204fe410", "367ffb6144204d3b9c318d4a70affe44", "e8eea123814a4efe9cfd0b61d8a5121a", "4628b4533b8244c88c27169c8d63f533", "a8e4a52013d546d78f6eb03248012350", "72485278675e47958ddbb4649f10ec50", "1b9c1204bd58417db870566c51b6d791", "53003428380f4a0498233c186c5bd63f", "2f4d3f3967614cb69300ce77c4e41e4c", "4823ef44903147168a3d412058570d9e", "d9542330661048c185fce528770962cb", "8a12f81141eb4d7b831d8a45b864c744", "8fb152e385f2415681831138fd72c60f", "aa3ec48011524c2fa0ef348824aec231", "1467088a0b6e46e09d01ba113fdff46c", "4fde733c61124d04a0ba29691efd707d", "d37b8916d8bd41beb18e75a4a454501a", "d0a494b20ac54948b18e666f9097b346", "cca31d76b9db449e9c20107d87a72ca1" ] } }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "/usr/local/lib/python3.11/dist-packages/huggingface_hub/utils/_auth.py:94: UserWarning: \n", "The secret `HF_TOKEN` does not exist 
in your Colab secrets.\n", "To authenticate with the Hugging Face Hub, create a token in your settings tab (https://huggingface.co/settings/tokens), set it as secret in your Google Colab and restart your session.\n", "You will be able to reuse this secret in all of your notebooks.\n", "Please note that authentication is recommended but still optional to access public models or datasets.\n", " warnings.warn(\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "preprocessor_config.json: 0%| | 0.00/368 [00:00:70: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n", " checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "tokenizer_config.json: 0%| | 0.00/50.6k [00:00:83: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. 
Please open an issue on GitHub for any issues related to this experimental feature.\n", " image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", map_location=\"cpu\"))\n" ] } ], "source": [ "from huggingface_hub import InferenceClient\n", "from torch import nn\n", "from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM\n", "import torch\n", "import torch.amp.autocast_mode\n", "from PIL import Image\n", "import os\n", "import torchvision.transforms.functional as TVF\n", "\n", "CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n", "MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B\"\n", "CAPTION_TYPE_MAP = {\n", " (\"descriptive\", \"formal\", False, False): [\"Describe the image in 400 words\"],\n", " (\"descriptive\", \"formal\", False, True): [\"Write a descriptive caption for this image in a formal tone within {word_count} words.\"],\n", " (\"descriptive\", \"formal\", True, False): [\"Write a {length} descriptive caption for this image in a formal tone.\"],\n", " (\"descriptive\", \"informal\", False, False): [\"Write a descriptive caption for this image in a casual tone.\"],\n", " (\"descriptive\", \"informal\", False, True): [\"Write a descriptive caption for this image in a casual tone within {word_count} words.\"],\n", " (\"descriptive\", \"informal\", True, False): [\"Write a {length} descriptive caption for this image in a casual tone.\"],\n", " (\"training_prompt\", \"formal\", False, False): [\"Write a stable diffusion prompt for this image.\"],\n", " (\"training_prompt\", \"formal\", False, True): [\"Write a stable diffusion prompt for this image within {word_count} words.\"],\n", " (\"training_prompt\", \"formal\", True, False): [\"Write a {length} stable diffusion prompt for this image.\"],\n", " (\"rng-tags\", \"formal\", False, False): [\"Write a list of Booru tags for this image.\"],\n", " (\"rng-tags\", \"formal\", False, True): [\"Write a list of Booru tags for this image within {word_count} words.\"],\n", " (\"rng-tags\", \"formal\", True, False): [\"Write a {length} list of Booru tags for this image.\"],\n", "}\n", "\n", "class ImageAdapter(nn.Module):\n", "\tdef __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):\n", "\t\tsuper().__init__()\n", "\t\tself.deep_extract = deep_extract\n", "\t\tif self.deep_extract:\n", "\t\t\tinput_features = input_features * 5\n", "\t\tself.linear1 = nn.Linear(input_features, output_features)\n", "\t\tself.activation = nn.GELU()\n", "\t\tself.linear2 = nn.Linear(output_features, output_features)\n", "\t\tself.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)\n", "\t\tself.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))\n", "\t\tself.other_tokens = nn.Embedding(3, output_features)\n", "\t\tself.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3\n", "\tdef forward(self, vision_outputs: torch.Tensor):\n", "\t\tif self.deep_extract:\n", "\t\t\tx = torch.concat((\n", "\t\t\t\tvision_outputs[-2],\n", "\t\t\t\tvision_outputs[3],\n", "\t\t\t\tvision_outputs[7],\n", "\t\t\t\tvision_outputs[13],\n", "\t\t\t\tvision_outputs[20],\n", "\t\t\t), dim=-1)\n", "\t\t\tassert len(x.shape) == 3, f\"Expected 3, got {len(x.shape)}\" # batch, tokens, features\n", "\t\t\tassert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f\"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}\"\n", 
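"\t\t# when deep_extract is off, only the penultimate hidden state of the CLIP vision tower is used below\n",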
"\t\telse:\n", "\t\t\tx = vision_outputs[-2]\n", "\t\tx = self.ln1(x)\n", "\t\tif self.pos_emb is not None:\n", "\t\t\tassert x.shape[-2:] == self.pos_emb.shape, f\"Expected {self.pos_emb.shape}, got {x.shape[-2:]}\"\n", "\t\t\tx = x + self.pos_emb\n", "\t\tx = self.linear1(x)\n", "\t\tx = self.activation(x)\n", "\t\tx = self.linear2(x)\n", "\t\tother_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))\n", "\t\tassert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f\"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}\"\n", "\t\tx = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)\n", "\t\treturn x\n", "\tdef get_eot_embedding(self):\n", "\t\treturn self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)\n", "\n", "clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n", "clip_model = AutoModel.from_pretrained(CLIP_PATH)\n", "clip_model = clip_model.vision_model\n", "checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n", "checkpoint = {k.replace(\"_orig_mod.module.\", \"\"): v for k, v in checkpoint.items()}\n", "clip_model.load_state_dict(checkpoint)\n", "# del checkpoint\n", "clip_model.eval()\n", "clip_model.requires_grad_(False)\n", "clip_model.to(\"cuda\")\n", "tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, use_fast=False)\n", "assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f\"Tokenizer is of type {type(tokenizer)}\"\n", "text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, load_in_8bit=True, device_map=\"auto\", torch_dtype=torch.bfloat16)\n", "text_model.load_adapter(\"/content/joy/text_model\")\n", "text_model.eval()\n", "image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)\n", "image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", map_location=\"cpu\"))\n", "image_adapter.eval()\n", "image_adapter.to(\"cuda\")\n", "\n", "@torch.no_grad()\n", "def stream_chat(input_image: Image.Image, caption_type: str, caption_tone: str, caption_length: str | int) -> str:\n", " torch.cuda.empty_cache()\n", " length = None if caption_length == \"any\" else caption_length\n", " if isinstance(length, str):\n", " try:\n", " length = int(length)\n", " except ValueError:\n", " pass\n", " if caption_type == \"rng-tags\" or caption_type == \"training_prompt\":\n", " caption_tone = \"formal\"\n", " prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))\n", " if prompt_key not in CAPTION_TYPE_MAP:\n", " raise ValueError(f\"Invalid caption type: {prompt_key}\")\n", " prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)\n", " print(f\"Prompt: {prompt_str}\")\n", " image = input_image.resize((384, 384), Image.LANCZOS)\n", " pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0\n", " pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])\n", " pixel_values = pixel_values.to('cuda')\n", " prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n", " with torch.amp.autocast_mode.autocast('cuda', enabled=True):\n", " vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)\n", " image_features = vision_outputs.hidden_states\n", " embedded_images = image_adapter(image_features)\n", " embedded_images = embedded_images.to('cuda')\n", " prompt_embeds 
= text_model.model.embed_tokens(prompt.to('cuda'))\n", " assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n", " embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))\n", " eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)\n", " inputs_embeds = torch.cat([\n", " embedded_bos.expand(embedded_images.shape[0], -1, -1),\n", " embedded_images.to(dtype=embedded_bos.dtype),\n", " prompt_embeds.expand(embedded_images.shape[0], -1, -1),\n", " eot_embed.expand(embedded_images.shape[0], -1, -1),\n", " ], dim=1)\n", " input_ids = torch.cat([\n", " torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n", " torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n", " prompt,\n", " torch.tensor([[tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")]], dtype=torch.long),\n", " ], dim=1).to('cuda')\n", " attention_mask = torch.ones_like(input_ids)\n", " generate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=300, do_sample=True, suppress_tokens=None) # Uses the default which is temp=0.6, top_p=0.9\n", " generate_ids = generate_ids[:, input_ids.shape[1]:]\n", " if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids(\"<|eot_id|>\"):\n", " generate_ids = generate_ids[:, :-1]\n", " caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n", " caption = f'{caption.strip()}'.replace('Prompt: Describe the image in 400 words','')\n", " return caption" ] }, { "cell_type": "code", "source": [ "import os\n", "from PIL import Image\n", "home_directory = '/content/'\n", "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n", "if using_Kaggle : home_directory = '/kaggle/working/'\n", "%cd {home_directory}\n", "\n", "def my_mkdirs(folder):\n", " if os.path.exists(folder)==False:\n", " os.makedirs(folder)\n", "\n", "\n", "tgt_directory = f'{home_directory}tmp'\n", "my_mkdirs(f'{tgt_directory}')\n", "\n", "tgt_folder = '/content/'\n", "suffixes = ['.png', '.jpeg' , '.webp' , '.jpg']\n", "num = 1\n", "for filename in os.listdir(tgt_folder):\n", " for suffix in suffixes:\n", " if not filename.find(suffix)>-1: continue\n", " print(filename)\n", " %cd {home_directory}\n", " input_image = Image.open(f\"{filename}\").convert('RGB')\n", " %cd {tgt_directory}\n", " input_image.save(f'{162 + num}.png', \"PNG\")\n", " num = num+1" ], "metadata": { "id": "yVnuCVjNNdim", "outputId": "a8ab6e34-a4c9-4c4a-8144-8f6d76f86ae8", "colab": { "base_uri": "https://localhost:8080/" } }, "execution_count": 3, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "/content\n", "2025-01-29 11.35.10_1.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.15.21.webp\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.18_6.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.18_5.jpg\n", "/content\n", "/content/tmp\n", "2012-05-15 22.22.14.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.18_3.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.23_3.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.21_4.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.20_2.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.21_1.jpg\n", "/content\n", "/content/tmp\n", "2013-01-14 
01.03.06.jpg\n", "/content\n", "/content/tmp\n", "2025-01-18 04.07.57_9.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.18_1.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.23_1.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.22_2.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.35.10_3.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.23_2.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.35.10_5.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.22.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.18_9.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.20_1.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.21_5.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.18_4.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.07.42.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.35.10_2.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.17.17.jpg\n", "/content\n", "/content/tmp\n", "2025-01-18 04.07.55.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.21_6.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.08.28.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.18.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.13.36.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.22_4.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.18_7.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.12.03.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.21_3.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.24_3.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.18_2.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.20.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.08.20.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.21.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.35.10_4.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.24.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.24_1.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.24_2.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.22_1.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.24_4.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.22_3.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.23.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.35.10.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.22_5.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.18_8.jpg\n", "/content\n", "/content/tmp\n", "2025-01-29 11.01.21_2.jpg\n", "/content\n", "/content/tmp\n" ] } ] }, { "cell_type": "code", "source": [ "%cd /content/\n", "import shutil\n", "shutil.make_archive('prompts', 'zip', '/content/tmp')" ], "metadata": { "id": "0C4KHOeN7V_O", "outputId": "c8559382-6a1d-4df2-84e0-a78017f7e9e4", "colab": { "base_uri": "https://localhost:8080/", "height": 53 } }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "/content\n" ] }, { "output_type": "execute_result", "data": { "text/plain": [ "'/content/prompts.zip'" ], "application/vnd.google.colaboratory.intrinsic+json": { "type": "string" } }, "metadata": {}, "execution_count": 10 } ] }, { "cell_type": "code", "source": [ "from PIL import Image\n", "\n", "# Open an existing image\n", "#image = Image.open('new_york_city.jpg')\n", "\n", "# Save the image in a different format\n", "\n", "\n", "suffix = 'png'\n", "for number in range(300):\n", " try:\n", " %cd /content/\n", " input_image = 
Image.open(f\"/content/tmp/{number+1}.{suffix}\").convert('RGB')\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " %cd /content/tmp\n", " f = open(f\"{number+1}.txt\", \"w\")\n", " f.write(f'{caption}')\n", " f.close()\n", " print(f\"...\\n\\n...caption for {number+1}.{suffix}\\n\\n...\")\n", " print(caption)\n", " except:\n", " continue\n", "#----#\n" ], "metadata": { "id": "NbiUlfjD3iwB", "outputId": "74750d45-5853-4736-8d96-a75157c372a0", "colab": { "base_uri": "https://localhost:8080/" } }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "/content\n", "Prompt: Describe the image in 400 words\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "/usr/local/lib/python3.11/dist-packages/bitsandbytes/autograd/_functions.py:315: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n", " warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n" ] } ] }, { "cell_type": "code", "source": [ "%cd /content/\n", "import shutil\n", "shutil.make_archive('prompts', 'zip', '/content/tmp')" ], "metadata": { "id": "E7t75uf3GUVP", "outputId": "894bd2ab-3a7e-4856-88d7-371fe32bba04", "colab": { "base_uri": 
"https://localhost:8080/", "height": 53 } }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "/content\n" ] }, { "output_type": "execute_result", "data": { "text/plain": [ "'/content/prompts.zip'" ], "application/vnd.google.colaboratory.intrinsic+json": { "type": "string" } }, "metadata": {}, "execution_count": 7 } ] }, { "cell_type": "code", "source": [ "#initialize\n", "import torch\n", "from safetensors.torch import load_file, save_file\n", "from google.colab import drive\n", "drive.mount('/content/drive')" ], "metadata": { "id": "qMLhG1PBYHba", "outputId": "fb0469d2-a406-4015-87a2-0386c6928071", "colab": { "base_uri": "https://localhost:8080/" } }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Mounted at /content/drive\n" ] } ] }, { "cell_type": "code", "source": [ "import torch\n", "from safetensors.torch import load_file, save_file\n", "import torch.nn as nn\n", "from torch import linalg as LA\n", "import os\n", "import math\n", "import random\n", "import numpy as np\n", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "\n", "# This method rescales a _lora to a given ratio. I haven't tested it\n", "# yet but this is more or less how it works\n", "def rescale_and_save(_lora , savefile_name, new_ratio):\n", " count = 0\n", " lora = {}\n", " for key in _lora:count = count + 1\n", " NUM_ITEMS = count\n", " count = 0\n", " decimals = 6\n", " for key in _lora:\n", " if not f'{key}'.find('alpha') > -1: continue\n", " alpha = f'{key}'\n", " up = f'{key}'.replace('alpha' , 'lora_up.weight')\n", " down = f'{key}'.replace('alpha' , 'lora_down.weight')\n", " #------#\n", " rank = _lora[f'{down}'].shape[0]\n", " new_alpha = torch.tensor(new_ratio*rank).to(device = device , dtype=torch.float32)\n", " lora[up] = torch.round(torch.sqrt(_lora[alpha]/new_alpha)*_lora[up], decimals = decimals).to(device = device , dtype=torch.float32)\n", " lora[down] = torch.round(torch.sqrt(_lora[alpha]/new_alpha)*_lora[down], decimals = decimals).to(device = device , dtype=torch.float32)\n", " #-----#\n", " lora[alpha] = (new_alpha/_lora[alpha])*_lora[alpha].to(device = device , dtype=torch.float32)\n", " count = count + 3\n", " print(f'{count} / {NUM_ITEMS}')\n", " #--------#\n", " print(f'done!')\n", " print(f'casting params to fp16....')\n", " for key in lora: lora[f'{key}'] = lora[f'{key}'].to(device = device , dtype=torch.float16)\n", " #-------#\n", " print(f'done!')\n", " print(f'saving {savefile_name}...')\n", " save_file(lora , f'{savefile_name}')\n", " #-----------#\n", "\n", "tgt = load_file(\"/content/drive/MyDrive/'Your Name (Makoto Shinkai)' - v3 - [STYLE] [LORA] [FLUX] - spectrum_0008 by 'AI_Characters'.safetensors\")\n", "for key in tgt:\n", " tgt[f'{key}'] = tgt[f'{key}'].to(device = device , dtype=torch.float32)\n", "\n", "\n", "if False:\n", " for key in tgt:\n", " if f'{key}'.find('alpha')>-1: print(tgt[f'{key}'])\n", " print(f\" {key} : {tgt[f'{key}'].shape}\")\n", "\n", "name = 'shinkai'\n", "savefile_name = f'{name}.safetensors'\n", "new_ratio = 0.5\n", "\n", "\n", "if True:\n", " rescale_and_save(tgt , savefile_name, new_ratio)\n", " #(alpha/scale) = (32/16)\n", "\n", " tgt = load_file(f'{savefile_name}')\n", " for key in tgt:\n", " if f'{key}'.find('alpha')>-1: print(tgt[f'{key}'])\n", " print(f\" {key} : {tgt[f'{key}'].shape}\")" ], "metadata": { "id": "XoOQgjdVYLgs" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "import torch\n", "from 
safetensors.torch import load_file, save_file\n", "import torch.nn as nn\n", "from torch import linalg as LA\n", "import os\n", "import math\n", "import random\n", "import numpy as np\n", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "\n", "# filter_and_save\n", "# Use this method to change the scale = (alpha/rank) value of a given _lora\n", "# This method will also eliminate noise. All values < zero_threshold * e-6 will be set to 0\n", "# in the delta_W of this LoRa. The processed LoRa will be saved as a .safetensors file in fp16\n", "# The rank of the LoRa affects the file size. At rank 32 the filesize is 300 MB , at rank 16 the filesize is 150 MB and so on.\n", "#\n", "# When merging LoRa , it is important that\n", "# a) the scale of all the merged LoRas is the same. I use the scale = (alpha/rank) = 0.5 at all times.\n", "# For rank 32 , the alpha must be 16 , for example.\n", "#\n", "# b) The rank of the merged LoRas should be 32 or below , any larger values might trigger an 'Out of Memory' error on Google Colab GPUs\n", "# --------------\n", "# _lora - The lora which you wish to process\n", "# savefile_name - The name of the savefile to be created. Make sure the savefile_name ends with the '.safetensors' suffix\n", "# new_rank - The rank you wish to set the LoRa to\n", "# new_alpha - The alpha value you wish to set the LoRa to. For proper scaling ,\n", "# set the alpha value to half the value of the rank so (alpha/rank) = 0.5\n", "# This is a very common scale for trained LoRa\n", "#\n", "# zero_threshold - All values < zero_threshold * e-6 will be set to 0\n", "# in the delta_W of this LoRa. This is useful to eliminate 'junk' in the output of the\n", "# Lora when scaling it to strength above 0.8. A high zero_threshold will also make the Lora more compatible with other LoRa\n", "# , at the expense of making the LoRa less true to the originally trained image output.\n", "#\n", "def filter_and_save(_lora , savefile_name, new_rank , new_alpha, zero_threshold):\n", " lora = {}\n", " count = 0\n", " for key in _lora:count = count + 1\n", " NUM_ITEMS = count\n", " count = 0\n", " thresh = zero_threshold*0.000001 # 1e-6\n", " #-------#\n", " for key in _lora:\n", " if f'{key}'.find('alpha') > -1:\n", " lora[f'{key}'] = torch.tensor(new_alpha).to(device = device , dtype = torch.float32)\n", " count = count + 1\n", " print(f'{count} / {NUM_ITEMS}')\n", " continue\n", " #------#\n", " if not f'{key}'.find('lora_down') > -1: continue\n", " up = f'{key}'.replace('lora_down' , 'lora_up')\n", " down = f'{key}'\n", " #-------#\n", " delta_W = torch.matmul(_lora[up],_lora[down]).to(device = device , dtype=torch.float32)\n", " #---#\n", " N = delta_W.numel()\n", " y = delta_W.flatten().to(device = device , dtype=torch.float32)\n", " values,indices = torch.sort(y, descending = False) # smallest -> largest elements\n", " y = torch.zeros(y.shape).to(device = device , dtype=torch.float32)\n", " y[indices[values>thresh]] = 1\n", " y[indices[values<-thresh]] = 1\n", " y = y.unflatten(0,delta_W.shape).to(device = device , dtype=torch.float32)\n", " delta_W = torch.mul(delta_W,y).to(device = device , dtype=torch.float32)\n", " #------#\n", " tmp={}\n", " tmp['u'], tmp['s'], tmp['Vh'] = torch.svd(delta_W)\n", " tmp['u'] = tmp['u'][:,: new_rank]\n", " tmp['s'] = tmp['s'][: new_rank]\n", " #-------#\n", " tmp['u'] = torch.round(torch.matmul(tmp['u'], torch.diag(tmp['s'])),decimals=6)\n", " tmp['Vh'] = torch.round(tmp['Vh'].t()[: new_rank,:],decimals=6)\n", " #-------#\n",
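" # note: safetensors save_file() requires contiguous tensors, so make every tensor contiguous before saving\n",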
" for key in tmp:tmp[f'{key}'] = tmp[f'{key}'].contiguous()\n", " lora[up] = tmp['u'].to(device = device , dtype=torch.float32)\n", " lora[down] = tmp['Vh'].to(device = device , dtype=torch.float32)\n", " #-------#\n", " count = count + 2\n", " print(f'{count} / {NUM_ITEMS}')\n", " #-------#\n", " print(f'done!')\n", " print(f'casting params to fp16....')\n", " for key in _lora: lora[f'{key}'] = lora[f'{key}'].to(device = device , dtype=torch.float16)\n", " #-------#\n", " print(f'done!')\n", " print(f'saving {savefile_name}...')\n", " save_file(lora , f'{savefile_name}')\n", "#--------#\n", "\n", "# count_zeros\n", "# Use this method to gauge how large a zero_threshold you should set for a given Lora.\n", "# This function can serve as a 'preview' prior to running either the filter_and_save or\n", "# merge_and_save methods. Since it does not use SVD to re-pack the LoRa\n", "# , you can run this method on a non-GPU instance on Colab\n", "#-----------\n", "# _lora - The lora which you wish to process\n", "# zero_threshold - All values < zero_threshold * e-6 will be set to 0\n", "def count_zeros(_lora, zero_threshold):\n", " count = 0\n", " for key in _lora:count = count + 1\n", " NUM_ITEMS = count\n", " count = 0\n", " #-----#\n", " thresh = zero_threshold*0.000001 # 1e-6\n", "\n", " print(f'at zero_threshold = {zero_threshold}e-6 :')\n", " for key in _lora:\n", " if f'{key}'.find('alpha') > -1:\n", " count = count + 1\n", " continue\n", " #------#\n", " if not f'{key}'.find('lora_down') > -1: continue\n", " up = f'{key}'.replace('lora_down' , 'lora_up')\n", " down = f'{key}'\n", " #-------#\n", " delta_W = torch.matmul(_lora[up],_lora[down]).to(device = device , dtype=torch.float32)\n", " N = delta_W.numel()\n", " y = delta_W.flatten().to(device = device , dtype=torch.float32)\n", " values,indices = torch.sort(y, descending = False) # smallest -> largest elements\n", " y = torch.ones(y.shape).to(device = device , dtype=torch.float32)\n", " y[indices[values>thresh]] = 0\n", " neg_pcnt = round((100*torch.sum(y) / N).item(),2)\n", " y[indices[values<-thresh]] = 0\n", " count = count + 2\n", " pcnt = round((100*torch.sum(y) / N).item(),2)\n", " neg_pcnt = round(neg_pcnt - pcnt,2) # remove zero % from neg_pcnt\n", " pos_pcnt = round(100- pcnt - neg_pcnt,2)\n", " print(f'at {count} / {NUM_ITEMS} : {pcnt} % zeros ,{pos_pcnt} % pos. , {neg_pcnt} % neg ')\n", " #------#\n", "#-----#\n", "\n",
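"# Example usage (untested sketch) : the Drive path and file names below are placeholders ,\n", "# point load_file at one of your own LoRa files before enabling this block.\n", "if False:\n", " tgt = load_file('/content/drive/MyDrive/my_lora.safetensors') # placeholder path\n", " for key in tgt: tgt[f'{key}'] = tgt[f'{key}'].to(device = device , dtype=torch.float32)\n", " count_zeros(tgt, 50) # preview : how much of delta_W would a zero_threshold of 50e-6 set to 0 ?\n", " filter_and_save(tgt , 'my_lora_filtered.safetensors', new_rank = 32 , new_alpha = 16 , zero_threshold = 50)\n", "\n",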
"# This method rescales a _lora to a given ratio. I haven't tested it\n", "# but this is more or less how it works\n", "def rescale_and_save(_lora , savefile_name, new_ratio):\n", " count = 0\n", " lora = {}\n", " for key in _lora:count = count + 1\n", " NUM_ITEMS = count\n", " count = 0\n", " decimals = 6\n", " for key in _lora:\n", " if not f'{key}'.find('alpha') > -1: continue\n", " alpha = f'{key}'\n", " up = f'{key}'.replace('alpha' , 'lora_up.weight')\n", " down = f'{key}'.replace('alpha' , 'lora_down.weight')\n", " #------#\n", " rank = _lora[down].shape[0] # lora_down.weight has shape (rank , in_features)\n", " new_alpha = torch.tensor(new_ratio*rank).to(device = device , dtype=torch.float32)\n", " lora[up] = torch.round(torch.sqrt(_lora[alpha]/new_alpha)*_lora[up], decimals = decimals).to(device = device , dtype=torch.float32)\n", " lora[down] = torch.round(torch.sqrt(_lora[alpha]/new_alpha)*_lora[down], decimals = decimals).to(device = device , dtype=torch.float32)\n", " lora[alpha] = (new_alpha/_lora[alpha])*_lora[alpha].to(device = device , dtype=torch.float32)\n", " count = count + 3\n", " print(f'{count} / {NUM_ITEMS}')\n", " #--------#\n", " print(f'done!')\n", " print(f'casting params to fp16....')\n", " for key in lora: lora[f'{key}'] = lora[f'{key}'].to(device = device , dtype=torch.float16)\n", " #-------#\n", " print(f'done!')\n", " print(f'saving {savefile_name}...')\n", " save_file(lora , f'{savefile_name}')\n", " #-----------#\n", "\n", "# merge_and_save\n", "# This method uses a general neural net merging method known as TIES - which is a loose abbreviation for\n", "# 'Trim Elect Sign & Merge' according to the paper : https://arxiv.org/pdf/2306.01708\n", "#------------#\n", "# _lora1 - The lora which you wish to process.\n", "# _lora2 - The lora which you wish to process.\n", "# _lora3 - The lora which you wish to process.\n", "\n", "# NOTE about loras :\n", "#_lora1 , _lora2 and _lora3 can have different ranks.\n", "# Make sure the scale of all three loras is the same\n", "\n", "#The scale is defined as (alpha/rank) and should be 0.5\n", "# If the alpha value is too high or too low , for example if (alpha/rank) = 1\n", "# then run rescale_and_save(_lora , savefile_name, new_ratio) , where new_ratio is the desired (alpha/rank) value , e.g. 0.5\n", "# For example , a LoRa of rank 32 must have an alpha value of 16 for the scale = (alpha/rank) = 0.5 to be True\n", "\n", "# However , make sure each lora rank is equal to or below 32 ,\n", "#or that the sum of ranks does not exceed 3*32 = 96 , so as not to exceed the GPU memory limits on Google Colab. Slightly higher values might be fine.\n", "# Haven't tested it since I prefer merging LoRa at rank 32\n", "\n", "# savefile_name - The name of the savefile to be created. Make sure the savefile_name ends with the '.safetensors' suffix\n", "# new_rank - The rank you wish to set the LoRa to\n", "# new_alpha - The alpha value you wish to set the LoRa to. For proper scaling ,\n", "# set the alpha value to half the value of the rank so (alpha/rank) = 0.5\n", "# This is a very common scale for trained LoRa\n", "#\n", "# zero_threshold - All values < zero_threshold * e-6 will be set to 0\n", "# in the delta_W of this LoRa. This is useful to eliminate 'junk' in the output of the\n", "# Lora when scaling it to strength above 0.8. A high zero_threshold will also make the Lora more compatible with other LoRa\n", "# , at the expense of making the LoRa less true to the originally trained image output.\n", "\n",
"import torch\n", "from safetensors.torch import load_file, save_file\n", "import torch.nn as nn\n", "from torch import linalg as LA\n", "import os\n", "import math\n", "import random\n", "import numpy as np\n", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "\n", "# filter_and_save\n", "# Use this method to change the scale = (alpha/rank) value of a given _lora\n", "# This method will also eliminate noise. All values < zero_threshold * e-6 will be set to 0\n", "# in the delta_W of this LoRa. The processed LoRa will be saved as a .safetensors file in fp16\n", "# The rank of the LoRa affects the file size. At rank 32 the filesize is 300 MB , at rank 16 the filesize is 150 MB and so on.\n", "#\n", "# When merging LoRa , it is important that\n", "# a) the scale of all the merged LoRas is the same. I use the scale = (alpha/rank) = 0.5 at all times.\n", "# For rank 32 , the alpha must be 16 , for example.\n", "#\n", "# b) The rank of the merged LoRas should be 32 or below , any larger values might trigger an 'Out of Memory' error on Google Colab GPUs\n", "# --------------\n", "# _lora - The lora which you wish to process\n", "# savefile_name - The name of the savefile to be created. Make sure the savefile_name ends with the '.safetensors' suffix\n", "# new_rank - The rank you wish to set the LoRa to\n", "# new_alpha - The alpha value you wish to set the LoRa to. For proper scaling ,\n", "# set the alpha value to half the value of the rank so (alpha/rank) = 0.5\n", "# This is a very common scale for trained LoRa\n", "#\n", "# zero_threshold - All values < zero_threshold * e-6 will be set to 0\n", "# in the delta_W of this LoRa. This is useful to eliminate 'junk' in the output of the\n", "# Lora when scaling it to strength above 0.8. A high zero_threshold will also make the Lora more compatible with other LoRa\n",
"# , at the expense of making the LoRa less true to the originally trained image output.\n", "#\n", "def filter_and_save(_lora , savefile_name, new_rank , new_alpha, zero_threshold):\n", " lora = {}\n", " count = 0\n", " for key in _lora:count = count + 1\n", " NUM_ITEMS = count\n", " count = 0\n", " thresh = zero_threshold*0.000001 # 1e-6\n", " #-------#\n", " for key in _lora:\n", " if f'{key}'.find('alpha') > -1:\n", " lora[f'{key}'] = torch.tensor(new_alpha).to(device = device , dtype = torch.float32)\n", " count = count + 1\n", " print(f'{count} / {NUM_ITEMS}')\n", " continue\n", " #------#\n", " if not f'{key}'.find('lora_down') > -1: continue\n", " up = f'{key}'.replace('lora_down' , 'lora_up')\n", " down = f'{key}'\n", " #-------#\n", " delta_W = torch.matmul(_lora[up],_lora[down]).to(device = device , dtype=torch.float32)\n", " #---#\n", " N = delta_W.numel()\n", " y = delta_W.flatten().to(device = device , dtype=torch.float32)\n", " values,indices = torch.sort(y, descending = False) # smallest -> largest elements\n", " y = torch.zeros(y.shape).to(device = device , dtype=torch.float32)\n", " y[indices[values>thresh]] = 1\n", " y[indices[values<-thresh]] = 1\n", " y = y.unflatten(0,delta_W.shape).to(device = device , dtype=torch.float32)\n", " delta_W = torch.mul(delta_W,y).to(device = device , dtype=torch.float32)\n", " #------#\n", " tmp={}\n", " tmp['u'], tmp['s'], tmp['Vh'] = torch.svd(delta_W)\n", " tmp['u'] = tmp['u'][:,: new_rank]\n", " tmp['s'] = tmp['s'][: new_rank]\n", " #-------#\n", " tmp['u'] = torch.round(torch.matmul(tmp['u'], torch.diag(tmp['s'])),decimals=6)\n", " tmp['Vh'] = torch.round(tmp['Vh'].t()[: new_rank,:],decimals=6)\n", " #-------#\n", " for key in tmp:tmp[f'{key}'] = tmp[f'{key}'].contiguous()\n", " lora[up] = tmp['u'].to(device = device , dtype=torch.float32)\n", " lora[down] = tmp['Vh'].to(device = device , dtype=torch.float32)\n", " #-------#\n", " count = count + 2\n", " print(f'{count} / {NUM_ITEMS} , rank {_lora[down].shape[0]}')\n", " #-------#\n", " print(f'done!')\n", " print(f'casting params to fp16....')\n", " for key in _lora: lora[f'{key}'] = lora[f'{key}'].to(device = device , dtype=torch.float16)\n", " #-------#\n", " print(f'done!')\n", " print(f'saving {savefile_name}...')\n", " save_file(lora , f'{savefile_name}')\n", "#--------#\n", "\n",
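"# Example usage (untested sketch) : re-pack a single LoRa to rank 32 / alpha 16 , zeroing small delta_W values.\n", "# The filenames are placeholders ; cast to float32 first , as the driver code at the bottom of this cell does.\n", "# single = load_file('/content/my_lora.safetensors')\n", "# for key in single: single[f'{key}'] = single[f'{key}'].to(device = device , dtype = torch.float32)\n", "# filter_and_save(single , '/content/my_lora_r32_a16.safetensors' , 32 , 16 , 200)\n", "#----#\n", "\n",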
"# count_zeros\n", "# Use this method to gauge how large a zero_threshold you should set for a given LoRa.\n", "# This function can serve as a 'preview' prior to running either the filter_and_save or\n", "# merge_and_save methods. Since it does not use SVD to re-pack the LoRa ,\n", "# you can run this method on a non-GPU instance on Colab.\n", "#-----------\n", "# _lora - The lora which you wish to process\n", "# zero_threshold - All values < zero_threshold * e-6 will be set to 0\n", "def count_zeros(_lora, zero_threshold):\n", " count = 0\n", " for key in _lora:count = count + 1\n", " NUM_ITEMS = count\n", " count = 0\n", " #-----#\n", " thresh = zero_threshold*0.000001 # 1e-6\n", "\n", " print(f'at zero_threshold = {zero_threshold}e-6 :')\n", " for key in _lora:\n", " if f'{key}'.find('alpha') > -1:\n", " count = count + 1\n", " continue\n", " #------#\n", " if not f'{key}'.find('lora_down') > -1: continue\n", " up = f'{key}'.replace('lora_down' , 'lora_up')\n", " down = f'{key}'\n", " #-------#\n", " delta_W = torch.matmul(_lora[up],_lora[down]).to(device = device , dtype=torch.float32)\n", " N = delta_W.numel()\n", " y = delta_W.flatten().to(device = device , dtype=torch.float32)\n", " values,indices = torch.sort(y, descending = False) # smallest -> largest elements\n", " y = torch.ones(y.shape).to(device = device , dtype=torch.float32)\n", " y[indices[values>thresh]] = 0\n", " neg_pcnt = round((100*torch.sum(y) / N).item(),2)\n", " y[indices[values<-thresh]] = 0\n", " count = count + 2\n", " pcnt = round((100*torch.sum(y) / N).item(),2)\n", " neg_pcnt = round(neg_pcnt - pcnt,2) # remove zero % from neg_pcnt\n", " pos_pcnt = round(100- pcnt - neg_pcnt,2)\n", " print(f'at {count} / {NUM_ITEMS} : {pcnt} % zeros ,{pos_pcnt} % pos. , {neg_pcnt} % neg ')\n", " #------#\n", "#-----#\n", "\n", "# This method rescales a _lora to a given ratio. I haven't tested it\n", "# yet but this is more or less how it works\n", "def rescale_and_save(_lora , savefile_name, new_ratio):\n", " count = 0\n", " lora = {}\n", " for key in _lora:count = count + 1\n", " NUM_ITEMS = count\n", " count = 0\n", " decimals = 6\n", " for key in _lora:\n", " if not f'{key}'.find('alpha') > -1: continue\n", " alpha = f'{key}'\n", " up = f'{key}'.replace('alpha' , 'lora_up')\n", " down = f'{key}'.replace('alpha' , 'lora_down')\n", " #------#\n", " rank = _lora[down].shape[0] # lora_down has shape (rank , in_features)\n", " new_alpha = torch.tensor(new_ratio*rank).to(device = device , dtype=torch.float32)\n", " lora[up] = torch.round(torch.sqrt(_lora[alpha]/new_alpha)*_lora[up], decimals = decimals).to(device = device , dtype=torch.float32)\n", " lora[down] = torch.round(torch.sqrt(_lora[alpha]/new_alpha)*_lora[down], decimals = decimals).to(device = device , dtype=torch.float32)\n", " lora[alpha] = (new_alpha/_lora[alpha])*_lora[alpha].to(device = device , dtype=torch.float32)\n", " count = count + 3\n", " print(f'{count} / {NUM_ITEMS}')\n", " #--------#\n", " print(f'done!')\n", " print(f'casting params to fp16....')\n", " for key in lora: lora[f'{key}'] = lora[f'{key}'].to(device = device , dtype=torch.float16)\n", " #-------#\n", " print(f'done!')\n", " print(f'saving {savefile_name}...')\n", " save_file(lora , f'{savefile_name}')\n", " #-----------#\n", "\n",
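"# Example usage (untested sketch) : bring a LoRa whose scale is (alpha/rank) = 1 down to the 0.5 scale used here.\n", "# The filenames are placeholders.\n", "# wrong_scale = load_file('/content/my_lora.safetensors')\n", "# for key in wrong_scale: wrong_scale[f'{key}'] = wrong_scale[f'{key}'].to(device = device , dtype = torch.float32)\n", "# rescale_and_save(wrong_scale , '/content/my_lora_rescaled.safetensors' , 0.5)\n", "#----#\n", "\n",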
"# merge_and_save\n", "# This method uses a general neural net merging method known as TIES - which is a loose abbreviation for\n", "# 'Trim , Elect Sign & Merge' according to the paper : https://arxiv.org/pdf/2306.01708\n", "#------------#\n", "# _lora1 - The lora which you wish to process.\n", "# _lora2 - The lora which you wish to process.\n", "# _lora3 - The lora which you wish to process.\n", "\n", "# NOTE about loras :\n", "#_lora1 , _lora2 and _lora3 can have different ranks.\n", "# Make sure the scale of all three loras is the same\n", "\n", "#The scale is defined as (alpha/rank) and should be 0.5\n", "# If the alpha value is too high or too low , for example if (alpha/rank) = 1\n", "# then run rescale_and_save(_lora , savefile_name, new_ratio) , where new_ratio is the (alpha/rank) scale you want , i.e. 0.5\n", "# For example , a LoRa of rank 32 must have an alpha value of 16 for the scale = (alpha/rank) = 0.5 to hold\n", "\n", "# Make sure the sum of ranks for _lora1,_lora2 and _lora3 does not exceed 3*32 = 96 , so you stay within the GPU limits on Google Colab. Slightly higher values might be fine.\n", "# I haven't tested it since I prefer merging LoRa at rank 32\n", "\n", "# savefile_name - The name of the savefile to be created. Make sure the savefile_name ends with the '.safetensors' suffix\n", "# new_rank - The rank you wish to set the LoRa to\n", "# new_alpha - The alpha value you wish to set the LoRa to. For proper scaling ,\n", "# set the alpha value to half the value of the rank so (alpha/rank) = 0.5\n", "# This is a very common scale for trained LoRa\n", "#\n", "# zero_threshold - All values < zero_threshold * e-6 will be set to 0\n", "# in the delta_W of this LoRa. This is useful to eliminate 'junk' that persists in the delta_W after training. A high zero_threshold will also make the Lora more compatible with other LoRa\n", "# , at the expense of making the LoRa less true to the originally trained image output.\n", "def merge_and_save(_lora1 , _lora2 , _lora3, savefile_name, new_rank , new_alpha, zero_threshold):\n", " lora = {}\n", "\n", " count = 0\n", " for key in _lora1:count = count + 1\n", " NUM_ITEMS = count\n", " count = 0\n", " thresh = zero_threshold*0.000001 # 1e-6\n", " decimals = 2\n", "\n", " #-------#\n", " for key in _lora1:\n", " if not key in _lora2: continue\n", " if not key in _lora3: continue\n", " if f'{key}'.find('alpha') > -1:\n", " lora[f'{key}'] = torch.tensor(new_alpha).to(device = device , dtype = torch.float32)\n", " count = count + 1\n", " print(f'{count} / {NUM_ITEMS}')\n", " continue\n", " #------#\n", " if not f'{key}'.find('lora_down') > -1: continue\n", " up = f'{key}'.replace('lora_down' , 'lora_up')\n", " down = f'{key}'\n", " #-------#\n", "\n", " # Setup\n", " delta_W = torch.matmul(_lora1[up]*0,_lora1[down]*0).to(device = device, dtype=torch.float32)\n", " tgt_shape = delta_W.shape\n", " N = delta_W.numel()\n", " delta_W = torch.zeros(N).to(device = device , dtype=torch.float32)\n", " #-----#\n", "\n", " #Positives\n", " Y = torch.zeros(3,N).to(device = device , dtype=torch.float32)\n", " Y[0] = torch.matmul(_lora1[up],_lora1[down]).flatten().to(device = device , dtype=torch.float32)\n", " Y[1] = torch.matmul(_lora2[up],_lora2[down]).flatten().to(device = device , dtype=torch.float32)\n", " Y[2] = torch.matmul(_lora3[up],_lora3[down]).flatten().to(device = device , dtype=torch.float32)\n", " Y[torch.abs(Y)<thresh] = 0 # trim : discard values below the zero_threshold\n", " num = torch.sum(Y>0,dim=0) + 0.001 # per-element count of positive votes across the 3 loras\n", " elect = torch.sum(Y<0,dim=0) + 0.001 # per-element count of negative votes\n", " elect = (num>=elect)\n", " Y[Y<0] = 0\n", " Y = torch.sum(Y, dim=0).to(device = device , dtype=torch.float32)\n", " delta_W[elect] = torch.round((Y[elect]/num[elect]),decimals=decimals).to(device = device , dtype=torch.float32)\n", " #-----#\n", "\n",
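" # How the TIES steps map onto this code : 'Trim' is the thresholding above ,\n", " # 'Elect Sign' is the per-element vote across the three loras (the positive branch above keeps an\n", " # element when positive entries are at least as common as negative ones , the negative branch below\n", " # keeps it when negative entries win) , and 'Merge' is the sum-divided-by-count average of the\n", " # entries that agree with the elected sign.\n", "\n",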
" #Negatives\n", " Y = torch.zeros(3,N).to(device = device , dtype=torch.float32)\n", " Y[0] = torch.matmul(_lora1[up],_lora1[down]).flatten().to(device = device , dtype=torch.float32)\n", " Y[1] = torch.matmul(_lora2[up],_lora2[down]).flatten().to(device = device , dtype=torch.float32)\n", " Y[2] = torch.matmul(_lora3[up],_lora3[down]).flatten().to(device = device , dtype=torch.float32)\n", " Y[torch.abs(Y)<thresh] = 0 # trim again for the negative pass\n", " num = torch.sum(Y<0,dim=0) + 0.001 # per-element count of negative votes across the 3 loras\n", " elect = torch.sum(Y>0,dim=0) + 0.1 # per-element count of positive votes\n", " elect = (elect<num)\n", " Y[Y>0] = 0\n", " Y = torch.sum(Y, dim=0).to(device = device , dtype=torch.float32)\n", " delta_W[elect] = torch.round(Y[elect]/num[elect],decimals=decimals).to(device = device , dtype=torch.float32)\n", " #----#\n", "\n", " # Free up memory prior to SVD\n", " delta_W = delta_W.unflatten(0,tgt_shape).to(device = device , dtype=torch.float32)\n", " delta_W = delta_W.clone().detach()\n", " Y = {}\n", " num = {}\n", " elect = {}\n", " #-----#\n", "\n", " # Run SVD (Singular Value Decomposition)\n", " #to get the new lora_up and lora_down for delta_W\n", " tmp={}\n", " tmp['u'], tmp['s'], tmp['Vh'] = torch.svd(delta_W)\n", " tmp['u'] = tmp['u'][:,: new_rank]\n", " tmp['s'] = tmp['s'][: new_rank]\n", " tmp['u'] = torch.matmul(tmp['u'], torch.diag(tmp['s']))\n", " tmp['Vh'] = tmp['Vh'].t()[: new_rank,:]\n", " for key in tmp:tmp[f'{key}'] = tmp[f'{key}'].contiguous()\n", " lora[up] = torch.round(tmp['u'],decimals=decimals).to(device = device , dtype=torch.float32)\n", " lora[down] = torch.round(tmp['Vh'],decimals=decimals).to(device = device , dtype=torch.float32)\n", " #-------#\n", "\n", " count = count + 2\n", " print(f'{count} / {NUM_ITEMS}')\n", " #----#\n", " #--------#\n", " print(f'done!')\n", " print(f'casting params to fp16....')\n", " for key in lora: lora[f'{key}'] = lora[f'{key}'].to(device = device , dtype=torch.float16)\n", " #-------#\n", " print(f'done!')\n", " print(f'saving {savefile_name}...')\n", " save_file(lora , f'{savefile_name}')\n", "#------#\n", "\n", "new_rank = 16\n", "new_alpha = math.floor(new_rank/2)\n", "zero_threshold = 200 # e-6\n", "name = 'tuff_nuff_camel'\n", "a = load_file('/content/tuff.safetensors')\n", "b = load_file('/content/nuff.safetensors')\n", "c = load_file('/content/camel.safetensors')\n", "savefile_name = f'{name}_{zero_threshold}_r{new_rank}_a{new_alpha}.safetensors'\n", "\n", "count = 0\n", "for key in a:\n", " a[f'{key}'] = a[f'{key}'].to(device = device , dtype = torch.float32)\n", " count = count + 1\n", "print(f'num keys for a : {count}')\n", "count = 0\n", "for key in b:\n", " b[f'{key}'] = b[f'{key}'].to(device = device , dtype = torch.float32)\n", " count = count + 1\n", "print(f'num keys for b : {count}')\n", "count = 0\n", "for key in c:\n", " c[f'{key}'] = c[f'{key}'].to(device = device , dtype = torch.float32)\n", " count = count + 1\n", "print(f'num keys for c : {count}')\n", "\n", "tgt = {}\n", "#-----#\n", "print(f'for {name}.safetensors at scale = (alpha/rank) = 0.5')\n", "merge_and_save(b,c,a, savefile_name, new_rank , new_alpha, zero_threshold)\n", "\n", "#filter_and_save(a , savefile_name, new_rank , new_alpha, zero_threshold)\n" ], "metadata": { "id": "7ZSHM5embMd0" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "ldMOiaY7kJLp", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "dac2b6f7-c7d4-4f23-963e-1db1f34769a0" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Prompt: Describe the image in 400 words\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:315: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n", " warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n" ] }, { "output_type": "stream", "name": 
"stdout", "text": [ "...\n", "\n", "...caption for 22.jpg\n", "\n", "...\n", "This is a digital CGI rendering of a young woman with a realistic, hyper-detailed style. She has fair skin with a slight tan, and her long, straight black hair is pulled back into a high ponytail. Her eyes are a striking blue, accented with subtle eyeliner and mascara, and she has a calm, slightly serious expression with a hint of a smile. Her lips are full and naturally pink, with no lipstick.\n", "\n", "She is wearing a black, strapless top made of a shiny, smooth material that looks like leather or vinyl. The top has a harness-style design with straps that crisscross over her chest and back, featuring metal rings and buckles that add to the edgy, gothic aesthetic. The straps are black, adding contrast to her pale skin.\n", "\n", "The background is a simple gradient of dark gray and black, which makes the subject stand out. The lighting is soft and even, highlighting the contours of her face and body without casting any harsh shadows. The overall mood of the image is modern and slightly provocative, with a focus on the woman's confident and sensual presence. The texture of her hair and clothing is meticulously rendered, showcasing the artist's attention to detail and skill in digital art.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 23.jpg\n", "\n", "...\n", "A digital rendering of a woman in a provocative pose against a textured red wall. She has a pale skin tone and short, dark brown hair styled in a pixie cut. Her facial features are sharp and angular, with high cheekbones, a straight nose, and full lips painted a soft pink. Her green eyes are accentuated with subtle makeup, and she has a slightly parted mouth with a hint of a smile.\n", "\n", "She is wearing a daring, black fishnet bodysuit with a metallic gold trim that accentuates her curves. The bodysuit is cut low, revealing significant cleavage and a glimpse of her bare midriff. The fishnet material is sheer, allowing the metallic texture to peek through, adding an edgy contrast to the outfit. The bodysuit features cutouts and straps that wrap around her torso, emphasizing her slender waist and hips.\n", "\n", "Her pose is confident and slightly seductive, with one hand resting on her hip and the other hanging loose at her side. Her nails are manicured with a nude polish, matching the overall neutral color palette of her outfit. The background consists of a dark wooden paneling with a subtle grain texture, adding a rustic touch to the modern, sexy aesthetic of the image.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 24.jpg\n", "\n", "...\n", "A young woman stands against a plain white background, dressed in an outfit that screams modern and edgy fashion. She sports a deep green, strapless, velvet mini dress with a slightly off-the-shoulder neckline and large, ruffled, satin bow details on the shoulders. The dress hugs her slender frame, accentuating her small bust and slender waist. Her legs are wrapped in black, lace-patterned fishnet stockings that reach up to her mid-thigh, showing off her toned legs. She wears black, chunky platform high-heeled shoes with ankle straps, adding to her bold and confident look.\n", "\n", "Her fiery red hair cascades in loose waves down her back, adding a pop of color to her outfit. She accessorizes with a pair of large, black, over-the-ear headphones, giving her a modern, trendy vibe. 
In her right hand, she holds a small, black and white checkered clutch bag, adding a touch of class to her otherwise rebellious outfit.\n", "\n", "Her makeup is subtle, with a focus on her eyes, which are accentuated with dark eyeliner and mascara. She wears a black choker necklace around her neck, completing her edgy, avant-garde fashion style. The plain white background ensures all eyes are on her outfit and accessories, capturing the essence of modern fashion\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 25.jpg\n", "\n", "...\n", "A young woman stands against a plain white background, showing off her slender, petite figure. She has a fair complexion and long, wavy blonde hair cascading down to her shoulders. She is dressed in a sheer, deep red babydoll lingerie set, featuring delicate floral embroidery with green and red floral patterns. The babydoll has spaghetti straps with black trim and a lace trim along the edges, adding a touch of elegance and femininity. The garment is sheer, allowing a glimpse of her skin underneath, and the lace trim adds a touch of sensuality. The babydoll is short and flared, ending just above her knees, and the bottom hem is adorned with more floral embroidery. She is also wearing black thigh-high stockings, which contrast with her pale skin and the red of her lingerie. The woman is seen from the back, with her head turned slightly to the left, showcasing her profile. The background is plain white, with no other objects or distractions, ensuring that all attention is on her and the lingerie she is wearing. The image is well-lit, with soft lighting that highlights the texture and details of her outfit.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 26.jpg\n", "\n", "...\n", "A young woman stands against a plain white background, her back to the camera. She has long, straight, dark brown hair that flows down her back, and she's wearing a black lace skirt that drapes to her mid-thigh, with a subtle floral pattern on the fabric. The skirt has a high waistband that sits just below her hips. She's also wearing a dark purple corset with intricate floral embroidery in shades of pink and purple. The corset is a bit snug and features vertical lacing on the back, with thin straps over her shoulders. She has a black choker necklace around her neck, and a silver bracelet on her left wrist. Her skin is fair and smooth, with a slight tan, and she appears to be of Caucasian descent. She's looking slightly to her right, with her head tilted slightly to the side, giving her a casual and slightly contemplative expression. The lighting is even and bright, with no shadows, making her outfit and the details of the lace and embroidery stand out clearly. The overall style of the outfit is gothic or Victorian-inspired, with a focus on dark, rich colors and intricate details.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 27.jpg\n", "\n", "...\n", "A young woman stands against a plain white background, her body angled to the left. She's got a light to medium skin tone and a slender, toned physique. Her dark brown hair is slicked back in a high ponytail, and she's sporting bold, dark eyeliner and lipstick. She's wearing a black choker necklace with a small pendant.\n", "\n", "Her top is a sheer, deep red, long-sleeved crop top with a ruffled texture, featuring a keyhole cutout on the chest. 
The top is tied at the front, showing off her midriff and toned stomach.\n", "\n", "Her bottom half is a short, pleated mini skirt in a black and white tartan pattern. The skirt is held up by a black elastic band with a metal ring at the top.\n", "\n", "Her pose is confident and poised, with her head tilted slightly to the side, and her lips slightly parted. She's looking directly at the camera with a neutral expression. The overall style is edgy and gothic, with a focus on bold colors and textures. The lighting is bright and even, eliminating any shadows and highlighting the details of her outfit and features. The image is a photograph, captured in a professional studio setting.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 28.jpg\n", "\n", "...\n", "A young woman with a short, tousled black bob haircut and striking blue eyes is seen from the shoulders up, making a playful kissy face at the camera. She has fair skin and is wearing a black bra with thin straps, which accentuates her small to medium-sized breasts. Her makeup is minimal, with a subtle smoky eye and nude lip color, adding to her youthful and innocent appearance.\n", "\n", "The background consists of a white, textured fabric that appears to be a bedsheet, adding a soft and neutral backdrop that contrasts with her dark hair and clothing. The lighting is soft and natural, creating a warm and inviting atmosphere. Her expression is playful and slightly seductive, with her lips slightly parted and a hint of a smile.\n", "\n", "The image captures a moment of intimacy and vulnerability, with the focus on her facial expression and the texture of her skin and hair. The overall mood is casual and personal, as if the photo was taken in a private setting, such as a bedroom. The image is high-resolution, with sharp details that enhance the textures and colors, making it a clear and vivid photograph.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 29.jpg\n", "\n", "...\n", "A young woman with fair skin and a slim, petite figure is posing against a dark, ornate background with a golden, swirling pattern. She has bright pink, shoulder-length hair styled in twin pigtails, with a yellow headband featuring bunny ears and a cross-shaped hair clip. Her makeup is dramatic, with heavy eyeliner, false eyelashes, and bright pink lipstick.\n", "\n", "She is dressed in a shiny, black latex bikini set that accentuates her small breasts and flat stomach. The bikini top has the word \"HOT\" in bold, multicolored letters, and the bottoms are a matching black latex material. She wears white, knee-high platform boots with chunky soles, adding to her playful and provocative look.\n", "\n", "Her pose is dynamic, with one leg bent and lifted, showcasing her toned legs and the texture of the latex. She is looking directly at the camera with a mischievous smile, her right hand resting on her cheek and her left hand gripping her bent leg. The lighting highlights the glossy texture of her outfit and the softness of her hair, creating a vibrant and bold image. The overall style is a mix of cosplay and fashion photography, combining fantasy elements with a sensual, edgy aesthetic.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 30.jpg\n", "\n", "...\n", "A young Asian woman with a slender, petite build and pale skin sits on the floor against a plain light gray background. She sports a black, lace-trimmed bra that accentuates her small breasts and matching panties. 
Her long, straight black hair is styled in twin pigtails, with bangs that frame her face. Her blue eyes are accentuated with subtle makeup, and her expression is neutral with a slight, almost coy smile.\n", "\n", "She is wearing a pair of black, high-heeled platform boots with multiple straps and buckles, adding an edgy, gothic vibe to her outfit. The boots are made of leather and feature a chunky, platform sole, giving her a bold and confident look. Her legs are slightly apart, and she is seated with her right hand resting on her thigh and her left hand supporting her chin. The overall pose is relaxed yet provocative, with her knees bent and feet slightly apart.\n", "\n", "The lighting is soft and even, highlighting the texture of her skin and the fabric of her clothes. There are no other objects or people in the background, focusing all attention on her. The style of the photograph is contemporary and provocative, emphasizing the model's androgynous features and alternative fashion aesthetic.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 31.jpg\n", "\n", "...\n", "A young Asian woman is dressed in a provocative cosplay outfit, posing in front of a pole. She has long, straight black hair with bangs and a slim, curvy physique with large breasts. She is wearing a shiny, black latex jacket that covers her arms and chest, leaving her breasts exposed. The jacket has large blue circular designs on the chest, which are attached to the front by straps. She is also wearing black latex panties and thigh-high stockings with garter belts. Her skin is light tan, and her expression is playful, with her tongue sticking out and eyes closed.\n", "\n", "The background features a blurry scene with a vertical pole, which she is gripping with both hands. The pole is covered with a black cylinder that she is leaning on, and she has her tongue sticking out in a playful manner. The background is dimly lit with a blue and purple color scheme, giving the scene a futuristic or sci-fi vibe. The floor is made of a smooth, gray material, and there are some yellow and red striped objects in the background, possibly part of a set or props.\n", "\n", "The overall image is a mix of erotic and cosplay elements, with a focus on the subject's body and the provocative nature of the outfit.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 32.jpg\n", "\n", "...\n", "A young woman in a provocative cosplay outfit poses in front of a dimly lit, industrial-looking background. She is of East Asian descent with long, straight black hair that cascades over her shoulders. Her skin is light and smooth, and she has large, expressive eyes with a hint of makeup. She is wearing a black latex outfit that is both revealing and futuristic, with a high-collared, cropped jacket that exposes her midriff and large breasts. The outfit includes a pair of high-waisted, black latex panties with a shiny, glossy texture, and thigh-high stockings that are also latex. She has a small tattoo on her lower abdomen and a navel piercing. In her right hand, she holds a black baton, and her left hand is raised to her face, resting on her cheek.\n", "\n", "The background features a large, metallic shutter with vertical slats, giving a sense of an urban or industrial setting. There are also some colorful, glowing objects in the background, possibly balloons, adding a playful touch to the otherwise serious and provocative scene. 
The overall mood of the image is edgy and sensual, with a focus on the character's confident and seductive pose.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 33.jpg\n", "\n", "...\n", "A young woman with long, straight black hair styled with bangs stands in a dimly lit room with a metallic background featuring a grid pattern. She has a fair complexion and a slender yet curvy physique. Her breasts are large and her nipples are covered with black pasties. She wears a black, shiny latex outfit that reveals most of her body, including a thong and thigh-high stockings. The outfit has straps and buckles adorned with metal rings and studs, adding a BDSM element to her look. Her skin is smooth and shiny, possibly due to the lighting or makeup.\n", "\n", "She has a small tattoo on her left hip and a navel piercing with a silver jewel. Her expression is sultry, with parted lips and a slightly raised chin. She is holding a black whip in her right hand, adding to the dominant theme of the outfit.\n", "\n", "The room has a modern, industrial feel with metallic walls and a few yellow caution tape strips on the floor, hinting at a setting that may be a fetish-themed photo shoot or an erotic scene. The lighting highlights the glossy texture of her outfit and the smoothness of her skin. The overall composition is striking and provocative.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 34.jpg\n", "\n", "...\n", "A highly detailed and realistic CGI rendering of a young woman posing provocatively. She has long, straight, jet-black hair that cascades past her shoulders and a fair, smooth complexion. Her large, round breasts are fully exposed, with black X-shaped pasties covering her nipples. She is completely nude except for a black, shiny latex jacket that covers her arms and a pair of black thigh-high latex boots that accentuate her long legs. The outfit is complemented by a black choker with silver studs and a matching wrist cuff.\n", "\n", "Her slender yet curvy physique is highlighted by her smooth, shiny skin. She stands confidently, holding a microphone in her right hand, with her left hand resting on her hip, creating a sense of dominance. The background features a dimly lit industrial setting, with a yellow and black striped safety barrier on the left side and a metallic grid on the right, suggesting a warehouse or laboratory environment. The lighting is harsh and artificial, casting deep shadows and emphasizing the glossy texture of her latex outfit. The overall style of the image is highly stylized and hyper-realistic, with a focus on the contrast between her smooth skin and the shiny, reflective material of her outfit.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 35.jpg\n", "\n", "...\n", "A young Japanese woman with a fair complexion and a slim, petite figure sits on a blue wicker bench outdoors, with a clear blue sky above and a blurred cityscape in the background. Her short, straight brown hair is styled with bangs and a pink ribbon tied in a bow on top, giving her a playful and cute look. She wears a matching pink bikini set, with a ruffled halter top that accentuates her medium-sized breasts and a matching ruffled bottom that sits low on her hips. The bikini top features small metallic rings on the sides of the straps, and a large pink bow is tied at the center of the top. She accessorizes with a delicate silver necklace with a small heart pendant and a matching pink headband. 
Her expression is soft and inviting, with a slight smile and a playful pose, as she holds her hands up in a cute gesture, her fingers slightly bent. The photo is taken in bright daylight, highlighting the vibrant colors of her outfit and the natural setting. The overall mood is cheerful and summery, with a focus on her youthful and innocent charm.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 36.jpg\n", "\n", "...\n", "A young woman is posed in a playful and provocative manner, striking a pose on a plush, golden cushion. She is dressed in a vibrant, revealing outfit that includes a black, shiny, sequined crop top with the word \"HOT\" in bright, neon blue and red letters across the front. Her top barely covers her breasts, leaving her flat chest exposed. She pairs the top with matching black bikini bottoms. The outfit is completed with white, high-heeled boots that have star and heart designs on them, adding a whimsical and youthful vibe.\n", "\n", "Her skin is fair and smooth, with a light complexion. She has long, wavy, neon pink hair styled in two high ponytails with yellow bunny ears attached, adding to her playful and cute appearance. Her makeup is striking, featuring heavy eyeliner and mascara, and bright pink lipstick. The background is a dark, textured wall, contrasting with the bright colors of her outfit and the warm glow of an orange light source to the left, creating a dramatic and sultry atmosphere.\n", "\n", "Her pose is seductive, with one knee bent and the other leg extended, while her arms rest on her thighs, highlighting her toned, slender physique. The overall style is a mix of cosplay and fashion photography, combining elements of fantasy and eroticism.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 37.jpg\n", "\n", "...\n", "A digital CGI image of a young woman with an anime style, showcasing her in a provocative pose against a dark background. She has fair skin and long, flowing orange hair that cascades down her back. Her face is delicate with large, expressive eyes and a slight smile. She wears round, golden-framed glasses that add to her sophisticated yet playful look.\n", "\n", "She is completely nude except for some accessories. Around her neck, she wears a golden choker with a cross-shaped pendant and a pair of dangling earrings that look like small crosses. Her breasts are covered by a pair of nipple pasties with black and gold crosses. A metallic chain with dangling charms wraps around her torso, partially covering her navel, which has a small piercing. She also has thigh-high black leather boots with gold buckles on the sides.\n", "\n", "The lighting highlights her smooth skin and the glossy texture of her hair and boots, creating a sense of depth and realism. The background is a gradient of dark to light gray, with a subtle red hue on the right side, adding a dramatic and sensual atmosphere to the image. The overall style combines elements of fantasy and eroticism, with a focus on the character's figure and accessories.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 38.jpg\n", "\n", "...\n", "A highly detailed CGI rendering features a curvy woman with a fair skin tone and short, messy green hair. She stands confidently with one arm raised, gripping a long, dark-colored pole, and the other arm resting on her hip. Her physique is voluptuous, with large, round breasts barely contained by a revealing black harness. 
The harness is made of thin, flexible straps that crisscross her chest, arms, and midriff, leaving her navel exposed and showcasing her toned abs. She wears a matching black thong and garter belt that accentuates her wide hips and thick thighs. Her expression is confident and seductive, with full lips and a slight, knowing smile.\n", "\n", "Her outfit is minimalistic and provocative, with a black cap and arm band adding to the edgy look. The background is a lush jungle scene with green vines and leaves, lit by a warm, yellow light that casts dramatic shadows and highlights her body. The overall style of the image is hyper-realistic, with a focus on the textures of her skin, the sheen of the fabric, and the intricate details of the harness. The setting suggests an exotic and adventurous theme, with the jungle adding an element of mystery and allure.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 39.jpg\n", "\n", "...\n", "A young woman with a pale skin tone and a slender, curvy physique stands in a modern bathroom. She has long, wavy hair that is half white and half black, and wears a black leather harness outfit that includes a strap around her neck, arm cuffs, and a skimpy thong. The harness accentuates her medium-sized breasts, which are adorned with black tape over her nipples. She has a small, silver belly button piercing and a tattoo on her left forearm of a flower with thorns.\n", "\n", "Her face is delicate and symmetrical, with a high cheekbone structure, full lips, and large eyes that are accentuated with heavy eyeliner and subtle makeup. She wears black hoop earrings and a small black headpiece with a white flower.\n", "\n", "The background features a modern bathroom with a dark gray tiled wall and a large mirror. A tall green plant is visible on the left side, and a sink and faucet are visible on the right. The lighting is soft and natural, creating a warm and intimate atmosphere. The overall style is modern and edgy, with a focus on the woman's provocative and daring outfit and the clean lines of the bathroom.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 40.jpg\n", "\n", "...\n", "A young woman with light skin and long, straight, black hair with a white streak is posed provocatively on a bed. She is wearing a black leather harness that wraps around her chest and waist, accentuating her medium-sized, perky breasts. Her nipples are exposed through the harness, and she is also wearing a black leather collar around her neck. She has a small, black thong on, with her legs spread wide apart, revealing her shaved vulva. She has a tattoo on her left forearm and a small tattoo on her right hip.\n", "\n", "The background shows a simple bedroom setting with a white wall and a framed picture of a colorful abstract painting hanging on the wall above the bed. The bed is covered with white sheets and a blue-gray throw pillow. To the right of the bed, there is a black lamp with a white shade, and a wooden nightstand with a dark wooden top and a white lampshade. The room is well-lit, likely with natural light coming in from a window not visible in the image. The overall mood of the image is erotic and sensual, with a focus on the woman's body and the details of her outfit and accessories.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 41.jpg\n", "\n", "...\n", "A young woman in a cosplay costume poses against a pink gradient background. 
She has a light tan complexion and long, straight black hair styled in twin braids with blunt bangs that frame her face. Her facial features include high cheekbones, a small nose, and full lips painted with a subtle pink gloss.\n", "\n", "She is wearing a black bikini top with thin straps, emphasizing her large breasts. Her bikini bottom is also black and features thin straps that tie around her hips, revealing her flat stomach and a navel piercing with a small, sparkling jewel. A black choker with a metal collar and a series of small, round, black studs encircles her neck.\n", "\n", "Over her shoulders, she drapes a black, long-sleeved coat with a silver zipper and multiple pockets. Her hands are raised, holding the edges of the coat, and she has black gloves covering her fingers. Her physique is athletic and toned, with a defined waist and curvy hips. The lighting highlights her smooth skin and the textures of her clothing, creating a striking contrast with the soft pink background. The overall style of the costume is a mix of anime and cyberpunk, with a focus on edgy and futuristic fashion elements.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 42.jpg\n", "\n", "...\n", "A young woman poses provocatively, with her long, straight black hair cascading down her shoulders. She has fair skin and wears a black latex bodysuit that clings tightly to her body, highlighting her large, round breasts. The bodysuit is glossy and shiny, adding a sleek and futuristic vibe to her look. Her expression is sultry, with slightly parted lips and a hint of a smile.\n", "\n", "She is positioned in a low-angle shot, emphasizing her curvaceous figure and the contrast between her skin and the reflective latex. Her navel is pierced with a small, silver ring, adding a touch of edginess to her outfit.\n", "\n", "Her left arm is raised, holding a piece of black tape, which she is applying to her inner thigh, suggesting a playful and erotic act. Her right leg is bent, with her foot resting on a surface, further accentuating her pose.\n", "\n", "The background features a dimly lit industrial setting with metal shutters and a hint of machinery, giving the scene a gritty, urban feel. The overall style is a mix of cosplay and fetish photography, blending elements of fantasy and sensuality. The photograph captures a moment of vulnerability and boldness, highlighting the subject's confidence and allure.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 43.jpg\n", "\n", "...\n", "A young woman in a provocative cosplay outfit poses indoors in front of a garage door with horizontal ribbed panels. She stands confidently with her legs slightly apart, holding a long black katana sword in her right hand, which she leans against her left shoulder. Her left hand, partially obscured by the phone, takes the selfie.\n", "\n", "She is dressed in a revealing black latex outfit, featuring a cropped top with a keyhole design that exposes her large, round breasts, and a matching pair of high-waisted panties. Her arms are adorned with black leather wristbands, and she wears thigh-high black latex boots with high heels, adding to her dominatrix-like appearance.\n", "\n", "Her long, straight black hair cascades down her back, partially covering her face, which is not visible due to the angle and the phone. 
The phone case has a white design with the word \"SARAH\" in bold black letters.\n", "\n", "The floor is covered with large gray tiles, and in the background, there are orange traffic cones and a metallic, coiled hose or cable, adding to the industrial setting. The lighting is bright and even, highlighting the shiny texture of her latex outfit and the sleek design of the sword. The overall mood of the image is bold and edgy, emphasizing both the woman's attire and the setting.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 44.jpg\n", "\n", "...\n", "A young woman in a provocative pose is posing nude in a room with a bamboo backdrop. She has fair skin and a slender, athletic build with large, natural breasts. Her nipples are covered by black X-shaped tape. She has a small, pierced navel and wears black thigh-high stockings. Her long, straight magenta hair is styled in a ponytail, and she wears a white, frilly scarf tied around her neck. She has a playful expression with her tongue sticking out, and her lips are painted a bright red. A black tape is placed on her inner thigh, partially covering her genitals. The background features a tall, lush bamboo plant and a wooden sliding door, suggesting an Asian-inspired room. A piece of furniture with a textured fabric is visible in the lower left corner, adding to the cozy, intimate atmosphere. The lighting is warm and soft, enhancing the colors and textures of the scene. The overall mood is erotic and playful, with the woman's confident and provocative pose complemented by her bold makeup and attire. The image is a photograph taken with high resolution, capturing fine details such as the texture of her skin, hair, and clothing.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 45.jpg\n", "\n", "...\n", "A highly detailed CGI rendering of a nude woman with fair skin and a slender, fit physique. She has large, perky breasts with black tape covering her nipples and a small, dark tattoo between them. Her pubic area is also exposed, with a small amount of pubic hair visible. She has a belly button piercing and a small amount of semen on her chest, stomach, and pubic area, indicating recent sexual activity.\n", "\n", "Her short, burgundy hair is styled in a bob with bangs and is held back by a red headband. Her makeup is minimal, featuring dark eyeliner and mascara, with a hint of red lipstick. She has a slightly plump lower lip and a small, cute nose.\n", "\n", "She is sitting on a tatami mat, legs spread apart, wearing only thigh-high black leather boots. The background is a Japanese-style room with bamboo plants and a tatami mat on the floor. Two white futons with red and gold Japanese characters are placed behind her, adding to the traditional Japanese atmosphere. The lighting is soft, casting a warm glow on her skin and the room.\n", "\n", "The style is hyper-realistic CGI, with detailed textures and shading to give the image a lifelike feel. The image is explicit and intended for mature audiences, focusing on the eroticism of the woman's\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 46.jpg\n", "\n", "...\n", "A young woman with a fair complexion and long, straight, vibrant magenta hair styled in a bob cut with bangs. Her hair is complemented by a pair of black, round sunglasses perched on top of her head. She has large, expressive eyes with a hint of makeup, giving her a doll-like appearance. 
Her lips are a bright red, accentuating her youthful and feminine features.\n", "\n", "She is wearing a white dress shirt with a high collar, featuring delicate floral embroidery on the chest and cuffs. The shirt has a black ribbon tied around the neck, adding a touch of sophistication. Over the shirt, she wears a black and purple jacket with a metallic sheen, adorned with white spiderweb patterns, giving her a gothic and edgy vibe. The jacket is open, revealing the shirt underneath.\n", "\n", "She is seated in an indoor setting with a plain white wall and a grey floor, which contrasts with her colorful outfit. The lighting is soft and natural, highlighting her smooth skin and the texture of her clothing. The overall style of the photograph is modern and anime-inspired, with a focus on cosplay and character portrayal. The image is high-resolution, capturing every detail of her outfit and expression.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 47.jpg\n", "\n", "...\n", "A young woman stands confidently in a dimly lit room, bathed in pink and purple hues from the lights. She has fair skin and a slender, toned physique with an hourglass figure. Her black hair is styled in a short, spiky fashion with a white streak on one side. She wears large, round glasses with black frames and a black blindfold that covers her eyes, adding a touch of mystery and sensuality.\n", "\n", "Her outfit is a daring, black leather harness bikini, revealing her ample breasts and toned abs. The bikini is strung together with thin straps, leaving her midriff exposed. She also wears a black and silver jacket with white and red details, open to reveal her upper body. The jacket has a futuristic design with a white collar and silver accents.\n", "\n", "In the background, the room is decorated with various objects, including a large, white and black framed mirror, a black and white flag, and a black and white checkered tablecloth. The overall vibe of the room is eclectic and modern, with a mix of dark and light elements. The woman's expression is confident and alluring, with a slight smile on her face.\n", "\n", "The image is a photograph, taken in a studio setting with professional lighting and styling, capturing both the subject and the room in vivid detail.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 48.jpg\n", "\n", "...\n", "A young woman with a pale complexion and a slender, curvy figure is posed seductively. She has long, wavy pink hair with yellow highlights, styled in two braided pigtails. Her makeup is bold, with heavy eyeliner, mascara, and a pink lip gloss. She wears a revealing black and white latex bikini that exposes her breasts, belly button, and part of her thighs. The bikini is held up by a white jacket with black and yellow accents, which she wears open, revealing her bare skin.\n", "\n", "She is seated on a stool, with one leg propped up, wearing black thigh-high stockings with garters. Her expression is sultry, with a slight pout and a direct gaze at the camera. She has a small tattoo on her right hip and a belly button piercing. The background is a cozy indoor setting with large windows that allow natural light to stream in, casting a warm glow on the scene. A bouquet of flowers and a vase are visible to the right, adding a touch of elegance to the space. The wooden floor adds a rustic element to the room, which is otherwise minimalist and clean. 
The overall style of the image is a mix of cosplay and provocative photography, emphasizing the subject's provocative attire and confident pose.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 49.jpg\n", "\n", "...\n", "A young woman with a slender, petite physique and fair skin stands against a bright pink backdrop. She has long, straight black hair with bangs and wears a pair of large, dark brown cat ears with white markings. Her makeup is minimal, highlighting her natural beauty with a soft, nude lip and a slight hint of blush on her cheeks. She is nude from the waist up, with her left arm raised and her right hand covering her small, perky breasts. Her nails are manicured and painted a pale pink, adding a touch of elegance to her otherwise provocative pose.\n", "\n", "She is draped in a black and blue striped jacket with a yellow emblem on the left sleeve, which is open to reveal her bare torso. The jacket is made of a glossy material, possibly vinyl or leather, and has a slightly shiny texture. The background is a solid bright pink, creating a stark contrast with her dark hair and skin tone, which makes her stand out prominently in the image. The lighting is soft and even, highlighting the contours of her body and the texture of her hair and jacket. The overall mood of the photograph is both playful and sensual, with a focus on the subject's youthful and innocent beauty.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 50.jpg\n", "\n", "...\n", "A young Asian woman with a slim, petite physique and fair skin poses in a modern bathroom. She has striking green hair styled in a bob with blunt bangs, framing her face. Her makeup is dramatic, with dark eyeliner, false lashes, and bold red lipstick, giving her a striking appearance. She wears a cropped black sweater that exposes her midriff, and she lifts it to partially cover her small breasts with her hands, revealing a tattoo on her left ribcage. Her black lace panties are high-waisted, accentuating her hourglass figure. Her skin is smooth and flawless, with a hint of a navel piercing.\n", "\n", "The bathroom is sleek and modern, featuring dark gray tiles and a large white sink with a metallic faucet in the background. The lighting is bright, highlighting her features and the shiny surfaces of the bathroom. A mirror reflects part of her image, adding depth to the scene. The setting is intimate and sensual, with the woman's confident stance and expression suggesting a sense of empowerment and self-assuredness. The image is a photograph, captured with high resolution and sharpness, emphasizing the textures of her hair, skin, and clothing. The overall mood is provocative and bold.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 51.jpg\n", "\n", "...\n", "A young woman with a fair complexion and long, straight black hair styled in two pigtails stands against a bright pink background. Her eyes are strikingly green, and her expression is neutral with a hint of seduction. She is nude except for a black jacket hanging loosely around her shoulders. Her breasts are medium-sized and covered with black X-shaped pasties. A white collar with a large, round black button adorns her neck, and a small, silver belly button ring sparkles on her navel. She has a slim, toned physique with visible muscles and a flat stomach. A thin, silver chain with a small pendant hangs from her collar, adding a subtle touch of jewelry. 
There is a faint, translucent, viscous substance on her breasts and upper stomach, giving a slight sheen to her skin. The lighting is soft and even, highlighting her curves and the smoothness of her skin. The background is solid pink, creating a contrast that draws attention to her. The overall style is modern and hyper-realistic, with a focus on detailed anatomy and sensual presentation. There are no other objects or people in the image, making her the focal point. The image is likely intended for an adult audience.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 52.jpg\n", "\n", "...\n", "A highly detailed digital CGI image features a young woman with a light skin tone and a slim, athletic build. She has a pair of large, pointed cat ears on her head, giving her a feline appearance. Her hair is long, wavy, and platinum blonde, with a few strands covering her face, partially obscuring her eyes and giving her a mysterious look.\n", "\n", "She is wearing a provocative outfit that includes a small, red and black bikini top with a bow in the center that barely covers her ample breasts, revealing her nipples. Her black thong bottoms are minimal and accentuate her toned abs and flat stomach. Her outfit is completed with brown, fur-lined sleeves that end above her elbows and a pair of olive green pants that are unbuttoned, exposing her thighs.\n", "\n", "She is adorned with a red choker around her neck, and a small, black mask is placed over her left eye, adding a touch of mystery to her look. Her expression is sultry, with a slight smile and a seductive gaze directed at the camera.\n", "\n", "The background is a dark, solid color, making her the focal point of the image. The lighting is dramatic, casting a warm glow that highlights the contours of her body and the textures of her outfit and fur-lined sleeves.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 53.jpg\n", "\n", "...\n", "A young woman with pale skin and platinum blonde hair styled in a short bob is posing provocatively in a suggestive, erotic manner. She is wearing a black harness that accentuates her large, perky breasts, with pasties covering her nipples. Her expression is sultry, with her mouth slightly open and tongue sticking out. She is also wearing white thigh-high stockings and black fingerless gloves, adding to the BDSM-inspired look. Her navel is pierced with a small silver ring. \n", "\n", "The background is a minimalist, white-walled room with a soft blue light casting a cool tone over the scene. The lighting highlights the contours of her body, emphasizing her toned physique. A white bandage is wrapped around her right thigh, and another white bandage is placed on her left thigh, with a small amount of a clear liquid, likely lubricant or a bodily fluid, visible on the bandages. \n", "\n", "Her left leg is raised and bent at the knee, with her foot resting on the floor, while her right leg is bent at the knee, with her foot pointed towards the camera. The composition of the image is dynamic, with the camera positioned low to capture her in an intimate, close-up manner. 
The overall aesthetic of the photograph is erotic and fetishistic.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 54.jpg\n", "\n", "...\n", "A young woman in a cosplay pose, dressed as a character from the anime \"Black Clover.\" She's a light-skinned woman with an athletic build, sporting a blonde wig with spiky bangs and long, straight blonde hair that falls down her back. Her face is partially hidden by a white mask with a black strip across the mouth and nose, giving her a mysterious and intimidating look.\n", "\n", "She's wearing a revealing white and black outfit that covers her arms and chest but leaves her midriff exposed, showing off a flat, toned stomach. The outfit is made of shiny, latex-like fabric that highlights her curves and muscles. She also wears white pants with black accents, featuring a high waist and a long, black belt with a silver buckle. Her large breasts are partially covered by the mask, and a silver belly button ring is visible on her navel.\n", "\n", "She holds a long, shiny sword with a silver hilt and a black blade, which she grips with both hands. The background is dark and indistinct, with a hint of a window or door on the left side. The lighting is bright, creating sharp shadows and emphasizing her figure. The scene is set in an indoor environment, likely a room or a stage.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 55.jpg\n", "\n", "...\n", "A woman in a bathroom is posing provocatively. She has long, straight black hair that flows over her shoulders and down her back. Her skin is light and her makeup is bold, with dark eyeliner and long false eyelashes. She has full, luscious lips painted in a deep shade. Her eyes are a striking blue color.\n", "\n", "She is wearing a tight, shiny, turquoise blue one-piece swimsuit with a leopard print design. The swimsuit has a plunging neckline that accentuates her ample cleavage and a zipper running down the front. Around her neck is a black choker with silver studs, adding to the edgy vibe.\n", "\n", "Her arms are adorned with long, black, latex gloves that reach past her elbows. She is also wearing black fishnet stockings that cover her legs and thighs, adding a risqué touch. In her hands, she holds a black hairbrush, which she is using to brush her hair.\n", "\n", "The background is a modern bathroom with white tiles and a mosaic tile wall that features a mix of black and grey hues. The lighting is bright and even, highlighting the woman's features and the textures of her outfit and accessories. The overall mood is sexy and daring.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 56.jpg\n", "\n", "...\n", "A young woman with an athletic build and tan skin poses confidently on a beach during the golden hour. She has long, straight black hair cascading over her shoulders and a striking tattoo sleeve on her right arm, featuring intricate designs. Her left arm is adorned with more tattoos, including a large floral design. She is wearing a vibrant, pink leopard-print bikini that accentuates her slender waist and medium-sized breasts. The bikini top has thin black straps and a halter neck, while the bottom is a thong with a matching pattern. Her expression is sultry, with her lips slightly parted and eyes gazing directly into the camera.\n", "\n", "She is standing on a sandy beach with a clear blue sky and a few scattered clouds in the background, indicating it is either early morning or late afternoon. 
The water is calm and stretches out to the horizon, adding to the serene and peaceful atmosphere. The textures of the sand, water, and her smooth skin contrast sharply, creating a visually striking image. The overall mood of the photograph is one of confidence and sensuality, enhanced by the warm light and the subject's bold attire and tattoos.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 57.jpg\n", "\n", "...\n", "A woman with a voluptuous figure and fair skin sits against a plain white background, striking a confident pose. She has long, straight black hair with red highlights, styled with a side part. Her makeup is bold, with dark eyeliner and mascara, and her lips are painted a vivid red.\n", "\n", "She's wearing a revealing, black latex bodysuit that covers her upper body and thighs but leaves her midriff, arms, and legs exposed. The bodysuit features a strappy, halter-neck design that accentuates her cleavage, and it's adorned with a black choker around her neck. Her ample breasts are prominently displayed, and the shiny latex material reflects light, highlighting her curves.\n", "\n", "She is also wearing long, black latex gloves that extend to her elbows, adding to the fetish-inspired outfit. Her legs are covered in black fishnet stockings, with the fishnet pattern visible through the latex bodysuit.\n", "\n", "The woman is sitting on a high, modern stool with a metallic frame, which contrasts with the stark white background. The overall style is provocative and fetishistic, with a focus on the woman's body and the shiny, glossy texture of the latex outfit. The image is a high-resolution photograph, capturing every detail of her outfit and makeup.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 58.jpg\n", "\n", "...\n", "A stunning woman stands confidently against a bold red backdrop, striking a pose with her hands on her hips. Her long, flowing black hair, streaked with red highlights, cascades over her shoulders, adding a vibrant touch to her look. Her makeup is bold and dramatic, with dark eyeliner, thick eyelashes, and bright red lipstick that accentuates her full, luscious lips. She wears a tight, black latex bodysuit that clings to her curves, highlighting her voluptuous figure and ample cleavage. The bodysuit is shiny and reflective, catching the light and adding a glossy texture to her outfit. She completes her outfit with high-heeled black platform boots that add height and a fierce, edgy vibe. Around her neck, she sports a black choker that adds an element of BDSM to her style. The red background contrasts sharply with her outfit, making her stand out even more. Her skin is fair, and she has a slender yet curvaceous body. The lighting is bright and even, eliminating any shadows and making her the focal point of the image. The overall style of the photograph is modern and bold, emphasizing the boldness and confidence of the subject.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 59.jpg\n", "\n", "...\n", "A photograph of a young woman with a pale complexion and striking blue eyes. Her hair is styled in a casual yet chic manner, with loose strands framing her face and a few pulled back to show her ears. She has a slender neck and a slightly pouty mouth with a light pink hue, accented by a touch of gloss. 
Her facial expression is intense and slightly serious, with a direct gaze and furrowed brow.\n", "\n", "She is wearing a black latex outfit with vibrant pink and red accents on the shoulders, adding a bold and striking contrast to her otherwise neutral appearance. The outfit is form-fitting and glossy, highlighting her curves and emphasizing her ample cleavage, which is accentuated by the low neckline and zipper. The texture of the latex is smooth and shiny, reflecting light and creating a sleek, almost futuristic look.\n", "\n", "The background is a gradient of teal and blue, with a blurred metallic structure, possibly a door or part of a spaceship. The lighting is soft but directional, casting subtle shadows that add depth and dimension to her features and outfit. The overall mood of the photograph is intense and edgy, with a focus on the subject's confident and commanding presence.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 60.jpg\n", "\n", "...\n", "A woman poses against a plain, off-white wall. She has long, wavy blonde hair cascading over her shoulders and a fair complexion. Her face is partially covered by a black leather mask with large, upright rabbit ears, giving her a playful, fetishistic vibe. Her eyes are accentuated by the mask, and she wears a seductive smile.\n", "\n", "She is dressed in a revealing, shiny latex outfit. Her top is a black leather harness bra with strappy details, which accentuates her ample breasts. The bra has thin straps and a crisscross design, highlighting her cleavage and midriff. Her bottom half features a short, pleated yellow latex skirt that clings to her hips, showing off her toned legs and curvy figure.\n", "\n", "In her right hand, she holds a small, black riding crop, adding a BDSM element to her look. The crop is made of soft leather, with a black handle and a flexible tip. Her left hand rests on her hip, showcasing her confidence and allure.\n", "\n", "The lighting is soft and warm, casting gentle shadows that enhance the contours of her body and the texture of her latex outfit. The overall mood of the image is sensual and provocative, combining elements of fetish wear with a playful bunny theme.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 61.jpg\n", "\n", "...\n", "A woman stands with her back to the camera, facing a plain white background. She has a fit, athletic build with well-defined muscles in her back and legs. Her skin is fair and smooth, with no visible tattoos or body art. Her hair is platinum blonde, styled in a short bob that reaches just below her shoulders. She is wearing a tight, shiny, purple latex corset that accentuates her waist and curves. The corset features a deep V-neck and a pleated, short skirt that flares out slightly, ending just above her knees. The corset has black lacing up the back, adding a touch of vintage style. She is also wearing black fishnet stockings that cover her legs from mid-thigh to ankle. On her feet, she wears black, high-heeled platform boots with a stiletto heel, adding to the overall edgy, fetish-inspired look. The lighting is bright and even, highlighting the shiny texture of the latex corset and the fishnet stockings. The image is clear and sharp, with no visible distractions, focusing entirely on the subject. 
The style of the photo is contemporary and provocative, with a focus on fetish fashion and latex wear.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 62.jpg\n", "\n", "...\n", "A woman with a fair complexion and long, straight black hair styled in a sleek ponytail stands in a dimly lit room. She has full lips painted in a nude shade and wears black eyeliner, mascara, and a hint of blush on her cheeks, highlighting her striking features. Her eyebrows are perfectly arched, and she has a subtle, confident smile. Her expression is composed and inviting.\n", "\n", "She is wearing a tight, shiny, bright pink latex dress that hugs her figure. The dress has a halter neck design with a high collar that forms a deep V-shape in the front, accentuating her cleavage. Black leather straps crisscross her chest and abdomen, connected by metal rings, adding a BDSM-inspired touch. The dress is made of a glossy material that reflects light, giving it a sleek and polished look.\n", "\n", "The background is dark and blurry, making the subject stand out. The lighting is soft, creating subtle shadows that enhance the contours of her body. The overall style of the photograph is sensual and provocative, with a focus on the subject's attire and expression. The image captures a moment of confidence and allure, showcasing the woman's beauty and the boldness of her attire.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 63.jpg\n", "\n", "...\n", "A striking photograph of a young woman with an Asian ethnicity, standing confidently in a dimly lit, vibrant nightclub. Her skin is a light brown tone, and she has a curvy physique with a prominent hourglass figure. Her large, ample breasts are accentuated by the tight, black latex bodysuit she wears, which has a harness-like design with straps over her shoulders and chest, creating an edgy, fetish look. Her long, black hair is styled with blunt bangs, and she has a pair of black cat ears attached to a headband, adding to her gothic, alternative aesthetic. Her makeup is bold, with dark eyeshadow and bright pink lipstick, enhancing her sultry look.\n", "\n", "She's accessorized with red, long, latex gloves that extend past her elbows, and fishnet stockings that reveal her tattoos, which include intricate designs on her arms, chest, and thighs. Her red nails match her gloves, adding a pop of color to her outfit.\n", "\n", "The background is filled with other patrons, dressed in various styles, creating a lively atmosphere. The nightclub is illuminated by red and blue stage lights, casting a dramatic glow over the scene. The overall mood is electric and intense, capturing the essence of a lively, alternative nightclub.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 64.jpg\n", "\n", "...\n", "A woman stands confidently in front of a simple, white wooden chair, with a light blue backdrop. She has long, straight black hair that falls over her shoulders, and she is wearing a bold blue and black outfit. The top is a form-fitting, blue halter top that accentuates her ample bust, while the bottom is a matching blue thong, which leaves her hips and buttocks exposed. She also has on black, thigh-high, shiny patent leather boots that reach her knees and have high platform heels, making her legs look even longer. Her skin is fair and smooth, with a slight tan. The woman is of Caucasian descent, with a curvy physique that includes a large bust, narrow waist, and ample hips. 
She is bending over slightly, leaning on the back of the chair, with her hands on the seat, accentuating her round buttocks. The lighting is bright and even, highlighting the textures of her outfit and the smoothness of her skin. There are no other objects or people in the image, keeping the focus entirely on her. The style of the photograph is glamour, emphasizing the woman's curves and the sensuality of her pose.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 65.jpg\n", "\n", "...\n", "A young Asian woman with pale skin sits in a luxurious, dark brown armchair with a fancy gold frame and a white fur trim. She has long, wavy black hair styled into two high pigtails with red ribbons. Her large, expressive blue eyes and delicate facial features give her a youthful, innocent look. She wears a skimpy black bikini top with pink accents and a small bow at the front, barely covering her small breasts. The bikini bottom is also black with pink details and a thin pink string tied around her hips. Her legs are clad in sheer black thigh-high stockings with silver studs, and she has a black choker around her neck with a circular pendant. A tattoo of a crescent moon decorates her left collarbone. Her right hand is on her chest, while her left hand rests on the arm of the chair. The background is a solid black, making her the focal point of the image. The textures in the photo include the smoothness of her skin, the shiny fabric of her bikini, and the softness of the fur trim on the chair. The overall style is a mix of cosplay and fetish fashion, emphasizing her youthful beauty and provocative attire.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 66.jpg\n", "\n", "...\n", "A stunning woman of mixed ethnicity, possibly Latina or mixed-race, stands confidently in front of a dark, paneled wall with a glossy black finish. Her long, wavy dark brown hair cascades over her shoulders, complementing her smooth, tan skin. She has a slender yet curvy physique, with medium-sized breasts partially covered by her hands, and a flat stomach. Her makeup is bold and sophisticated, featuring dark eyeliner, voluminous lashes, and a nude lip. She wears a black leather collar with a metal ring, which she holds in her gloved hands, and a matching set of black leather gloves that reach her elbows. The gloves are shiny and glossy, adding to the BDSM vibe. Her high-waisted black leather shorts are snug and form-fitting, accentuating her hips and waist. The lighting is soft yet dramatic, highlighting the contours of her body and the sheen of her accessories. The overall aesthetic is sultry and provocative, with a focus on the woman's confident and provocative demeanor. The background is minimal, ensuring all eyes are on her. This photograph captures the essence of high-end adult entertainment, blending fashion and eroticism with a high degree of attention to detail.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 67.jpg\n", "\n", "...\n", "A young woman strikes a pose in a cosplay-style photo, showcasing her anime-inspired getup. She's kneeling on one knee, turning her head to face the camera with a serious expression. Her long, straight black hair with red streaks flows down her back and shoulders, partially covered by a pair of red and white headphones. Her skin is light with a slight tan, and she's wearing light makeup that highlights her eyes and lips.\n", "\n", "Her outfit is a futuristic, sci-fi-inspired ensemble. 
She's rocking a light blue, long-sleeved jacket with black details and a shiny, metallic texture. The jacket is cropped to show off her midriff and features a black and red chest plate. She's wearing tight black shorts that accentuate her round, firm buttocks, paired with thigh-high, nude-colored stockings. Black knee-high boots with white stripes add to the outfit's high-tech vibe.\n", "\n", "Her pose is dynamic and confident, with her right hand gripping a futuristic, metallic gun, adding to the sci-fi feel. The background is simple, with a light blue wall and a wooden floor, keeping the focus on her. The lighting is bright and even, making her outfit's details and her skin tone stand out. The overall look is a mix of modern and anime aesthetics.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 68.jpg\n", "\n", "...\n", "A woman with a curvy figure and long, straight black hair styled in a high ponytail poses indoors in front of a large window with white wooden shutters. She has fair skin and is wearing a bold outfit that includes a black latex glove on her right arm and a red and black leopard print bodysuit that reveals her cleavage and accentuates her large breasts. Her makeup is striking, with dark eyeliner, thick black eyelashes, and red lipstick that matches the bodysuit. She is also wearing fishnet stockings and black thigh-high boots, adding to the provocative and edgy style of her outfit.\n", "\n", "The background features a lush green palm tree outside the window, indicating that the room is in a tropical or subtropical climate. The interior is well-lit, with natural light streaming in through the window, highlighting the textures of her outfit and the glossy finish of the latex glove. The overall aesthetic of the image is a blend of fetish fashion and tropical paradise, creating a striking and memorable visual. The image is a photograph, capturing the vivid details and textures of the scene.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 69.jpg\n", "\n", "...\n", "A young, East Asian woman poses provocatively, showcasing her slender, petite figure and fair skin. Her long, straight black hair with red tips cascades down her back and shoulders. She wears a daring black leather harness outfit with intricate straps, designed to accentuate her small breasts and slim waist. The harness features a zipper that runs down her stomach, adding to the edgy, fetish-inspired aesthetic. Her makeup is bold and dramatic, with heavy eyeliner and pink blush highlighting her cheekbones, giving her a striking, almost doll-like appearance.\n", "\n", "Her arms are raised above her head, hands clasped together, adding to the suggestive pose. She wears black thigh-high stockings with a shiny texture, and her feet are bare, emphasizing the contrast between her soft skin and the harsh, shiny fabric of the stockings. The background is a simple gradient of light gray to white, ensuring all attention remains on her. The image captures the textures of the leather harness and the glossy finish of the stockings, as well as the softness of her skin. The overall vibe is a mix of provocative and playful, with the woman's confident and seductive expression adding to the allure of the scene. 
The setting is a minimalist indoor environment, free of any additional objects or distractions.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 70.jpg\n", "\n", "...\n", "A woman is dressed in a black, glossy latex outfit that covers her entire body, including her face, with a hood that covers her head and a matching mask that covers her eyes, leaving only her lips and chin exposed. The outfit is made of shiny, glossy latex material that reflects light, creating a smooth, reflective surface. The latex is tight and form-fitting, hugging her body and accentuating her curves. She is wearing a high-necked top that covers her chest and a matching pair of latex pants that fit snugly around her hips and legs.\n", "\n", "The background is a deep blue, almost black, with a subtle, swirling pattern that adds a sense of depth and mystery to the image. The lighting is dim and focused on the subject, creating a dramatic and intense atmosphere. The colors and textures in the image are contrasted, with the shiny, reflective latex against the dark, almost velvety background. The overall mood of the image is sensual and provocative, with the tight, form-fitting latex emphasizing the subject's curves and the reflective surface adding a sense of allure and mystery.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 71.jpg\n", "\n", "...\n", "A provocative, high-fashion photo of a woman in a futuristic, latex outfit. The model, who appears to be of East Asian descent, stands confidently with one hand on her hip and the other resting on her thigh. Her skin is pale, and she has a slim, athletic build with a small bust and narrow waist. She is dressed in a shiny, black latex bodysuit with a high-neck collar and a high-cut skirt that exposes her thighs. The bodysuit has a glossy, wet look, and the skirt has a short, pleated design that accentuates her curves. She wears matching black latex gloves that reach past her elbows and thigh-high stockings. Her head is covered by a black latex mask that covers her entire face, leaving only her lips visible, giving her an androgynous appearance. The background is a dimly lit room with a large, ornate mirror that reflects part of the scene, adding depth to the image. The lighting is warm and golden, creating a sensual and dramatic atmosphere. The overall style is edgy and avant-garde, combining elements of fashion and fetish wear. The photo is taken by Fanton Fotography, as indicated by the watermark at the bottom of the image.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 72.jpg\n", "\n", "...\n", "A young woman with a light olive skin tone and long, straight black hair poses against a light blue background. She is kneeling, with her legs spread apart and her left leg slightly forward, emphasizing her shapely thighs and high-heeled boots. The boots are black and glossy, made of PVC material, and feature a very high platform with a stiletto heel, adding to their bold and provocative appearance.\n", "\n", "She is wearing a revealing blue latex bikini top with thin black straps that crisscross at the back. The top accentuates her medium-sized breasts and showcases her toned midriff. Her left arm is bent at the elbow, with her hand touching her cheek, while her right arm is bent at the elbow and her hand is on her knee.\n", "\n", "She is also wearing matching black latex fingerless gloves, which add to the overall fetishistic vibe of her outfit. 
The gloves have a shiny finish, reflecting the light and adding to the glossy look of her ensemble. The background is plain and uncluttered, drawing full attention to her striking outfit and pose. The overall style is a mix of fetish and gothic fashion, with a strong emphasis on latex and PVC materials.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 73.jpg\n", "\n", "...\n", "A young woman posing against a pink gradient background, dressed in a black bra and panties, with a red satin jacket draped over her arm. She has a pale skin tone, fair complexion, and short, platinum blonde hair styled in a bob with blunt bangs. Her makeup is bold, with heavy eyeliner and mascara accentuating her large, expressive eyes, and a hint of pink lipstick. Her lips are slightly parted, giving her a sultry expression. She wears a black choker necklace with a small cross pendant, and a ring on her left hand. Her figure is slender with a small to medium bust, and her physique is toned and athletic. She stands confidently with one leg bent and her foot resting on a chair, adding a sense of movement and dynamism to the image. The lighting is soft and even, highlighting her skin and the texture of her hair and clothing. The overall style is modern and glamorous, emphasizing the model's youth and sensuality. The background is a gradient of pink, creating a warm and inviting atmosphere. The image is a photograph, likely taken in a studio setting, with a professional and polished aesthetic.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 74.jpg\n", "\n", "...\n", "A young woman with fair skin and a slim, petite build is sitting on an ornate, gold velvet armchair. Her short, platinum blonde hair is styled in a bob with bangs, and she wears a blue, silk, Chinese-style dress with a high collar and a deep, plunging neckline that reveals her cleavage. The dress has intricate white embroidery and a blue ribbon tied in a bow at the back. She also wears long, navy blue opera gloves that extend past her elbows and black thigh-high stockings with lace tops. Her feet are adorned with black, high-heeled shoes. In her right hand, she holds a long, black, double-edged sword with a red line along the blade, which she rests on her lap. Her expression is serious, with a hint of confidence.\n", "\n", "The background is dimly lit, with dark curtains that make the scene more intimate and mysterious. The armchair has intricate wood carvings on the base, adding to the luxurious and royal feel of the setting. The floor is made of smooth, grey stone, contributing to the overall dark and dramatic atmosphere. The lighting is soft, casting gentle shadows that highlight the contours of her body and the texture of her clothes. The scene is a mix of fantasy and elegance, evoking a sense of power and allure.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 75.jpg\n", "\n", "...\n", "A young woman is dressed in a cosplay outfit featuring a white strapless corset with pink accents, a black and pink bow tie, and a black skirt with pink details. She has long, wavy, pastel pink hair styled in high pigtails with pink ribbons, and large, pink bunny ears on her head. Her skin is fair, and she has a slender, petite build with a small bust. She is wearing white, long-sleeved gloves and pink wristbands. She has a tattoo on her left side that reads \"Rin\" in a delicate script. 
Her makeup is heavy, with pink eyeshadow, dark eyeliner, and bright pink lipstick, and she has a playful expression on her face.\n", "\n", "The background is a dimly lit room with a warm, orange glow coming from the lighting, creating a cozy and intimate atmosphere. There are large, ornate paintings on the walls, and a patterned rug is visible on the floor, adding to the luxurious and slightly vintage feel of the room. The overall style is a blend of modern cosplay and classic pinup aesthetics, with a focus on the character's feminine and playful appearance. The image is a high-resolution photograph, capturing the details of the costume and the background with clarity.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 76.jpg\n", "\n", "...\n", "A young woman poses against a futuristic backdrop, flaunting her skills as a cosplayer. She's rocking a striking, edgy look with long, platinum blonde hair styled in two braids and a white, sleeveless crop top that shows off her midriff and a pair of high-waisted beige shorts with a tribal design and multiple pockets. Her outfit is complete with black gloves that go up to her elbows, adding a tough, armored vibe.\n", "\n", "Her face is youthful and fair-skinned, with a serious yet determined expression. She has striking blue eyes and a full, slightly parted red lips. Her ears are adorned with small, intricate piercings.\n", "\n", "In her right hand, she grips a large, ornate sword with a red and black handle, its blade gleaming in the light. Her left arm is adorned with a large, intricate tattoo that stretches from her shoulder to her elbow.\n", "\n", "The background features a sleek, metallic, industrial setting with geometric patterns and a cool blue and white color scheme. The lighting is bright and even, highlighting her costume and the intricate details of her sword.\n", "\n", "The overall style is a mix of modern and fantasy, with a focus on warrior aesthetics. The photo captures a dynamic pose, with her body slightly angled to the side, emphasizing her confident stance.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 77.jpg\n", "\n", "...\n", "A young woman, likely in her early 20s, kneels on a concrete floor with a determined look on her face. She's dressed in a badass, futuristic outfit that combines a medieval knight's armor with modern elements. Her skin is light and her hair is long, silver, and braided, cascading down her back. She's wearing a sleeveless, light gray, armored chest piece with intricate patterns, and a matching helmet with a visor that covers her eyes. Around her neck is a thick, silver chain necklace with a large, metallic pendant, and her arms are covered in black, fingerless gloves that reach to her elbows. Her short, tan leather skirt is adorned with silver buckles and straps, and she has black thigh-high boots with straps and buckles, giving her a warrior-like appearance. Her expression is fierce and focused, with her lips slightly parted and her eyes narrowed.\n", "\n", "The background features a modern, industrial setting with large, white, vertical blinds that let in natural light, casting a soft glow over the scene. The floor is concrete with a few visible cracks and stains, adding to the gritty, urban feel of the photo. 
The overall mood is intense and dynamic, capturing the essence of a warrior poised for battle.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 78.jpg\n", "\n", "...\n", "A cosplay of a fantasy character with a gothic and erotic vibe, set in a dimly lit room filled with cherry blossom petals. The main subject is a young woman with fair skin and platinum blonde hair styled in a bob cut with a side fringe. She has small, delicate horns on her head and wears black, high-heeled platform shoes. Her outfit includes a tight, white corset top with black lacing and straps that accentuate her medium-sized breasts and narrow waist. A black harness with a gold pendant hangs around her neck, adding to her Gothic look. She also sports black fishnet gloves that extend up to her elbows and black thigh-high stockings with fishnet patterns. Her expression is sultry and inviting, with a slight smile on her lips.\n", "\n", "In the background, there are cherry blossom trees with pink flowers and dark green leaves, creating a romantic and enchanting atmosphere. The room is dimly lit with red and black accents, contributing to the gothic feel. A black and red Japanese-style folding screen is partially visible on the left side, adding to the fantasy and cultural elements of the scene. The overall composition is rich in texture and color, with the focus on the character's attire and the intricate details of the cherry blossoms.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 79.jpg\n", "\n", "...\n", "A young woman with a slender build stands outdoors near a body of water, possibly a lake or ocean. She has a pale skin tone and long, straight red hair styled in a loose braid with a black hairpin. Her hair is complemented by a black headband adorned with a small black flower. She wears a traditional Japanese kimono with a striking red and black color scheme. The kimono is made of a sheer, glossy fabric with intricate patterns and designs, including geometric shapes and abstract motifs. A red sash with black accents cinches her waist, adding to the dramatic flair of her outfit.\n", "\n", "In her hands, she holds a large, round Japanese parasol with a red canopy and black ribbons. The parasol is open and provides some shade, casting a gentle shadow on her face and body. Behind her, the background features a calm, blue body of water with a clear sky, creating a serene and peaceful atmosphere. To her left, a tree with dark bark and green leaves frames part of the scene, adding a natural touch to the composition. The overall mood of the photograph is elegant and whimsical, capturing the beauty of traditional Japanese fashion against a tranquil natural backdrop.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 80.jpg\n", "\n", "...\n", "A young woman in a cosplay photo with a fantasy or gothic vibe. She has fair skin and long, straight blonde hair styled into two high pigtails with black ribbons. Her hair is accented with a small black hair clip on the right side. Her large, expressive blue eyes are highlighted with light makeup, giving her a doll-like appearance. She wears a black choker with a metal ring, adding to her edgy look.\n", "\n", "Her outfit is a black and white bodysuit with a corset-like top and a skirt. The top is white with black straps crisscrossing over her chest, showing off her medium-sized breasts. The skirt is black and features white lace trim with metal rings along the sides. 
Her long sleeves have black and white lace cuffs, and she wears black fingerless gloves. Her pose is confident and striking, with one hand raised and the other on her hip, giving a sense of power and allure.\n", "\n", "The background is a simple, light gray gradient, making her the focal point of the image. The texture of her outfit is smooth and shiny, with the black parts contrasting against the lace and white fabric. The overall style is a mix of gothic and fantasy, with a focus on detailed and intricate clothing.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 81.jpg\n", "\n", "...\n", "A young woman with pale skin and a slender, petite figure sits on a wooden chair with ornate detailing, in a posh, old-fashioned room. Her dark hair is styled in a short bob with bangs and a white flower crown, giving her a playful yet sophisticated look. She's dressed in a sheer black dress with cutouts on the sides, showing off her small to medium-sized breasts and a slim waist. The dress is paired with black, see-through thigh-high stockings held up by garters, adding a touch of sensuality. Her makeup is minimal, with a focus on her large, expressive eyes and full lips, which are painted a deep red.\n", "\n", "The room has a vintage vibe, with teal green paneled walls and a patterned wooden floor with intricate floral designs. A wooden chest of drawers is placed behind her, decorated with a bouquet of pink flowers in a white vase. To her left, there is a plush armchair with a patterned cushion in shades of beige and green. The lighting is soft and warm, casting a gentle glow over her features and the room's decor, enhancing the intimate and luxurious atmosphere. The overall aesthetic is a blend of modern sensuality and classic elegance, creating a visually striking and evocative scene.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 82.jpg\n", "\n", "...\n", "A digital CGI rendering of a young woman with a slim build and fair skin. She sports a dark gray flat cap tilted to the side, adding a casual vibe to her outfit. Her long, dark brown hair is styled in two braids, cascading down her shoulders and neck. She is wearing a cropped, long-sleeved maroon sweater that reveals her midriff and a white collared shirt peeking out underneath.\n", "\n", "Her outfit features a pair of high-waisted, baggy pants in a green and brown plaid pattern, accented with metallic blue studs and chains around the waist and knees, giving it a punk rock flair. The pants have a loose fit and flare at the bottom, ending in black platform boots with chunky soles, adding to the edgy style.\n", "\n", "She carries a green and white striped satchel over her shoulder, which contrasts with her maroon sweater. The background is a plain white, ensuring the focus remains solely on her unique and eclectic fashion style. The image is detailed, with textures and fabrics rendered realistically, capturing the essence of streetwear and punk fashion. The overall look is a mix of casual and rebellious, with a strong emphasis on individual expression and non-conformity.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 83.jpg\n", "\n", "...\n", "A young woman stands against a plain white background, showcasing a gothic or punk rock style. She has a light to medium skin tone and wears her straight, long black hair in two braids. 
Her dark eyes are accentuated with heavy makeup, including smoky eyeliner and dark eyeshadow.\n", "\n", "She sports a black, wide-brimmed flat cap with a dark grey band. Around her neck is a black choker with a silver chain and a small pendant. Her outfit includes a black, long-sleeved, ribbed sweater with horizontal pink stripes on the sleeves and a white text print on the chest. The text is stylized and reads, \"I am not what you think I am, I am not what you think I am, I am not what you think I am, I am not what you think I am, I am not what you think I am, I am not what you think I am, I am not what you think I am, I am not what you think I am, I am not what you think I am, I am not what you think I am, I am not what you think I am, I am not what you think I am, I am not what you think I am, I am not what you think I am, I am not what you think I am, I\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 84.jpg\n", "\n", "...\n", "A young woman stands against a plain white backdrop, exuding a gothic, punk vibe with her attire and accessories. She's dressed in a sleeveless, strapless dress in deep maroon with a striking, dark maroon pattern on the sides. The dress features a corset-like bodice with lacing in the front, which adds a bit of an edgy, Victorian feel. The skirt is short and flared, giving her outfit a playful, flirty touch.\n", "\n", "Her legs are wrapped in sheer, black polka dot tights, adding a pop of pattern to the otherwise solid colors. She's rocking black, platform ankle boots with chunky soles and laces, giving her a rebellious, punk aesthetic.\n", "\n", "Her hair is styled in a sleek, high ponytail with a large, black bow at the end, complementing her gothic look. She accessorizes with a simple pearl necklace and minimal makeup, with heavy black eyeliner and dark lipstick that accentuate her sharp, angular features.\n", "\n", "In her right hand, she holds a black, leather-like trench coat, adding a layer of depth and texture to the image. Her pose is confident and poised, with her head slightly tilted to one side, and her gaze directed forward, exuding a sense of strength and individuality.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 85.jpg\n", "\n", "...\n", "A young woman stands against a plain white background, facing slightly to her right. She has fair skin and long, wavy blonde hair that cascades over her shoulders. Her hair is styled with loose waves, and she has light makeup, with pink lipstick and subtle eye makeup. She wears a vibrant, short dress made of sheer, lightweight fabric with a floral pattern in shades of pink and red. The dress has long, bell-shaped sleeves that are sheer and decorated with small, pink floral appliqués. The bodice of the dress is fitted, with a square neckline and a ruffled hem, adding a touch of femininity and playfulness to the outfit. Around her neck, she wears a choker with a large, red rose, adding a touch of elegance and romance to her look. She also has a matching rose brooch pinned on the front of her dress, bringing a cohesive element to her outfit. Her legs are covered in black, sheer stockings that contrast with the bright dress, and her hands are casually resting at her sides. The overall style is a mix of vintage and bohemian, with a focus on delicate textures and feminine details. 
The image is well-lit, with soft shadows that highlight the contours of her body and the details of her dress.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 86.jpg\n", "\n", "...\n", "A young woman stands confidently against a plain white backdrop, showcasing her slim, athletic physique. Her skin is a warm, light brown, and she has long, dark braided hair that cascades down her back. Her facial features are sharp and symmetrical, with high cheekbones, full lips, and almond-shaped eyes. She wears a daring, high-waisted, one-piece outfit made of a shimmering, metallic turquoise fabric that clings to her body, revealing her nipples through the sheer material. The outfit features long, flowing sleeves that drape down her arms, adding a touch of elegance to the outfit. Her high-waisted bottoms are cut high, accentuating her toned legs and hips. She also wears white thigh-high stockings that are slightly fluffy at the top, adding a touch of softness to her outfit. The texture of the turquoise fabric is smooth and glossy, catching the light and creating a reflective, almost mirror-like surface. Her expression is neutral, with a hint of confidence and poise. The overall look is modern and avant-garde, with a focus on bold, edgy fashion. The photograph is taken in a studio setting with even lighting, ensuring that every detail of her outfit and body are clearly visible.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 87.jpg\n", "\n", "...\n", "A highly detailed, hyper-realistic CGI rendering of a young woman with fair skin and long, silver hair cascading down her back. She has large, expressive eyes and a delicate, heart-shaped face with a soft, inviting expression. Her lips are slightly parted, and her cheeks have a rosy hue. She is posed seductively with her arms raised above her head, which elongates her neck and highlights her slender, athletic physique. Her breasts are large and prominent, accentuated by the tight, black lingerie she wears, which is a sheer, lace-trimmed bodysuit with a plunging neckline that reveals her ample cleavage. The lingerie is torn in several places, revealing more of her skin beneath.\n", "\n", "The background is a dimly lit room with large, dark windows, suggesting it's nighttime. The lighting casts a warm, golden glow on her body, contrasting with the cool, blue tones of the room. The floor is covered in a rough, stone texture, adding to the gritty, industrial atmosphere. The setting is reminiscent of a post-apocalyptic or dystopian environment, with elements like broken glass and metal frames scattered around. The overall tone is dark and moody, with a focus on the sensuality and vulnerability of the subject.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 88.jpg\n", "\n", "...\n", "A young woman stands against a plain white background, showcasing her bold and eccentric fashion. She is of East Asian descent, with a medium skin tone and a slender yet curvy physique. Her long, dark hair is styled in a slick, high ponytail, adding to her edgy vibe. She wears a bright, pastel pink bra that peeks out from under a long, sheer, mint green coat adorned with shiny, iridescent sequins in shades of blue, green, and silver. The coat is lined with fluffy, faux fur in the same mint green, creating a striking contrast against the shiny sequins. 
She completes her look with a pair of high-heeled boots that are metallic and reflective, featuring a gradient of pink, purple, and silver, and a chunky, stiletto heel. The boots have a slightly futuristic, sci-fi look, adding to the overall avant-garde aesthetic of her outfit. Around her neck, she wears a delicate, silver choker with a small, round pendant. Her makeup is bold and dramatic, with dark eyeliner, mascara, and a nude lip color. The entire outfit is designed to be eye-catching and attention-grabbing, with a mix of textures and colors that create a dynamic, vibrant ensemble.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 89.jpg\n", "\n", "...\n", "A young woman with light skin and blonde hair styled in two long braids hangs down past her shoulders. She's wearing a colorful, striped knit beanie hat with a wide brim that covers her head and extends down to her ears. The hat features alternating bands of purple, red, pink, green, and brown, creating a vibrant and playful look.\n", "\n", "Her face is clean and makeup is minimal, with a soft pink lipstick and light eyeshadow that highlights her eyes. She has a neutral expression, with slightly parted lips and a neutral look that gives off a calm and composed vibe.\n", "\n", "She's wearing a green velvet dress with a ribbed texture, featuring a high collar and a button-up front, adding a retro and whimsical touch to her outfit. The dress has a slight sheen, indicating a smooth and polished finish. She also wears a white fishnet top underneath, adding a layer of texture and contrast to her outfit.\n", "\n", "The background is a plain white, which makes the colors of her hat and dress pop and ensures that all attention is on her. The overall style is a mix of vintage and contemporary fashion, with a playful and whimsical twist.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 90.jpg\n", "\n", "...\n", "A young woman stands against a plain white background, showcasing a funky, eclectic outfit. Her dark skin and long, curly black hair are framed by a voluminous black fur hat, adding a wild touch to her look. She sports an oversized, long-sleeved, button-down shirt in a plaid pattern with red, green, and white stripes, hanging loosely over a black lace bralette that hints at her medium-sized breasts. Below, she wears loose, wide-legged olive green cargo pants with multiple pockets and a drawstring waistband. Her feet are clad in black, chunky platform sneakers, giving her an extra height boost.\n", "\n", "Accessories include a chunky black choker necklace with a pendant, a small, black leather bag slung across her body, and a black leather wristlet on her right wrist. Her expression is neutral, with a slight hint of a smirk. The style is a mix of streetwear and bohemian fashion, with a focus on comfort and bold colors and patterns. The hat and plaid shirt add a rugged, outdoorsy vibe, while the lace bralette and cargo pants bring a touch of edgy, urban chic. The background is completely white, making the subject the sole focus. The image is a professional photograph, with sharp details and clear, vibrant colors.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 91.jpg\n", "\n", "...\n", "A young woman stands against a plain white background, her long, straight, dark brown hair cascading over her shoulders. 
She has a light olive complexion and is wearing dramatic, bold makeup with dark eyeliner and black eyeshadow that accentuates her eyes, creating a fierce look. Her outfit consists of a high-neck, long-sleeved, velvet-like red jacket with a black zipper running down the middle, styled with black suspenders that crisscross over her chest and back. She pairs this with a black crop top that exposes her midriff, showcasing her toned abs. Around her waist, she wears a maroon belt with metal spikes, adding an edgy, rebellious vibe to her outfit. The belt has multiple layers of fabric and is secured with a black buckle. Her hands, adorned with several rings, rest on her belt, with her fingers spread apart, showcasing her long, manicured nails. The overall style of her outfit is a mix of gothic and punk, combining elements of both. The textures of her outfit vary from the smooth, shiny fabric of the jacket to the rough, spiked belt. Her expression is confident and fierce, with her eyes locked on the camera, enhancing the bold and rebellious mood of the image.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 92.jpg\n", "\n", "...\n", "A young woman stands against a plain white background, her long, dark hair cascading over her shoulders and down her back. She has a fair complexion and a slender, toned physique. Her outfit is a form-fitting, deep burgundy satin dress with a square neckline that highlights her bust and shoulders. The dress has a corset-like bodice with panels that create a fitted, hourglass silhouette. Two large red satin ribbons are tied in bows at the top of the dress, adding a playful and elegant touch. Around her neck is a burgundy satin choker with a small red rose, matching the ribbons and adding to the overall theme of the outfit.\n", "\n", "The woman's makeup is bold and striking, with dark eyeliner, mascara, and a deep burgundy lipstick that complements her dress. She has a confident and sultry expression, with her lips slightly parted and eyes gazing directly at the camera. The texture of the satin dress is smooth and shiny, catching the light and creating a glossy appearance. The background is completely white, ensuring that all attention is focused on the woman and her outfit. There are no other objects or people in the image, making it a simple and clean composition.\n", "Prompt: Describe the image in 400 words\n", "...\n", "\n", "...caption for 93.jpg\n", "\n", "...\n", "A young woman stands against a plain white background, viewed from the side and slightly from behind. She has medium brown skin and long, straight, black hair that reaches her shoulders. Her back is turned to the camera, showing off her bare back and the back of her head. She is wearing a short, sleeveless dress made of shiny, maroon satin with a ribbon bow on the right shoulder. The dress has a fitted bodice with a zipper down the back, and a straight, short skirt that ends above her knees. The satin fabric has a smooth, glossy texture that reflects light, giving it a luxurious look. She is also wearing sheer black tights, which can be seen through the satin of the dress, adding a layer of texture and contrast to the outfit. The overall style of the dress is sleek and modern, with a focus on simplicity and elegance. The photograph is well-lit, with no shadows, emphasizing the texture and color of the dress. The background is completely white, with no distractions, ensuring that all attention is on the subject and her attire. 
The image is a professional studio shot, with a clear, sharp focus and even lighting.\n" ] } ], "source": [ "# Caption the numbered images (1.jpg, 2.jpg, ...) with the JoyCaption pipeline set up above.\n", "suffix = 'jpg'\n", "\n", "for number in range(94):\n", "    if number > 20:\n", "        break  # caption only the first 21 images; remove this guard to process all 94\n", "    try:\n", "        input_image = Image.open(f\"/content/{number+1}.{suffix}\").convert('RGB')\n", "        caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", "        print(f\"...\\n\\n...caption for {number+1}.{suffix}\\n\\n...\")\n", "        print(caption)\n", "    except Exception:\n", "        continue  # skip files that are missing or cannot be captioned\n", "#----#\n" ] }, { "cell_type": "code", "source": [ "letter = 'B'\n", "\n", "# Caption the 'B (n)' images; each index is tried both as .jpg and as .webp,\n", "# and any variant that does not exist is skipped.\n", "for number in range(9):\n", "    for suffix in ('jpg', 'webp'):\n", "        try:\n", "            input_image = Image.open(f\"/content/{letter} ({number+1}).{suffix}\").convert('RGB')\n", "        except FileNotFoundError:\n", "            continue  # this variant does not exist\n", "        caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", "        print(f\"\\n\\ncaption for {letter}({number+1}).{suffix}\\n\\n\")\n", "        print(caption)\n", "#------#" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "YVzy1DZMUT1k", "outputId": "5a7e80ee-7783-4c3e-8ae6-516987d7e725" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Prompt: Describe the image in 400 words\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "/usr/local/lib/python3.10/dist-packages/bitsandbytes/autograd/_functions.py:315: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization\n", " warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "\n", "\n", "caption for B(1).jpg\n", "\n", "\n", "A young Caucasian woman stands against a plain white backdrop, her long, wavy blonde hair cascading over her shoulders. She has a slender, toned physique with fair skin and a subtle tan.
Her facial features are delicate, with arched eyebrows, full lips, and a slight, elegant nose. She is dressed in a dark teal blue bikini-style lingerie set, consisting of a bra and matching thong that accentuates her small to medium-sized breasts and flat stomach. The bra has thin straps and a minimalistic design, while the thong features a high waistband and a low-cut design that highlights her hips and buttocks. She is also wearing black lace-trimmed thigh-high stockings that add a touch of sensuality to her outfit. On her feet are black platform high heels with clear platform soles, adding height to her slender frame and giving her a confident, commanding presence. Around her neck is a black choker, adding to the overall sultry and provocative look of her ensemble. The photograph is well-lit, with soft shadows that highlight the contours of her body and the textures of her clothing. The image is sharp and clear, with no other objects or distractions in the background, focusing entirely on the woman's attire and pose.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for B(1).webp\n", "\n", "\n", "A young woman stands against a plain white background, rocking a quirky, modern style. She's got fair skin and light blonde hair pulled back in a ponytail. She's wearing a teal, long-sleeved dress with a low neckline that hits just above her knees. The dress has a slightly textured, velvety feel. She's accessorized with a black leather crossbody bag featuring a pink and white logo on the front. A black and white checkered scarf is draped around her neck, tied with a black bow at the front.\n", "\n", "Her footwear is eye-catching, featuring black, lace-up, high-top combat boots with thick soles. The boots have a leopard print pattern on the sides, adding a wild touch to her look. On her head, she sports large, black, furry bunny ears, giving her a playful and whimsical vibe. The ears are attached to a black headband with a pink bow on top.\n", "\n", "Her makeup is minimal, with a natural look that complements her outfit. She stands confidently, with a slight tilt to her head, looking straight ahead. The overall style is a mix of streetwear and whimsical fashion, blending edgy elements with a touch of fantasy. The image is a photograph, captured in a studio setting with bright, even lighting.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for B(2).webp\n", "\n", "\n", "A young woman stands against a plain white background, facing the camera with a neutral expression. She has light blonde hair parted on the side, styled in loose waves that fall past her shoulders. Her skin is fair and smooth, and her makeup is minimal, focusing on her eyes with a hint of dark eyeliner.\n", "\n", "She wears a sleeveless, green velvet dress with a square neckline and thin straps. The dress features a high-waisted, pleated skirt that flares out slightly, giving it a retro, 1950s vibe. Her hands are covered in black lace gloves, which extend to her elbows and have intricate floral patterns, adding a touch of elegance and sophistication to her look.\n", "\n", "A red rose is pinned to the left side of her dress, adding a pop of color and a romantic touch to her outfit. She stands with her legs slightly apart, her arms hanging naturally by her sides, exuding a confident and poised demeanor. The lighting is even and soft, highlighting the texture and color of her dress and gloves, without casting any harsh shadows. 
The overall style of the photograph is modern and fashion-forward, focusing on the details and textures of the subject's outfit.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for B(2).webp\n", "\n", "\n", "A young woman stands against a plain white background, showcasing a striking green dress that's sleeveless with a square neckline and a fitted bodice. The dress flares out into a voluminous, layered skirt that adds a touch of whimsy and movement. Her fair skin contrasts with her light blonde hair, which is styled in loose waves and parted in the middle. She wears a black choker around her neck, adding a touch of edgy sophistication to her look. Her lips are a natural pink, and her makeup is minimal, focusing on her natural beauty.\n", "\n", "Her hands are adorned with black lace gloves, which add a touch of old-world elegance to her modern outfit. The gloves are long and fingerless, with intricate patterns and delicate lace textures. The dress fabric is a rich, vibrant green that catches the light, emphasizing its luxurious texture and shine.\n", "\n", "The overall vibe of the photograph is sophisticated and fashion-forward, blending modern and vintage elements. The stark white background ensures all eyes are on the subject, highlighting her attire and accessories. The image is crisp and clear, with even lighting that eliminates any shadows, creating a polished and professional look.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for B(3).webp\n", "\n", "\n", "A young woman stands confidently against a plain white background, showcasing a bold and edgy fashion style. Her long, wavy black hair cascades down her back, contrasting with her dark complexion. She wears a deep green velvet dress with wide bell sleeves and a plunging neckline, which accentuates her slender figure. The dress has a luxurious, slightly shiny texture that catches the light. Around her neck, she sports a delicate black choker, adding a touch of gothic flair to her look.\n", "\n", "Her legs are clad in black fishnet stockings, which have intricate patterns that add texture and a rebellious vibe to her outfit. She pairs the stockings with knee-high black platform boots that have chunky soles and a slight wedge heel, emphasizing her height and giving her an imposing presence.\n", "\n", "On her back, she carries a brown leather backpack with a large, intricate floral pattern, adding a bohemian touch to her outfit. Her makeup is bold, with dark eye shadow and bold red lipstick, complementing her overall dark and edgy aesthetic.\n", "\n", "The background is completely white, ensuring that all the attention is on her. The image captures the essence of alternative fashion, combining elements of gothic, bohemian, and punk styles.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for B(3).webp\n", "\n", "\n", "A young woman stands confidently against a plain white background, showcasing a striking fashion ensemble. She has a medium to dark skin tone, likely of South Asian or Middle Eastern descent, with long, wavy, dark brown hair cascading down her back. Her makeup is bold, featuring dark eyeliner, mascara, and deep burgundy lipstick, accentuating her strong facial features.\n", "\n", "She's dressed in a lush, dark green velvet dress that falls to mid-thigh and is cut in a V-neck, with a plunging neckline. The dress is adorned with a small, intricate cross necklace, adding a touch of religious symbolism. 
The sleeves are wide and bell-shaped, made of the same velvet fabric, and are slightly flared.\n", "\n", "Her footwear is a pair of towering, black platform boots with a chunky, square toe and chunky heel, reaching up to her knees. The boots are made of a shiny, patent leather material, adding a glossy finish to her ensemble.\n", "\n", "A black beanie covers her head, complementing her edgy look. A large, brown backpack with a tribal print is slung over her shoulder, adding a contrasting texture and pattern to her outfit. She is also wearing fishnet stockings, which add a hint of texture to her look. The overall style is a mix of gothic and grunge, with\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for B(4).webp\n", "\n", "\n", "A young woman stands confidently against a plain white background, exuding a gothic and edgy vibe. She has long, wavy black hair cascading down her back and shoulders. Her skin is fair and smooth, with a slight pink undertone.\n", "\n", "She wears a teal lace lingerie slip dress that hugs her slender frame, accentuating her small to medium-sized breasts. The dress features delicate black lace patterns and spaghetti straps, with a deep V-neckline that adds a hint of sophistication and allure. She accessorizes with a black lace choker, enhancing the gothic aesthetic.\n", "\n", "In her left hand, she holds a black, fringed pom-pom, adding a touch of playful cheerleader flair to her otherwise dark ensemble. Her legs are clad in knee-high black fishnet stockings with lace patterns, topped with black platform platform shoes that have chunky soles and white ankle straps.\n", "\n", "Her makeup is bold and dramatic, featuring dark eyeliner and mascara, and a deep red lipstick that complements her gothic style. The overall image is a striking blend of feminine and edgy elements, with a focus on her confident stance and the intricate details of her attire. The simple background ensures all eyes are on her.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for B(4).webp\n", "\n", "\n", "A young woman stands against a plain white background, showcasing a bold and edgy style. She has long, dark, wavy hair cascading over her shoulders and down her back, with a slight parting on the left side. Her skin is fair and smooth, with no visible blemishes.\n", "\n", "She's wearing a teal, lace-trimmed camisole dress that hugs her slender frame, highlighting her small breasts and slim waist. The dress features intricate black lace patterns and thin spaghetti straps, adding a touch of sophistication and femininity. Around her neck is a black choker with a small black pendant, complementing the outfit's dark color palette.\n", "\n", "In her left hand, she holds a black pom-pom, a popular accessory in cheerleading outfits, adding a playful and youthful vibe to her look. Her legs are adorned with knee-high, black lace-up socks that end just above her knees, paired with black platform high heels featuring chunky white platforms and ankle straps, giving her an extra inch of height and a bold, fashion-forward appearance.\n", "\n", "The overall aesthetic is a mix of goth and punk elements, with a focus on dark colors, lace details, and bold accessories. 
The plain white background ensures that all attention is on the model and her outfit.\n" ] } ] }, { "cell_type": "code", "source": [ "letter = 'C'\n", "\n", "for number in range(9):\n", "    # try both common suffixes for each image; skip any file that is missing\n", "    for suffix in ('jpg', 'webp'):\n", "        try:\n", "            input_image = Image.open(f\"/content/{letter} ({number+1}).{suffix}\").convert('RGB')\n", "        except FileNotFoundError:\n", "            continue\n", "        # caption type (descriptive/training_prompt/rng-tags), tone (formal/informal), length (any/short/long/...)\n", "        caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", "        print(f\"\\n\\ncaption for {letter}({number+1}).{suffix}\\n\\n\")\n", "        print(caption)\n", "#------#" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "iBNWPGbIWzTX", "outputId": "0da75a82-e0ef-46f2-b0c2-8f52dfa6b7fa" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for C(1).jpg\n", "\n", "\n", "A young Caucasian woman with light skin and long, wavy blonde hair cascading over her shoulders stands indoors in a library or study. She has a slender physique, medium-sized breasts, and a flat stomach. Her expression is sultry, with her lips slightly parted and a seductive gaze directed at the camera.\n", "\n", "She is dressed in a leather harness, which wraps around her body and leaves her bare from the waist up. The harness is brown with metal rings and buckles, adding a touch of BDSM to the scene. Her arms are restrained behind her back, with her hands gripping the leather straps, accentuating her bound state.\n", "\n", "The background features a wooden bookshelf filled with various books, giving the room a scholarly vibe. The books are arranged in different colors and sizes, adding depth and texture to the background. The lighting is soft, highlighting the woman's features and the texture of the leather harness.\n", "\n", "The overall mood of the photograph is intimate and provocative, with a focus on the woman's body and the details of the leather harness. The image is signed \"FyPhotography © 2012\" in the bottom right corner, indicating the photographer and the year it was taken.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for C(1).webp\n", "\n", "\n", "An East Asian woman with a fair complexion and light skin is posing in front of a plain, light-colored wall. She has short, platinum blonde hair styled in a high, voluminous bun, and wears red-framed glasses that accentuate her large, expressive eyes and full lips painted with a deep red lipstick. She holds a thin, light-colored cigarette in her right hand, placed near her lips, giving a seductive look.\n", "\n", "She is dressed in a revealing outfit that consists of a black, halter-neck bikini top that exposes most of her ample breasts, and a cream-colored, off-the-shoulder sweater that is pulled down to reveal her upper body. The sweater is made of a soft, knitted material that contrasts with the sleek, shiny fabric of her bikini top. Her physique is curvy, with a prominent hourglass shape, and she appears to be in her late twenties or early thirties.\n", "\n", "The background is simple, with no additional objects or decorations, keeping the focus on her. 
The lighting is soft and even, eliminating harsh shadows and highlighting her features and the textures of her clothing. The overall mood of the image is sensual and provocative, with an emphasis on her physical attributes and the allure of her appearance.\n" ] } ] }, { "cell_type": "code", "source": [ "letter = 'D'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "aJ06l10TW0TR", "outputId": "76782d5b-a9a0-4dcf-f20d-eb84256a7216" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for D(1).jpg\n", "\n", "\n", "A young woman stands in an outdoor, rustic setting with a wooden fence in the background. She is holding an assault rifle in her right hand, pointing it upward. The rifle has a bright purple muzzle and is painted with camouflage patterns. She is dressed in a military-style outfit, including a gray hoodie with the hood pulled up and camouflage pants with various patches and pockets. Her face is partially covered by a white mask with a black pattern, giving her a mysterious and tactical appearance. She wears black tactical gloves on her hands and black combat boots with thick soles.\n", "\n", "The background features a wooden fence with the words \"OP: FREE\" painted in black letters. The fence is weathered and has a rustic texture. Behind the fence, there is a green metal structure that looks like a gate or a barrier. The area around the fence is covered in grass and rocks, with some patches of dirt and gravel. The sky is overcast, indicating an impending rain or storm. The overall scene is rugged and ready for action, with the woman's attire and pose suggesting she is prepared for combat or survival scenarios. The colors in the image include the earthy tones of the fence and ground, the bright purple of the rifle, and the muted tones of her camouflage clothing.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for D(2).jpg\n", "\n", "\n", "A woman in full combat gear crouches in front of a brick building with a broken window. She's dressed in a black tactical vest and camouflage pants, with a black helmet and goggles covering her face. A combat knife is strapped to her leg, and she holds a rifle in her hands. The rifle has a scope and a stock, with a magazine attached to the front. Her long blonde hair is tied back, and she has a serious expression on her face, indicating she is ready for action. Her boots are black and sturdy, suitable for rugged terrain.\n", "\n", "The background shows a dilapidated brick building with exposed red bricks and a broken window. The ground is covered in dirt and debris, indicating a rough, abandoned area. The lighting is natural, suggesting the scene is outdoors during the day. The overall mood of the photograph is intense and focused, highlighting the readiness and preparedness of the soldier. 
The texture of her gear is rough and durable, while the bricks in the background are worn and crumbling, adding to the sense of decay and desolation. The colors in the photograph are muted and earthy, with the black of her gear standing out against the brown and red tones of the building.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for D(3).jpg\n", "\n", "\n", "A young woman stands against a plain, light beige wall, rocking a badass military-style getup. She's got a camo helmet with a black visor, a camo tactical vest, and a camo baseball cap with a logo on the front. Her long, straight brown hair peeks out from under the helmet. She's wearing a checkered long-sleeved shirt under the vest and tan cargo pants. The vest is decked out with multiple pouches, one of which is emblazoned with \"USMC,\" and she's got a large, beige tactical belt with pouches and a radio on her waist. Her right hand grips a large, black assault rifle with a tan sling, which is slung over her shoulder. Her face is partially hidden by the helmet's visor, but her lips are painted a bright red. The background is a simple, light beige wall, making her outfit and gear pop. The photo has a professional look, with sharp focus and high resolution, typical of a studio setting. The image is from the 2022 Ariel Lea collection, as indicated by the watermark in the bottom right corner. The woman's outfit and gear are a mix of tactical and military gear, giving her a tough, combat-ready appearance.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for D(4).jpg\n", "\n", "\n", "A young woman poses outdoors, leaning against a concrete wall with a beige, stucco finish. She stands on a wooden deck with planks that have a rustic, weathered look. The woman is dressed in a full tactical gear getup, complete with a camo-print jacket and matching cargo pants, both adorned with multiple pockets and straps. She has on a tan tactical vest loaded with pouches and gear, including a radio and other equipment. Her attire is completed with black gloves and combat boots.\n", "\n", "She has long, wavy brown hair that cascades over her shoulders, and she wears a black tactical helmet with a visor. Her facial expression is neutral, with a slight, confident smile. She holds a large, beige sniper rifle with a long, sleek barrel, resting it across her body and gripping it with both hands.\n", "\n", "The background is a plain, light-colored concrete wall, with no visible decorations or features. The overall scene gives off a military or tactical training vibe, with the woman's gear and pose suggesting she is either a soldier or a military enthusiast. The lighting is natural, likely from the sun, creating a clear and bright atmosphere.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for D(5).jpg\n", "\n", "\n", "A young woman is dressed up as a bunny girl, complete with large bunny ears and a fluffy tail. She has long, light brown hair that matches her bunny ears, which are attached to a headband. She wears a black, high-neck, long-sleeve bodysuit with a black and white graphic on the chest, and black gloves. A large, beige backpack is strapped to her back, adding to her combat-ready look. She is holding a large, black, semi-automatic rifle with a scope attached to the top, aiming it towards the camera, giving off a fierce and determined vibe. 
Her face is serious and focused, with a slight frown and narrowed eyes, indicating she's ready for action.\n", "\n", "The background is dark, with a futuristic, urban setting, featuring neon lights in red and blue hues, adding to the sci-fi feel. A blurred, red neon sign with a white cross shape is visible in the background, possibly indicating a nightclub or an underground rave. The overall lighting is dim and moody, with a mix of cool and warm tones, creating a tense and edgy atmosphere. The image captures a mix of fantasy and realism, blending the cute and innocent look of the bunny girl with the serious and intense theme of combat.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for D(6).jpg\n", "\n", "\n", "A young woman stands on a rooftop, surrounded by a cityscape. She has long, flowing silver hair tied up in a high ponytail, with a few strands framing her face. Her skin is fair and her eyes are large and expressive, with a hint of sadness. She's wearing a black tactical vest adorned with a variety of pouches and straps, holding a large, detailed gun with a scope. The vest has a camouflage pattern on the upper part and the word \"TACTICAL\" in white letters on the left shoulder. She also wears black gloves and a black jacket with a high collar, which is zipped up to her neck.\n", "\n", "In the background, the cityscape is filled with tall, modern buildings in shades of gray and white, with some having antennas and other structures on their roofs. The sky is overcast with a pale blue hue, giving a cool and slightly gloomy atmosphere to the scene. The rooftop has a metal railing around it, and the city below looks dense and busy, with many windows and rooftops visible.\n", "\n", "The overall mood of the image is somber and introspective, with the woman's attire and expression suggesting a sense of duty or vigilance. The photo captures a blend of realism and stylization, with a focus on the details and textures of the character's outfit\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for D(7).jpg\n", "\n", "\n", "A young woman is dressed in a school uniform, with a white blouse and a short pleated skirt featuring a camo pattern in green, brown, and black. She is wearing a large green bow tie, which adds a playful touch to her outfit. The skirt is short, exposing her thighs, and she is also wearing knee-high white socks. She has a short bob haircut with bangs, and her skin is fair. She is holding a black assault rifle with a scope attached to it, which she is pointing towards the camera. Her expression is serious, with a slight frown, and she appears to be in a state of readiness or alertness.\n", "\n", "The background features a white pipe or conduit running horizontally, with a blurred outdoor setting, suggesting an urban or industrial environment. The lighting is natural, indicating that the photograph was taken during the day. The textures in the image include the smooth fabric of her uniform, the rough texture of the rifle, and the softness of her skin. The overall composition of the image is striking, with the contrast between the school uniform and the military gear creating a powerful visual impact.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for D(8).jpg\n", "\n", "\n", "A young Asian woman sits outdoors, holding a black assault rifle with a scope and a magazine. She has a medium build and light skin, with long, straight black hair that flows past her shoulders. 
She wears a white shirt with the sleeves rolled up to her elbows, black fingerless gloves, and a black vest over her shirt. The vest has several pockets and a small patch on the left side. Her skirt is a short, pleated black dress, and she has a black choker around her neck. A green and black camouflage helmet with headphones sits on her head. \n", "\n", "She is wearing a black and white checkered bow tie and has a small, blue and white pin on her left shoulder. A red, white, and green flag hangs from her skirt, which adds a playful touch. She holds the rifle with both hands, gripping it tightly, and has a serious expression on her face, looking directly at the camera. The background is a white brick wall, and the lighting is natural, suggesting it is daytime. The overall style of the photograph is a mix of cosplay and military attire, creating a unique and striking visual.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for D(9).jpg\n", "\n", "\n", "A young woman in tactical gear stands outdoors, facing the camera and looking slightly to the left. She's a slender, fair-skinned woman with dark hair pulled back in a low ponytail. Her face is neutral, with slightly parted lips and a serious expression. She's dressed in a black tactical vest labeled \"MONOCAP\" with a red cross on the front, over a black long-sleeved shirt and olive green cargo pants. The pants have multiple pockets and a belt with a pouch and holster on the right side, and a red tactical knife sheath on the left thigh. She's wearing black fingerless gloves and has a tactical radio on her left shoulder strap. She's also holding a black rifle with a large scope and a red laser sight.\n", "\n", "The background features a large corrugated metal wall with a light gray color and a textured surface. To the left of the frame is a concrete wall or building, and the ground appears to be concrete. The lighting is natural and slightly overcast, casting soft shadows and highlighting the textures of her gear. The overall mood of the image is serious and focused, suggesting a military or law enforcement setting.\n" ] } ] }, { "cell_type": "code", "source": [ "letter = 'E'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "EYWVlpPeW0oU", "outputId": "27198d9e-5588-40af-bcd1-465002032548" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for E(1).jpg\n", "\n", "\n", "A young woman in her early 20s is striking a pose in a swanky, elegant living room. She's kneeling on a plush, black and white damask couch with intricate floral patterns. Her long, straight, light brown hair is styled into two buns on either side of her head, with two cat ears attached to the top of her head. She's wearing a tight, sleeveless, white dress with a high slit on the right side, revealing her thigh and black stockings. The dress has a red sash around her waist and red ribbons tied at the hem, adding a touch of color to her outfit. 
She's also wearing black gloves and red high-heeled shoes with red straps.\n", "\n", "In her hands, she holds a black gun, pointing it towards the camera, giving off a playful yet provocative vibe. Her expression is confident and slightly cheeky, with a hint of a smile. The background features a modern chandelier with crystal drops, adding a touch of glamour to the room. The walls are a light gray color, creating a clean and airy atmosphere. The whole scene exudes a mix of elegance and sensuality, with the woman's outfit and pose being the focal point.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for E(2).jpg\n", "\n", "\n", "A young woman with fair skin and long, straight, silvery hair that falls past her shoulders is crouched down on a concrete floor in a dimly lit, narrow hallway. She has a slender, petite build and is wearing a white blazer over a white shirt, a black tie, and a white mini skirt. Her legs are bare, and she's wearing black high-heeled shoes. Her expression is serious and focused, with her right hand resting on her cheek and her left hand on her knee.\n", "\n", "Next to her, leaning against the rough, gray concrete wall on her left, is a large, black assault rifle with a long barrel and a large magazine. The rifle is equipped with various accessories, including a scope and a bipod. The hallway is narrow and has white wooden walls on both sides, and there is a dark, open doorway at the end, leading into a dimly lit room.\n", "\n", "The overall mood of the image is intense and serious, with a focus on the woman's intense gaze and the presence of the rifle, suggesting a tense or dangerous situation. The textures in the image include the smooth fabric of the woman's clothes, the rough concrete wall, and the metallic sheen of the rifle.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for E(3).jpg\n", "\n", "\n", "A young woman in a cosplay costume sits on a white cloth-covered chair. She has fair skin and long, straight black hair with blue streaks. Her outfit includes a black sleeveless crop top with a blue and white graphic design, a white jacket with a blue emblem on the left sleeve, and tight black leather pants. She wears fingerless black gloves and black combat boots with blue laces. A black belt with a white emblem and a buckle is around her waist. She holds a black rifle with a scope on her left hand, resting it on her left thigh.\n", "\n", "Her right hand is raised, with her fingers splayed against her forehead, as if she is thinking or looking around. Her expression is neutral, with a slight hint of seriousness. A white cap with a blue emblem sits on her head, adding to the military or futuristic vibe of her outfit.\n", "\n", "The background is plain and white, with a white circle with a blue emblem and a white cross inside it hovering above her head, giving the image a futuristic or sci-fi feel. The textures of her outfit, the chair, and the rifle are detailed, with the leather of her pants and gloves looking smooth and shiny.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for E(4).jpg\n", "\n", "\n", "A young woman is seen from a low angle, looking upwards and holding a handgun with both hands. She has short, silver-gray hair with bangs partially covering her face, and she wears large, black headphones with cat ears. The headphones have a glossy texture and give her an anime-inspired look. 
Her skin is pale and her expression is focused and determined.\n", "\n", "She is dressed in a black, futuristic outfit that includes a short, tight jacket with long sleeves and a high collar, along with black leggings and thigh-high boots. The jacket has a sleek, metallic sheen that contrasts with the dark background. She is also wearing a black, high-collared top that is slightly see-through, revealing her midriff.\n", "\n", "The background is dark and blurry, with no discernible objects or details, which creates a dramatic and intense atmosphere. The lighting is dim, with highlights on her face and the gun, adding depth and realism to the image. The overall style of the image is dark and moody, reminiscent of cyberpunk or sci-fi aesthetics.\n", "\n", "The image is a photograph, likely taken with a professional camera, capturing the detailed textures and colors of the subject's outfit and the gun she is holding. The composition and lighting emphasize her determination and readiness for action.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for E(5).jpg\n", "\n", "\n", "A young woman stands confidently in a futuristic hallway. She has fair skin and long, straight, silvery-gray hair that partially covers her right eye, giving her a cool, mysterious look. Her outfit is a sleek, black tactical gear ensemble with a blue and black camo pattern. She wears a sleeveless vest with shoulder pads and black gloves that reach past her elbows. Her legs are wrapped in black tactical pants, with thigh-high black boots and knee pads. In her right hand, she holds a black assault rifle, with a long barrel and a stock that extends to her chest, gripping it tightly. The rifle has a sleek, modern design with a matte finish.\n", "\n", "The hallway is modern and industrial, with a glass railing on the right side and a blue and black checkered floor. The walls are a mix of white and black, and the ceiling features a grid of metal beams and red support structures. The lighting is bright and evenly distributed, highlighting her outfit and the surroundings. The overall vibe is a blend of military and sci-fi aesthetics, with a focus on practicality and functionality. The background is clear and well-lit, ensuring that the subject stands out prominently.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for E(6).jpg\n", "\n", "\n", "A young woman in a camo uniform kneels on one knee, aiming a rifle at the camera with both hands. She wears a full camouflage outfit, complete with a green and brown patterned jacket and pants, and black combat boots. Her jacket is fitted with a tactical vest that has several pouches and straps, and she has a large green backpack slung over her shoulder. Her hair is straight and black, cut to shoulder length with bangs that cover her forehead. The background is a plain white studio setup, with no other objects or people visible. The lighting is bright and even, eliminating shadows and highlighting the textures of her clothing and the details of her rifle. The rifle is black and has a long barrel with a scope mounted on top, and it is equipped with various accessories such as a bipod and a sling. The overall style of the photo is realistic, capturing the essence of a military training scenario. The woman's facial expression is focused and serious, emphasizing her readiness and attention. The image is sharp and clear, with no visible digital manipulation. 
The artist's watermark is visible on the left side of the photo, indicating the photographer or the model's name.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for E(7).jpg\n", "\n", "\n", "A young girl in a school uniform is taking aim with a rifle in a forest. She's positioned slightly to the right of the center, aiming her rifle forward. Her dark brown bobbed hair is cut straight across her forehead, and she's wearing a navy blue blazer with gold buttons, a white shirt, and a gray tie with subtle patterns. Her expression is focused and serious, with her eyes narrowed as she concentrates on her target.\n", "\n", "The rifle she's holding is black with a scope mounted on top, and she's holding it with both hands, her right hand gripping the stock and her left hand on the scope. The rifle is mounted on a tripod, which is set up on a mossy rock in the foreground. The background features a forest with blurred trees and a rocky ground, creating a natural and rustic setting. The lighting is soft and natural, giving the scene a serene yet intense mood.\n", "\n", "The overall style of the photograph is dramatic and evocative, capturing a moment of tension and focus. The composition and colors are muted and earthy, enhancing the sense of realism and the natural setting. The title \"School Girl WORK\" is written at the bottom right corner, suggesting a theme of work or training.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for E(8).jpg\n", "\n", "\n", "A young East Asian woman stands on a balcony with a modern urban backdrop of buildings. She has fair skin and long, straight black hair styled in twin pigtails. She wears a schoolgirl outfit consisting of a white sailor-style top with a blue collar and a pleated blue skirt, typical of Japanese school uniforms. Over her head, she has a pair of large, black, military-grade headphones with a microphone attached, giving her a slightly futuristic look. She also wears a tactical belt with a holster and a magazine pouch, indicating she's ready for action.\n", "\n", "In her right hand, she grips a large, gray assault rifle, which she holds with a confident, steady stance. The rifle is equipped with various accessories, including a scope, a grenade launcher, and a tactical light, suggesting it's a modern combat weapon. The background shows a white metal railing and a blurred cityscape, with tall buildings and possibly a highway or bridge in the distance. The scene combines elements of cosplay and military gear, creating a striking contrast between the innocent schoolgirl look and the serious, tactical equipment she wears. The overall mood is a mix of playful and intense, highlighting the character's dual nature.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for E(9).jpg\n", "\n", "\n", "A young woman kneels on a grassy field, holding a military assault rifle. She has short, dark hair and wears a headset with a microphone, indicating she's ready for action. Her outfit is a camo-patterned vest over a white long-sleeve shirt, paired with a matching camo skirt. The vest is loaded with pouches and straps, and she's also wearing black gloves. Her expression is focused and determined.\n", "\n", "The background is an open field with a few trees and buildings, suggesting an outdoor training ground or a military base. The sky is clear, and the sunlight casts a natural glow over the scene. 
The grass is dry and patchy, typical of late summer or early fall.\n", "\n", "The texture of the woman's clothing is soft, while the rifle she holds is made of hard plastic and metal, giving it a sturdy and durable feel. The image is sharp and well-lit, with a slight blur in the background, emphasizing the subject in the foreground. The overall mood of the photograph is serious and military, with a focus on preparation and readiness.\n", "\n", "The composition is well-balanced, with the woman's figure centered and the rifle held at an angle that draws the eye toward it. The colors are earthy and muted, with greens and browns dominating the scene.\n" ] } ] }, { "cell_type": "code", "source": [ "letter = 'F'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "1HnMK8WfW07L", "outputId": "a2651707-6c86-4214-afb2-236c4d4e9937" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for F(1).jpg\n", "\n", "\n", "A young Asian woman stands in a dimly lit industrial setting, dressed in a dark outfit that includes a black blazer with a white collar, a black pleated skirt, and black knee-high socks. She has long, straight black hair with bangs, and she wears a stern expression. In her right hand, she holds a large, black assault rifle, and her left hand grips a bundle of yellow gloves.\n", "\n", "The background features a large, complex control panel with numerous buttons, switches, and dials, indicating a high-tech or military environment. The control panel is mounted on a metal frame, and pipes and cables run across the ceiling, adding to the industrial feel. The lighting is harsh and focused, creating deep shadows and highlighting the textures of the woman's clothing and the control panel.\n", "\n", "The woman's outfit and the setting suggest a scenario of high tension or urgency, possibly related to military or emergency operations. Her stance is confident and alert, with her feet shoulder-width apart and her body slightly bent forward, indicating readiness to act. The overall mood of the image is intense and serious, with a focus on the woman's preparedness and the functionality of her environment.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for F(2).jpg\n", "\n", "\n", "A young woman dressed in a futuristic, dark, gothic-inspired outfit stands in a dimly lit, industrial studio. She has fair skin and long, wavy black hair with white highlights, cascading over her shoulders and down her back. Her face is partially hidden by a black gas mask, with only her eyes and part of her nose visible. She wears cat ears on top of her head, adding a playful yet edgy touch to her look.\n", "\n", "Her outfit consists of a long, black, pleated skirt that reaches her mid-thigh, made of a soft, textured fabric. Over her skirt, she wears a black leather jacket with silver studs, giving it a tough, militaristic vibe. 
On her arms, she sports black gloves with metallic details, and around her waist, she has a black tactical belt with several pouches and holsters. She also carries a large, black backpack with a white wolf's head emblem on the front.\n", "\n", "In her hands, she holds a futuristic, high-powered rifle, which has a sleek, black design with a large scope and various attachments. The background is a dark, muted grey with a rough, concrete texture, enhancing the gritty, urban feel of the scene. The overall style is a mix of cyberpunk and gothic fashion, emphasizing the blend of practicality and\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for F(3).jpg\n", "\n", "\n", "A woman in a police uniform stands in a dimly lit room. She has light skin and is wearing a black helmet with a visor and a black face mask that covers her nose and mouth. Her blonde hair is visible underneath the helmet. She is dressed in a grey hoodie with the word \"POLIZEI\" in white letters on the back, and a black tactical vest with a yellow and black shield emblem on the left arm. She is also wearing blue jeans and black tactical boots with a tactical belt around her waist. Her arms are positioned in a ready stance, with her right arm bent and her left arm extended, holding a black handgun in her right hand.\n", "\n", "The room she is in has a dark wooden floor and white walls with a brick texture. There is a white door with a diamond pattern in the background, and a plant is visible on the left side of the image. The lighting in the room is low, creating a dramatic and moody atmosphere. The overall style of the photo is realistic and detailed, capturing the seriousness and readiness of the police officer.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for F(4).jpg\n", "\n", "\n", "A young person dressed in a futuristic, sci-fi inspired cosplay outfit is kneeling on a white floor with a plain white background. They have short, silver-gray hair and are wearing a black, full-face gas mask with rabbit ears, giving them a bunny-like appearance. The mask has a clear plastic visor with a reflective surface and a metal tube extending from the front, which appears to be a gas mask hose.\n", "\n", "They are dressed in a black, military-style tactical vest with multiple pockets and straps, adding a rugged, combat-ready look. The vest is worn over a black long-sleeved shirt, which is slightly unbuttoned to reveal a black, tight-fitting tank top underneath. Black, thigh-high stockings are worn with black, knee-high boots that have a metallic finish.\n", "\n", "In their hands, they hold a futuristic weapon that resembles a machine gun with a long barrel and a metallic frame. The weapon is held in a ready position, suggesting a sense of readiness or combat. The overall color palette is dark, with black and silver dominating the outfit, and the white background makes the cosplayer's outfit and accessories stand out prominently.\n", "\n", "The image captures a blend of realism and fantasy, with the cosplayer's outfit and accessories adding a high level of detail and realism, while the bunny ears and gas mask add a\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for F(5).jpg\n", "\n", "\n", "A cosplay photo of a woman dressed as a sexy bunny girl with a military twist. She has a light skin tone and short, dark brown hair with bangs. She wears a black bunny ears headband and a black mask that covers her nose and mouth, giving her an edgy, mysterious look. 
Her outfit consists of a black, strapless, leather-like bodysuit with a high neckline and ruffled edges, accentuating her figure and emphasizing her medium-sized breasts. She has a black, utility-style belt strapped across her chest, with a large, black assault rifle attached to it, adding a touch of realism and ruggedness to her costume. The rifle is held in a ready-to-fire position, suggesting a playful yet slightly intimidating pose. Her legs are covered in fishnet stockings, with black thigh-high boots completing the ensemble. The background is plain, with a beige wall, which makes the subject stand out. The lighting is warm, creating soft shadows that enhance the textures of her outfit and the details of the rifle. The overall style of the cosplay is a blend of fantasy and military themes, with a focus on detailed craftsmanship and a strong sense of character. The image exudes a combination of sensuality and power, making it both visually striking and thought-provoking.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for F(6).jpg\n", "\n", "\n", "A young woman is dressed in a Japanese schoolgirl uniform, complete with a navy blue pleated skirt and white blouse. She's wearing a black cap, black face mask, and black gloves, giving her a stealthy look. Her skin is light, and her straight black hair falls just below her chin. She's holding a large, black rifle with a scope, suggesting she's ready for action. The rifle has a sleek, modern design with various accessories, including a bipod at the front and a sling on the left side.\n", "\n", "The background features a metal mesh fence, typical of a school or park area, with a clear blue sky above. The fence has a grid pattern with a light gray color, and beyond it is an urban landscape with buildings and greenery. The lighting is bright, indicating it's daytime, with the sun casting shadows and highlighting the textures of her clothes and the rifle. The scene captures a mix of innocence and readiness, with the school uniform contrasting with the weapon. The composition and details emphasize the tension between her appearance and the gear she carries, hinting at a hidden or unexpected role.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for F(7).jpg\n", "\n", "\n", "A young woman with pale skin and long, straight, silver hair styled in a ponytail sits on a bench in a futuristic, high-tech room with large windows. She has striking blue eyes and wears a black bunny suit with a white bow tie and black gloves, featuring a white vest with a black bow tie and a black skirt. Her outfit also includes black thigh-high stockings and black knee-high boots with silver buckles. She holds a black assault rifle with a futuristic design, resting it across her lap. The room has a sleek, modern design with large, rectangular windows that have a grid pattern, allowing a view of a cityscape outside. The walls are made of a reflective, glass-like material that enhances the futuristic feel. The floor is made of a glossy, reflective material that matches the walls, creating a sense of depth and dimension. The lighting is bright and cool-toned, giving the scene a clean and sterile appearance. The overall style is a mix of sci-fi and cosplay, with a focus on high-tech weaponry and futuristic fashion. 
The composition is dynamic, with the woman's pose and the reflection of the windows adding depth and interest to the image.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for F(8).jpg\n", "\n", "\n", "A young woman in a school uniform stands in a dimly lit room, leaning against a white curtain. She has a petite build with straight, dark brown hair and bangs, and a light complexion. Her expression is serious and focused, with her eyes looking down. She's wearing a traditional Japanese school uniform with a white blouse and a red neckerchief, a navy blue pleated skirt, and a black sailor collar with white stripes. She has black tights covering her legs and a pair of tan gloves on her hands.\n", "\n", "In her left hand, she holds a large, black assault rifle with a scope and a sling over her shoulder, indicating she's ready for action. Her right hand is raised to her ear, as if she's on the phone. A camouflage-patterned utility belt is slung around her waist, holding a small pouch and a radio, adding a military touch to her outfit. The background features a dark wooden door with vertical slats, giving the room a rustic and slightly ominous feel. The lighting is dim, with soft shadows highlighting the textures of her uniform and the gun. The overall mood is intense and serious, suggesting a high-stakes scenario.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for F(9).jpg\n", "\n", "\n", "A young woman with long, straight, light blue hair styled with bangs and a headband with a blue and red pattern stands confidently against a plain light grey background. She has fair skin and a serious expression, with her eyes slightly narrowed and her lips pursed, as if she's focused on something. She wears a pair of large, black headphones with a microphone, indicating she's a gamer or a streamer.\n", "\n", "Her outfit is a military-inspired, tactical getup. She wears a black tactical vest with multiple pockets, straps, and pouches, including one on the back containing a coiled rope. The vest has a green camo print on the upper part and a brown leather belt with a buckle on the left side. Underneath, she wears a dark green jacket with a high collar, which adds to the military look.\n", "\n", "She holds a black assault rifle with a scope and a suppressor attached to the barrel. Her hands are gloved in black tactical gloves. The texture of her clothing is rugged, with a matte finish, and the fabric appears to be a blend of synthetic and leather materials. 
The overall vibe of the image is serious and intense, with a focus on military gear and combat readiness.\n" ] } ] }, { "cell_type": "code", "source": [ "letter = 'G'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "4l_2a7aSW5BS", "outputId": "54c41d4c-55af-4523-abde-2c458ad73b92" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for G(1).jpg\n", "\n", "\n", "A black and white photograph of a person dressed in a tactical gear cosplay, standing against a plain background. The person has a slender build and is wearing a dark hooded tactical vest over a black long-sleeved shirt. The hood has several mesh panels and the vest is adorned with numerous pockets and straps, including a large pouch on the front and a hydration bladder on the left shoulder. The person is also wearing fingerless gloves, black shorts, and high-top tactical boots with thick soles. A tactical backpack is slung over one shoulder, and a large assault rifle is held diagonally across the body, with the muzzle pointed down. The rifle has a long barrel and a stock that extends to the ground, and it is equipped with various attachments, including a scope and a vertical grip.\n", "\n", "The person's face is partially obscured by the hood and a mask with a menacing grin, giving a sinister and intimidating appearance. The mask has a skull design with a hollow, skeletal look. The background consists of a plain wall and a closed door with a simple handle, suggesting an indoor setting. The monochrome tones enhance the gritty and realistic aesthetic of the cosplay, emphasizing the textures and details of the tactical gear.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for G(2).jpg\n", "\n", "\n", "A young person is holding a military-style rifle, aiming it at the window. The person has short, straight, platinum blonde hair and is wearing a white short-sleeved shirt with a blue and white striped tie. The rifle is made of a dark brown color and has a long barrel with a suppressor attached to the end, indicating it is designed for stealth. The suppressor is made of metal and has a gray color. The rifle has various attachments, including a scope on top and a magazine attached to the bottom. The scope is large and has a brown color, and the magazine is black with a capacity indicator. The person's hands are steady and focused on the rifle, with their fingers gripping the handle and the buttstock. The background is a bright, well-lit room with white walls and a large window covered by sheer white curtains. The window lets in a lot of natural light, creating a soft, diffused light in the room. The room appears to be a modern, clean, and well-kept space, possibly a home or an office. 
The overall mood of the image is serious and focused, with an emphasis on the military aspect of the rifle.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for G(3).jpg\n", "\n", "\n", "A young woman, probably in her early twenties, is chilling in a dimly-lit, industrial space, with a concrete floor and metal beams in the background. She's sitting with her legs crossed and is rocking a schoolgirl outfit, complete with a white blouse with long sleeves, a blue bow tie, and a dark blue pleated skirt. Her straight black hair is shoulder-length, and she's sporting a backpack with straps over her shoulders. Her face is serious, and she's clutching a black, high-powered rifle with a scope on the end, which she's pointing at the camera. The rifle is detailed and realistic, with a black stock and a scope on the end. Her outfit is neat and pressed, giving off a clean and polished vibe. The lighting in the scene is soft and moody, casting shadows and highlights that add depth to the image. The background is a bit out of focus, emphasizing the subject and giving the scene a dramatic and intense feel. The overall style is realistic, with a focus on the subject's expression and the details of her outfit and the rifle. The image captures a moment of tension and preparedness in an urban setting.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for G(4).jpg\n", "\n", "\n", "Two women in a modern, urban setting, both wearing realistic, anime-style cosplay costumes. The woman on the left has short, straight blonde hair with bangs and is wearing a light blue school uniform with a white shirt, blue striped tie, and a beige military-style tactical vest. She holds a black rifle with a scope in her hands, gripping it firmly. Her expression is serious and focused, with her head slightly tilted down.\n", "\n", "The woman on the right has long, straight blonde hair with bangs and is wearing a similar light blue school uniform with a white shirt and blue striped tie. She is leaning over and resting her head on the other woman's shoulder, looking up with a gentle and caring expression. Her hands are gently placed on the other woman's shoulders, and she appears to be supporting her.\n", "\n", "The background features a concrete wall with a rough texture and a slightly off-white color, suggesting a modern, urban environment. There is a black backpack hanging from the woman on the left, indicating that they are possibly on the move or preparing for a mission. The lighting is natural, with soft shadows and highlights that emphasize the textures of their clothing and the details of their costumes. The overall mood of the image is one of tension and protectiveness.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for G(5).jpg\n", "\n", "\n", "A young woman with pale skin and long, flowing silver hair, which cascades over her shoulders and down her back. She wears a black headband, adding a touch of contrast to her otherwise monochromatic look. Her outfit includes a mustard yellow, high-necked blouse with puffed sleeves and a high waist, paired with a black miniskirt that accentuates her slender legs. She has black thigh-high socks and shiny black high-heeled boots, which add a touch of edginess to her otherwise modest attire.\n", "\n", "The woman is crouching on one knee, leaning slightly forward with her other leg bent and resting on the ground. She holds a large, black assault rifle in her right hand, pointing it forward with a serious expression on her face. 
The rifle has a metallic finish and is equipped with various accessories, including a scope and a tactical flashlight attached to the side.\n", "\n", "The background is a plain, light gray color, which makes her stand out and draws attention to her striking pose and the details of her outfit and the rifle. The overall style of the photograph is realistic, with a focus on the textures and details of her clothing and the rifle. The lighting is bright and even, highlighting the contours and textures of her outfit and the weapon.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for G(6).jpg\n", "\n", "\n", "A young woman in a futuristic, military-inspired getup kneels on the ground, holding a high-powered rifle in front of her face. She's got a slender build with fair skin and a head full of long, straight, white hair with pink highlights. Her eyes are hidden behind dark, rectangular glasses. She's wearing a black and gold military-style jacket with a high collar, featuring red and black accents and a red shoulder strap. The jacket has a high waist, giving off a structured and functional vibe. Over her jacket, she's got a long, black cape that drapes over her shoulders and down her back, with a white lining visible at the edges. Her pants are black and feature red accents, with knee-high black boots and black thigh-high socks.\n", "\n", "The background is a plain, dark grey, with a spotlight on her, making her stand out. The lighting highlights the texture of her jacket and the shiny, metallic look of her rifle. Her pose is intense, with her body slightly crouched and her left leg forward, giving a sense of movement and readiness. The overall mood is serious and intense, with a focus on her military gear and her readiness to engage. The image captures the essence of a modern-day warrior.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for G(7).jpg\n", "\n", "\n", "A young woman dressed as a sexy maid is caught mid-action, wielding a large black rifle. She stands in a dimly lit, industrial warehouse setting with metal beams and wooden crates visible in the background. The scene is gritty and raw, with a hint of danger in the air.\n", "\n", "The woman has a fair complexion and long, straight blonde hair styled with bangs. She wears a black maid headband with a white lace trim, adding a touch of class to her ensemble. Her outfit is a black and white maid outfit, featuring a short, cropped top with a white collar and red bow, paired with a black mini-skirt with a white ruffled hem. She is also wearing black thigh-high stockings and black high-heeled shoes, emphasizing her slender legs.\n", "\n", "In her hands, she holds a large, black rifle, which she is pointing forward. Her expression is intense, with a determined look on her face, and she is in mid-stride, suggesting she is moving towards something or someone.\n", "\n", "The colors in the image are mostly muted, with the black and white of her outfit standing out against the dark, industrial background. The texture of her outfit is smooth and shiny, while the rifle she holds has a matte finish. The overall mood of the image is one of tension and excitement.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for G(8).jpg\n", "\n", "\n", "A young woman in a cosplay outfit is kneeling and holding a rifle, ready to fire. Her outfit is a mashup of military and anime styles, with a white shirt, black blazer, and a red tie. 
She's also wearing black tights and thigh-high boots with silver buckles. Her long, straight red hair is styled in twin tails, with bangs framing her face. She has fair skin and a determined look on her face, with her mouth slightly open as if she's about to speak or shout.\n", "\n", "The background is a rugged, industrial setting with a lot of pipes and machinery, painted in muted blue and green tones. The ground is covered in dirt and gravel, giving the scene a gritty, realistic feel. The lighting is dim, creating shadows and emphasizing the textures of her outfit and the background.\n", "\n", "The woman's pose and expression suggest she's in a combat situation, ready to fight. Her rifle, which she's holding with both hands, has a wooden stock and a black barrel, adding to the military vibe. The overall mood is intense and action-packed, with a focus on the character's readiness and determination. The cosplay outfit and the setting create a vivid, immersive experience for the viewer.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for G(9).jpg\n", "\n", "\n", "A young woman with a fair complexion and a slim figure crouches on a wet pavement outside, holding a large, black submachine gun with a scope on top. Her expression is serious, with her mouth slightly open and eyes looking forward. She's dressed in a dark blue, military-style outfit consisting of a jacket with a yellow patch on the right sleeve and matching pants. The jacket has a high collar and zippers on the sides. She wears black gloves, a black headband, and black boots. Her short, platinum blonde hair is slightly messy, and she has a pair of sunglasses on her head.\n", "\n", "The background features lush, green foliage and a metal railing, suggesting she's in a park or a garden. Another person, dressed casually in a white shirt and dark pants, stands in the background, looking away from the camera. The ground is wet, indicating recent rain, and the sky is overcast, casting a soft, diffused light over the scene. The overall mood is tense and intense, with the woman's serious demeanor and the military gear adding to the atmosphere.\n", "\n", "The image is a photograph taken in natural daylight, capturing the details and textures of the subject and the surroundings vividly.\n" ] } ] }, { "cell_type": "code", "source": [ "letter = 'H'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "MB9qKZ3jW5T4", "outputId": "f996af17-e204-407b-d350-6c8346f799af" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for H(1).jpg\n", "\n", "\n", "A young woman kneels on a concrete floor, with her back to the camera, in a dimly lit industrial setting. She has long, flowing red hair cascading down her back. She's dressed in a provocative, military-inspired outfit, featuring a black beret, a short, pleated black skirt with red stripes, and a black leather jacket with red accents. 
She's also wearing black thigh-high stockings with a garter belt and red suspenders that are attached to a black leather garter belt around her waist. The garter belt is partially unbuttoned, revealing her bare buttocks, which are prominently displayed due to her kneeling position. Her black leather boots have thick soles and a chunky heel, adding to the rugged, edgy look of her outfit.\n", "\n", "In the background, there is a large, green industrial drum, and behind it, there is a partially open door revealing a fiery scene with orange and red flames and smoke, suggesting a dramatic or dangerous setting. The floor is made of concrete with a light, beige hue, and there are metal pipes and industrial equipment scattered around, contributing to the gritty, industrial atmosphere. The overall mood is one of rebellion and boldness, combining elements of fashion and streetwear with a dash of danger.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for H(2).jpg\n", "\n", "\n", "A young woman sits on a metal ladder, looking pensive and lost in thought. She has long, straight black hair with bangs and light skin. Her expression is serious, with her lips slightly parted and her eyes focused on something in the distance. She wears a white long-sleeved button-down shirt, which is unbuttoned at the collar, revealing a hint of cleavage. Over her shirt, she wears a dark blue tie, and a black backpack with a white logo is slung over her shoulder. Her legs are covered in black stockings, and she is barefoot, with her feet tucked under her body.\n", "\n", "In her right hand, she holds a cigarette, which she is about to smoke, and a lighter is held in her left hand. The background features a window with large panes, which are partly covered by a curtain on the left side. The window reflects a blurred image of a person, adding a sense of depth to the scene. The overall color palette is muted, with cool tones dominating the image. The ladder she sits on is metallic, adding a rugged texture to the setting. The image has a realistic and slightly moody aesthetic, with soft lighting that enhances the somber mood of the composition.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for H(3).jpg\n", "\n", "\n", "A young woman with long, straight black hair and bangs is dressed in a dark, futuristic outfit. She wears a black, high-collared, short-sleeved crop top with a purple under-layer, and a short, pleated black skirt with a high waist. Around her neck is a black leather mask with a slit for her mouth, and she has a black, furry tail attached to her back. Her arms are covered in black, armored gloves with thick padding, and she wears black thigh-high stockings with a shiny finish. She holds a black assault rifle in her right hand, with a silencer attached to the end, and a black tactical vest with multiple pouches strapped to her chest and waist.\n", "\n", "The background is a dimly lit, industrial-looking room with a high ceiling and exposed beams. The walls and floor are dark, with a metallic texture, and there are various pieces of military equipment scattered around, including a green gas mask hanging from the ceiling and a black metal table in the background. The overall mood of the image is intense and serious, with a focus on the character's combat-ready attire and the grim, industrial setting. 
The lighting is dramatic, highlighting the contours of her body and the details of her outfit.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for H(4).jpg\n", "\n", "\n", "A woman in a futuristic, military-inspired costume is crouching and aiming a gun through a narrow gap in a metal structure. The scene is bathed in a cool, monochromatic light that gives it an otherworldly, dystopian vibe. The woman has a pale skin tone and a slender build, with shoulder-length, straight, silver hair that contrasts sharply with her dark outfit. She is wearing a black, high-collared jacket with metallic accents and a high-tech, futuristic look. Her jacket is made of a mix of fabric and armor, with padding and straps for added protection and utility. Her gloves are black and look like they're made of a durable, flexible material. The gun she's holding is a futuristic weapon with a sleek, metallic design and a long barrel, suggesting it's meant for combat or hunting.\n", "\n", "The background features a metallic, industrial environment with large, angular structures and beams, giving the impression of a high-tech, post-apocalyptic setting. The colors in the image are mostly shades of gray and white, with the woman's silver hair and the metallic textures of her costume standing out. The overall mood of the image is tense and focused, capturing a moment of intense concentration and readiness.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for H(5).jpg\n", "\n", "\n", "A young woman is dressed up as a badass female soldier, strutting confidently with her head held high. Her outfit is a mix of military and combat gear. She's rocking a black hooded jacket with a high collar, leaving her face partially covered. The jacket has a tan tactical vest strapped over it, loaded with pouches and gear. Her pants are a dark, camo-print cargo style, with a bunch of pockets and straps, including a belt loaded with more pouches and gear. Her boots are black combat boots with heavy soles, perfect for stomping around rough terrain. She's also decked out with gloves, a tactical helmet, and a gun slung over her shoulder, ready for action.\n", "\n", "Her blonde hair peeks out from under the hood, and she's got a determined look on her face. The background is a concrete urban setting, with a blurred view of buildings and greenery, hinting at a cityscape. The sky is overcast, giving the scene a gloomy vibe. The concrete ground is cracked and worn, adding to the gritty, realistic feel. The colors are muted, with blacks, grays, and earth tones dominating the scene. This getup screams tactical and combat-ready, with a focus on practicality and functionality.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for H(6).jpg\n", "\n", "\n", "A person in a realistic military combat suit is lying on the ground, partially concealed by a large, black, highly detailed, and textured exoskeleton. The suit is made of dark, rugged fabric with numerous straps and buckles, giving it a tactical and functional appearance. The exoskeleton is a dark gray color with a metallic sheen, and it has a complex, mechanical design with multiple joints and segments. The person is lying on their back, legs bent, with one arm extended out to the side and the other tucked close to the body.\n", "\n", "The background features an outdoor setting with a paved surface made of light gray stone tiles. Behind the person, there is a mix of greenery and urban elements. 
Tall trees and bushes are visible, along with a playground structure with yellow and blue components. In the background, a few high-rise buildings with white and gray facades are visible, suggesting an urban park or recreational area. The overall mood of the image is both futuristic and gritty, capturing a moment of intense preparation or combat. The details in the suit and exoskeleton are highly realistic, enhancing the sense of realism in the scene. The person's body is partially obscured by the exoskeleton, adding to the mystery and intrigue of the image.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for H(7).jpg\n", "\n", "\n", "A young woman stands in front of a grand, ornate marble wall with classical sculptures and detailed carvings. She is dressed in a futuristic, anime-inspired outfit with a mix of military and schoolgirl elements. Her long, silver hair is adorned with a star-shaped hairclip and a black headband with a pink flower. She wears a purple and white school uniform top, complete with a sailor collar and a black bow, over a black bodysuit. Her top is decorated with various patches and badges, including a large one on the chest with Japanese kanji.\n", "\n", "She has a black jacket draped over her shoulders, featuring a white and red flag patch on the left sleeve. The jacket is worn over a black and red belt with multiple pouches, which add a tactical vibe to her look. She holds a long, black rifle with a scope in her right hand, and a black backpack hangs from her left shoulder. Her footwear consists of black combat boots with white socks.\n", "\n", "The background is a white marble wall with intricate carvings of mythological figures and columns. The overall setting is a blend of ancient and futuristic styles, adding to the contrast between her outfit and the classic surroundings.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for H(8).jpg\n", "\n", "\n", "A young woman with long, wavy black hair and cat ears on a headband is kneeling on a white floor surrounded by a mess of weapons. She's dressed in a futuristic, dark grey and black tactical outfit with various pockets and straps, including a belt with a holster and a mask over her mouth. The outfit has a mix of grey, black, and purple tones, with a high-collared top, long sleeves, and a short skirt that ends above her knees. She's wearing black thigh-high socks and black combat boots with thick soles, and her fingers are wrapped in black gloves. She holds a black assault rifle in her right hand, pointing it towards the viewer, and has another rifle slung over her back. Scattered around her are several other rifles and pistols, some in black and others in beige and brown, creating a chaotic, militaristic scene. The background is a plain white wall, with the weapons and ammunition adding a sense of urgency and danger. The image has a high-contrast, gritty look, with sharp shadows and intense colors, typical of digital art. The overall mood is one of readiness and tension, with a focus on military gear and the character's fierce and determined demeanor.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for H(9).jpg\n", "\n", "\n", "A young woman is lying on her back on a plain white floor, dressed in a futuristic, high-tech military getup. She's got long, flowing black hair that partially covers her face, and she's rocking cat ears on her head, giving her a cute, feline vibe. 
Her eyes are covered by a black mask, which adds to the mysterious and edgy look.\n", "\n", "She's decked out in a black tactical outfit with a chest rig and a bunch of pouches, gloves, and thigh-high boots that have thick soles and laces. The outfit is designed for maximum mobility and protection, with armored plates and a snug, form-fitting design.\n", "\n", "In her hands, she's holding a large, high-powered rifle with a scope and a suppressor, ready for action. Her body language screams alertness and readiness, with her legs slightly bent and her torso raised off the ground, her head turned to the side, and her eyes focused on something off-camera.\n", "\n", "The background is a simple gradient of light blues and whites, keeping all eyes on her. The lighting is bright and even, highlighting the textures of her outfit and the sleek, metallic look of her rifle. The overall vibe is futuristic, badass, and a bit otherworldly.\n" ] } ] }, { "cell_type": "code", "source": [ "letter = 'I'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "zOfslc64W5l4", "outputId": "8bfbfd63-88b9-417a-ba06-d41ac7788ea8" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for I(1).jpg\n", "\n", "\n", "Two soldiers in a modern military setting are shown in a tense, intense moment. The soldier in the foreground is a woman with light skin and dark hair, wearing a helmet with a camouflage pattern. She's holding a rifle with both hands, her face grim and focused. Her eyes are hidden behind black-framed glasses, and she's dressed in a camouflage jacket over a tactical vest with pouches and straps, giving her a rugged and prepared look. A patch on her chest features a white logo.\n", "\n", "Behind her, slightly to the right, stands another soldier, a man with a beard and glasses. He's also in a camo jacket and tactical vest, holding a similar rifle. He's positioned to the side, adding a sense of depth and action to the scene. The background is blurred, with a muted, industrial look, likely a warehouse or hangar, suggesting a realistic, combat-ready environment.\n", "\n", "The textures in the image are varied, from the smooth surfaces of the helmets and rifles to the rough, rugged fabric of the uniforms and vests. The lighting is dim, casting shadows and adding to the gritty, serious atmosphere. The overall style of the photograph is realistic and immersive, capturing the intensity and readiness of the soldiers.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for I(2).jpg\n", "\n", "\n", "A person is dressed in a military-inspired outfit in an urban setting, with a white mask that resembles a fox or wolf face, featuring large eyes and a snout. The mask covers the entire face, leaving only the mouth exposed. The person is wearing a gray long-sleeved shirt, a tactical vest with a green and brown camouflage pattern, and a matching green skirt with a high waist. 
The vest is loaded with pouches and pockets, and the person has a black helmet with a visor, ear protectors, and a head-mounted microphone.\n", "\n", "The person is holding a large, black assault rifle with a scope on top, strapped across the chest with a sling. They also have a large, round, green backpack on their back, and a large, round, green bag hanging from their right hip. The person's legs are covered in green and brown knee pads, and they are wearing black gloves.\n", "\n", "In the background, there are bare trees, indicating a winter or early spring setting, and a red and yellow sign with Chinese characters. The buildings in the background are multi-story, with a mix of windows and doors, and a few people are walking around, suggesting an urban environment. The overall atmosphere is one of preparation and readiness.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for I(3).jpg\n", "\n", "\n", "A person in cosplay gear kneels on reddish-brown dirt and gravel, taking aim with a sniper rifle. The sniper rifle is large and black, with a scope and a long barrel, pointed to the left of the image. The person is dressed in a black tactical vest with several pouches and straps, and a black long-sleeved shirt with a high collar. They wear black shorts and black thigh-high stockings, and black combat boots with laces. Their hair is a striking, bright red, styled in a bob cut with bangs covering their eyes, giving them a stealthy look. The background features a rusted, weathered, and partially dismantled white metal structure, which could be the remains of an old vehicle or a building, with visible peeling paint and rust. The ground is a mix of dirt and small rocks, with some green grass patches and a few small weeds. The overall scene has a gritty, militaristic feel, enhanced by the rough textures of the dirt, metal, and the person's outfit. The lighting is natural, suggesting an outdoor setting, possibly in a forest or rural area. The image captures a moment of intense focus and readiness, emphasizing the person's camouflage and military-inspired attire.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for I(4).jpg\n", "\n", "\n", "A young girl stands against a plain white background, with a minimalist, studio-style setting. She has a pale complexion and straight, shoulder-length blonde hair with bangs. Her large, expressive eyes are highlighted by subtle makeup, giving her a soft, youthful look. She wears a school uniform consisting of a gray blazer with a white shirt and a black and white plaid skirt. The blazer is fitted, with gold buttons and a small pocket on the left breast. A black tie is loosely knotted around her neck. Her skirt is pleated and slightly flared, reaching mid-thigh. She has a black backpack slung over her shoulders, with a gray zipper and straps. Her hands are clasped together in front of her chest, holding a small, black, rectangular camera, which she looks at intently. The lighting is even and soft, with no harsh shadows, creating a clean and polished look. The background is plain white, drawing attention to the girl and her outfit. The texture of her clothes is smooth and slightly shiny, indicating a synthetic fabric. Her posture is upright and confident, with a slight tilt of her head, suggesting a thoughtful or curious expression. 
Overall, the image combines the elements of a school uniform with a modern, digital photography theme.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for I(5).jpg\n", "\n", "\n", "A young woman with long, straight, dark brown hair and bangs is wielding a rifle in an intense, gritty scene. Her skin is pale, and she has a serious look on her face. She's wearing a black mask that covers her nose and mouth, and a light gray, long-sleeved jacket over a white shirt with a black tie. Her jacket has a texture that looks like a heavy, woolen fabric. She's also wearing black tactical gloves, and her rifle is equipped with a scope, which she is aiming at the viewer.\n", "\n", "The background is a dimly lit, industrial setting with a corrugated metal wall and pipes running horizontally. The lighting is blue and cold, creating a tense and serious atmosphere. The colors in the image are mostly dark blues and grays, adding to the somber mood. In the upper right corner, there is text in English and Chinese that reads \"Daughters of TROY\" and \"TROY的女儿,\" indicating this is a promotional image for a movie or TV show.\n", "\n", "The photo captures the moment with sharp focus on the woman and her rifle, highlighting the tension and danger of the scene. The overall style is realistic, with a focus on the details of the setting and the characters.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for I(6).jpg\n", "\n", "\n", "A young woman with a short, straight, light brown bob haircut is holding a sniper rifle. She's wearing black gloves, a white shirt with the sleeves rolled up, and a black tactical vest. The rifle has a large scope with a protective cover and a long barrel. The scope is aimed at something off-screen, suggesting she's engaged in surveillance or hunting. The background is an urban setting with a cloudy sky, a bridge with black railings, and a blurred view of buildings. The woman is focused and intense, with a serious expression on her face. The photograph has a realistic and gritty style, capturing the tension and precision required for sniper operations. The lighting is natural, with soft shadows indicating an overcast day. The textures in the image include the smoothness of the rifle's metal and plastic parts, the roughness of the black gloves, and the fabric of the white shirt and black vest. The colors are muted, with shades of gray, black, and white dominating the scene. The composition is tight, with the sniper rifle and the woman's face taking up most of the frame, emphasizing the importance of her action. The overall mood is serious and focused, highlighting the skill and dedication required for sniper work.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for I(7).jpg\n", "\n", "\n", "A young woman strikes a provocative pose in a dimly lit, neon-lit bar. She is an Asian woman with long, straight, platinum-blonde hair and pale skin. She wears a black, short-sleeved top that reveals her midriff, and a black, high-waisted, tight-fitting skirt that accentuates her hips and buttocks. Black thigh-high stockings cover her legs, and she wears black gloves on her hands. She also has black bunny ears on her head and a fluffy white tail attached to the waistband of her skirt.\n", "\n", "The woman is leaning forward with her back to the camera, showcasing her curvy figure and buttocks. She has a small waist and wide hips. Her facial expression is sultry, with full, red lips and a slight smile. 
Her eyes are hidden by long hair, and she wears black eyeshadow.\n", "\n", "In the background, the bar is decorated with neon signs, one of which says \"BELL.\" A large, round, metal drum sits on a stool behind her, with a pair of red gloves resting on top of it. The bar has a gritty, industrial feel with exposed pipes and a metal ceiling. A paper lantern with an orange light hangs from the ceiling, adding to the dimly lit atmosphere. The floor is\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for I(8).jpg\n", "\n", "\n", "A young woman stands in an industrial setting, holding a futuristic assault rifle. She has pale skin and straight, platinum blonde hair with bangs. Her makeup is minimal, with subtle eyeliner and mascara. She is wearing a black, long-sleeved, ribbed sweater that hugs her slender frame and a high-waisted, knee-length skirt with a white and black geometric pattern. The skirt has a textured, almost leather-like appearance.\n", "\n", "She is holding the assault rifle with both hands, pointing it to the side. The rifle has a sleek, futuristic design with a black color and a matte finish. It has a large magazine and a long barrel, indicating it is a powerful weapon.\n", "\n", "The background consists of a metal and glass structure with a purple and white color scheme. The metal is rusted and worn, with visible screws and rivets. The glass panels are tinted with a pattern of small circular holes. The floor is concrete, and the overall atmosphere is gritty and industrial.\n", "\n", "The lighting is bright, with sunlight streaming through the glass panels, casting a glow on the subject and creating highlights on her hair and the rifle. The scene combines elements of futuristic military and industrial design, creating a striking and dynamic image.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for I(9).jpg\n", "\n", "\n", "A young woman in a cosplay maid outfit poses confidently with a rifle in a gritty, industrial setting. She's standing on a metal staircase with metal steps and a handrail, and the background shows a dark, metallic texture. Her skin is fair, and she has a slender build with medium-sized breasts, noticeable through her outfit. Her short, straight black hair is styled with a white maid headband adorned with frills and a small bow, adding a touch of cuteness to her serious demeanor.\n", "\n", "She's dressed in a black maid outfit with white frills, featuring a high-collared top that shows off her cleavage and a short skirt with a ruffled hem. Her outfit is accessorized with black thigh-high stockings and black gloves, giving her a provocative yet stylish look. A black utility belt with multiple pouches and a holstered rifle is strapped around her waist, and she holds a sniper rifle with a tan stock and scope in her right hand, ready for action. The rifle's details, such as the scope and trigger guard, are visible.\n", "\n", "Her expression is determined and confident, with her eyes focused on the camera and lips slightly parted. 
The overall vibe of the image is a blend of cosplay and military, highlighting her dual roles as a maid and a soldier.\n" ] } ] }, { "cell_type": "code", "source": [ "letter = 'J'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "_shI8ri-W9PT", "outputId": "02191e58-b9bd-4d71-cca2-806b2e34129c" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for J(1).jpg\n", "\n", "\n", "A photograph featuring four women in a grand, ornate room with high ceilings and large, dark wooden pillars. The room is lit by soft, warm lights, casting a cozy glow on the polished wooden floor. The women are dressed in a mix of vintage and gothic-inspired outfits, adding to the scene's surreal and mysterious vibe.\n", "\n", "The woman on the far left is sitting on the floor with her legs bent, wearing a sleeveless white dress with a delicate floral pattern and a high neckline. She holds a rifle across her lap, her gaze directed at the camera with a solemn expression.\n", "\n", "The woman in the center is standing and holding a rifle across her chest. She has short, platinum blonde hair and wears a light pink lace dress with long sleeves, a high neckline, and a flowing skirt. Her expression is intense and focused.\n", "\n", "The woman on the far right is kneeling on the floor, holding a rifle across her lap. She has long, blonde hair and wears a black, long-sleeved dress with a high collar and a voluminous skirt. Her expression is neutral, adding to the scene's enigmatic atmosphere.\n", "\n", "The background features dark marble columns and a large wooden door, adding to the room's grandeur and historical feel. The overall mood is one of tension and mystery, enhanced by the women's\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for J(2).jpg\n", "\n", "\n", "A highly detailed digital artwork features a young female soldier, possibly in her late teens to early twenties, with pale skin and long, straight platinum blonde hair. She is wearing a full tactical getup, including a black combat helmet with a dark visor, a black tactical vest loaded with pouches, and a black tactical backpack. She is armed with an assault rifle slung over her shoulder and a handgun holstered on her right hip. Her expression is serious and focused, with a slight furrow in her brow.\n", "\n", "The background is a futuristic, industrial setting, with a smoky, blue-gray haze that adds a sense of tension and urgency. In the background, there are metallic structures and what appear to be futuristic machinery, suggesting a high-tech environment. The overall color scheme of the image is dark and muted, with blacks, grays, and blues dominating the scene.\n", "\n", "The artist's signature and the work's title, \"MIAKIMIC,\" are displayed in the top left corner of the image in a black rectangular box. 
The image is detailed and realistic, capturing the textures of the soldier's gear and the industrial backdrop with precision. The overall mood is intense and dramatic, emphasizing the soldier's readiness for action in a high-stakes environment.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for J(3).jpg\n", "\n", "\n", "A young East Asian girl stands confidently in a classroom, holding a large assault rifle. She is dressed in a sailor schoolgirl outfit, including a white sailor hat with a blue ribbon, a white sailor collar, a navy blue pleated skirt, a navy blue bow, white knee-high socks, and black loafers. The skirt is short, reaching just above her knees. The outfit is adorned with various badges and patches, adding to its detailed design.\n", "\n", "The rifle she holds is a realistic, black and brown assault rifle with a camouflage pattern on the stock, equipped with a red and black scope and a drum magazine. She holds it with both hands, with the barrel pointed downward, and her expression is neutral, with a slight smile.\n", "\n", "The classroom is bright and spacious, with large windows that let in natural light, and a white ceiling with visible grid lines. There are several wooden desks and chairs scattered around, and a large, blackboard-style window behind her. The floor is a light-colored, polished wood, adding to the clean and orderly atmosphere of the room.\n", "\n", "The overall scene is a mix of schoolgirl innocence and military readiness, combining elements of cosplay and practicality. The image is well-lit, with a soft, natural light that highlights the details of both the girl and her surroundings.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for J(4).jpg\n", "\n", "\n", "A young woman in a cosplay costume poses outdoors in a public area. She is dressed as a soldier, wearing a black leather jacket with red trim and a matching beret adorned with a red star emblem. Her outfit includes a red necktie, black shorts with red star accents, thigh-high black stockings, and black combat boots. The costume features a utility belt with pouches and pockets, adding to the military look.\n", "\n", "She holds a futuristic assault rifle with a black and silver design, reminiscent of a modern combat weapon. Her long, straight, light pink hair cascades over her shoulders, and she wears light pink lipstick. Her expression is intense and focused, with a serious look in her eyes.\n", "\n", "In the background, there is a blurred scene of people walking or standing, suggesting a busy urban setting. The sky is overcast, and the colors are muted, with a gray and blue palette dominating the scene. The texture of her outfit is smooth and shiny, while the rifle has a metallic, matte finish. The overall style of the cosplay is realistic and detailed, with attention to both the character's appearance and the props used.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for J(5).jpg\n", "\n", "\n", "A young woman stands on a ship, dressed in full combat gear, holding a rifle. She is wearing a black helmet with a visor, a black balaclava, and black gloves. The helmet has various electronic devices attached to it, including a camera and a microphone. She has light brown hair peeking out from under the helmet. Her expression is focused and determined.\n", "\n", "She is dressed in a tactical vest with multiple pockets and pouches, and a tan jacket over it. 
The jacket is made of a rugged, water-resistant material, and it has a high collar that protects her neck. She is also wearing a black backpack with straps going over her shoulders and across her chest. The backpack is made of a sturdy, military-grade fabric.\n", "\n", "In the background, the ship is visible, with its gray hull and various metal pipes and cables. The ocean is calm, with gentle waves reflecting the light of the setting sun, which is low on the horizon. The sky is overcast with clouds, giving a muted, grayish-blue color to the scene. The overall mood of the photograph is serious and intense, capturing the readiness and preparedness of the woman for a potential mission.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for J(6).jpg\n", "\n", "\n", "A young woman in a cosplay outfit poses in a modern industrial setting. She has fair skin and long, flowing pink hair with red horns on top, giving her a playful, otherworldly look. Her eyes are a striking blue, and she wears a confident expression with slightly parted lips.\n", "\n", "She's dressed in a white blouse with long sleeves and puffy cuffs, paired with a black leather harness that wraps around her waist and thighs, featuring metal buckles and straps. The harness is adorned with a small, red triangle emblem on the left side. She also wears black thigh-high stockings and black fingerless gloves with metal studs.\n", "\n", "In her hands, she grips a large, futuristic black gun, which she aims upward with a determined look. The gun has a metallic texture with a cylindrical barrel and a detailed design.\n", "\n", "The background features a large window with frosted glass and a geometric red triangle pattern. The room is industrial, with exposed pipes and metal beams. A wooden chair and a blue fabric blanket are visible in the background, adding a touch of warmth to the otherwise cold environment. The lighting is bright and natural, highlighting the details of her outfit and the setting.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for J(7).jpg\n", "\n", "\n", "A young woman in her early 20s with fair skin and short, dark brown hair styled in a bob with bangs is wielding an assault rifle. She's wearing a white dress shirt with the sleeves rolled up, showing off her forearms, and a tactical vest in black and olive green camouflage, indicating she's a soldier. The vest has multiple pockets and straps for carrying gear, and it looks rugged and functional. She's gripping the rifle with both hands, her fingers wrapped around the stock and trigger guard, and her gaze is focused and intense, looking directly at the camera.\n", "\n", "The rifle is black with a modern design, featuring a red laser sight mounted on the upper receiver, which is visible in the image. The background is a plain, dark gray, which makes the woman and her gear stand out. The lighting is bright and even, highlighting the details of her outfit and the rifle. The overall mood of the image is intense and focused, emphasizing the woman's readiness and determination.\n", "\n", "The photograph is sharp and well-composed, with a clear and detailed focus on the woman and her weapon. The background is minimalistic, keeping the viewer's attention on the subject.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for J(8).jpg\n", "\n", "\n", "A person stands against a dark, almost black background, holding a large, black assault rifle diagonally across their body. 
The person's face is not visible, and only their torso and legs are seen. They are wearing a white long-sleeved shirt, tucked into a high-waisted, pleated, gray plaid skirt. The skirt has a subtle pattern of gray and white checkered lines. The rifle, which is a military-style weapon, has a wooden stock and metal parts, including a barrel, magazine, and handguard. The person's hands are gripping the rifle securely, with one hand on the handguard and the other near the magazine. The texture of the rifle is rough, contrasting with the smooth, slightly shiny surface of the skirt. The lighting is dim, casting a moody atmosphere with highlights on the person's shirt and skirt, and shadows on the rifle and background. The overall composition emphasizes the juxtaposition of the person's civilian clothing with the military-style weapon, creating a sense of tension and contrast. The photo is taken from a low angle, emphasizing the height of the person and the length of the rifle. The image captures a moment of tension and conflict, with the person holding the rifle in a confident and ready stance.\n", "Prompt: Describe the image in 400 words\n", "\n", "\n", "caption for J(9).jpg\n", "\n", "\n", "A young Asian woman stands inside a dimly lit hallway, wearing a military-style getup. Her long, straight, green hair is tied back into a ponytail, and she's rocking a pair of black over-ear headphones. Her skin is fair, and she's got a serious, focused expression on her face. Her outfit includes a white short-sleeve shirt with a badge on the left sleeve, a camouflage tactical vest, and a matching camouflage skirt. She's also decked out with a black tactical belt and a black utility pouch strapped across her chest, along with various black pouches and gear on her waist. In her hands, she holds a black assault rifle, with the barrel pointing down, gripping it firmly.\n", "\n", "The background features a plain, light-colored wall with a door to the right and a hallway on the left. The lighting is dim, creating a moody atmosphere. The textures of her clothing and gear are rugged and military-grade, while her skin has a smooth texture. The overall vibe is serious and intense, with the woman appearing to be on a mission or in a combat situation. 
The image is a photograph, capturing a moment of preparedness and readiness.\n" ] } ] }, { "cell_type": "code", "source": [ "letter = 'K'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "id": "FX3HWzCIW9iA" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "letter = 'L'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "id": "V06Nqdn-W93_" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "letter = 'M'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "id": "2aupdD_oXCr5" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "letter = 'N'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "id": "Rn6gBLy0XD1I" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "letter = 'O'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "id": "cPQIIvkLXGHw" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "letter = 'P'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption 
= stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "id": "fwlpFPJSXHO7" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "letter = 'Q'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "id": "XAzlNJuEXJ8W" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "letter = 'R'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "id": "P04korRiXL2y" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "letter = 'S'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "id": "QRX8adlXXNaW" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "letter = 'T'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "id": "jv59CBmSXPLm" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "letter = 'U'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "id": "TtTegxk2XQrT" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "letter = 'V'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type 
(descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "id": "f4kSylDaXSLR" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "letter = 'W'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "id": "cVBzInnnXTuP" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "letter = 'X'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "id": "qYMthfr3XVLc" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "letter = 'Y'\n", "\n", "for number in range(9):\n", " input_image = Image.open(f\"/content/{letter} ({number+1}).jpg\").convert('RGB')\n", " # caption type (descriptive/training_prompt/rng-tags)\n", " # caption tone (formal/informal)\n", " # caption length (any/very short/short/medium-length/long/very long or a specific number)\n", " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n", " print(f\"\\n\\ncaption for {letter}({number+1}).jpg\\n\\n\")\n", " print(caption)" ], "metadata": { "id": "5sxZPxjwXXHC" }, "execution_count": null, "outputs": [] } ], "metadata": { "colab": { "provenance": [], "gpuType": "T4" }, "kernelspec": { "display_name": "Python 3", "name": "python3" }, "language_info": { "name": "python" }, "widgets": { "application/vnd.jupyter.widget-state+json": { "aa6bcb20909c4dabb4e50cbe669d2e59": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_76bc7cdb2b104e45a4003d6c22984ede", "IPY_MODEL_1e061aa7debb41d79ebef10055081c12", "IPY_MODEL_66675c37ea4b49149ead2b347ebaa537" ], "layout": "IPY_MODEL_c3051c4febeb4037bc69d05d43b8cda0" } }, "76bc7cdb2b104e45a4003d6c22984ede": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", 
"description": "", "description_tooltip": null, "layout": "IPY_MODEL_cf5ab98a09c84fe099dcd0712a6d06cb", "placeholder": "​", "style": "IPY_MODEL_e8b290c91e404cc0bc06b8750f4bd56e", "value": "preprocessor_config.json: 100%" } }, "1e061aa7debb41d79ebef10055081c12": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_e6e03ff76c504707be4eb27f162769a1", "max": 368, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_9b7e280a64bd401dbbac84654eaee07b", "value": 368 } }, "66675c37ea4b49149ead2b347ebaa537": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_daea442d834348259866caf9ede23031", "placeholder": "​", "style": "IPY_MODEL_85ae3126bd264e28a168a69b5843f56e", "value": " 368/368 [00:00<00:00, 22.1kB/s]" } }, "c3051c4febeb4037bc69d05d43b8cda0": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "cf5ab98a09c84fe099dcd0712a6d06cb": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, 
"max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "e8b290c91e404cc0bc06b8750f4bd56e": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "e6e03ff76c504707be4eb27f162769a1": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "9b7e280a64bd401dbbac84654eaee07b": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "daea442d834348259866caf9ede23031": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "85ae3126bd264e28a168a69b5843f56e": { "model_module": "@jupyter-widgets/controls", "model_name": 
"DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "eaad4156f51542809864313ae0ca6d4b": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_9e7f0858f3674014893a375ce0dba0c4", "IPY_MODEL_610c1bad13154c8cbb724a86da4a8abd", "IPY_MODEL_1a2748ed8b0843158e487295cd277a37" ], "layout": "IPY_MODEL_ce84c5b61fe0437588958bba178fb421" } }, "9e7f0858f3674014893a375ce0dba0c4": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_9c94abf070634379bf79a850189a7e20", "placeholder": "​", "style": "IPY_MODEL_4eb4542532f14ea785ef664dcc22f212", "value": "tokenizer_config.json: 100%" } }, "610c1bad13154c8cbb724a86da4a8abd": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_9cec881e50da4f88a730d9408a02068b", "max": 711, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_6d934ad9eb41426abcedcdbc24e52045", "value": 711 } }, "1a2748ed8b0843158e487295cd277a37": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_68e9cabe2c2b4dfda49e527d7897dba6", "placeholder": "​", "style": "IPY_MODEL_28bd944a6eb34cbd897ca24df9376aed", "value": " 711/711 [00:00<00:00, 28.3kB/s]" } }, "ce84c5b61fe0437588958bba178fb421": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, 
"grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "9c94abf070634379bf79a850189a7e20": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "4eb4542532f14ea785ef664dcc22f212": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "9cec881e50da4f88a730d9408a02068b": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "6d934ad9eb41426abcedcdbc24e52045": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": 
"@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "68e9cabe2c2b4dfda49e527d7897dba6": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "28bd944a6eb34cbd897ca24df9376aed": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "a39c5b146c084e83ab577dce631369b7": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_88d0b081a21243eb8d4054596e1c7ef9", "IPY_MODEL_53b67df9d4ae4ac9ab48e747d8dcd033", "IPY_MODEL_42cc71df9faf4659aefd3ba24dc9e670" ], "layout": "IPY_MODEL_0d474dbef7d4443e8acedf7a7b79c86b" } }, "88d0b081a21243eb8d4054596e1c7ef9": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_c7b17b22a78e4809b4cab83a86ed2965", "placeholder": "​", "style": "IPY_MODEL_e3e5117f373740889a529e673e0ac2c4", "value": "spiece.model: 100%" } }, "53b67df9d4ae4ac9ab48e747d8dcd033": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_c228a09ca0c44e97bb84fe87f25ec165", "max": 798330, "min": 0, 
"orientation": "horizontal", "style": "IPY_MODEL_2a374055a6bb4412a493c35958c7e860", "value": 798330 } }, "42cc71df9faf4659aefd3ba24dc9e670": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_b8ba56c2bd4346379da6d7628a1a6fc9", "placeholder": "​", "style": "IPY_MODEL_07ae6aed5cbd439b990a18e934fbcf2f", "value": " 798k/798k [00:00<00:00, 8.85MB/s]" } }, "0d474dbef7d4443e8acedf7a7b79c86b": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "c7b17b22a78e4809b4cab83a86ed2965": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "e3e5117f373740889a529e673e0ac2c4": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "c228a09ca0c44e97bb84fe87f25ec165": { "model_module": "@jupyter-widgets/base", "model_name": 
"LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "2a374055a6bb4412a493c35958c7e860": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "b8ba56c2bd4346379da6d7628a1a6fc9": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "07ae6aed5cbd439b990a18e934fbcf2f": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "20f386c28e3b4b5c8b2a1d473694f184": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ 
"IPY_MODEL_5061bd227d7a4c55823789ccc14c1bb7", "IPY_MODEL_b6130cbb281e4ef5b703fda57fd3722f", "IPY_MODEL_96235cd1bd0e4d689ffed199a5f7961b" ], "layout": "IPY_MODEL_ccd64ee52a114f31b60183be8dd4dd44" } }, "5061bd227d7a4c55823789ccc14c1bb7": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_1fb43523676d4bf0933755e0cb378776", "placeholder": "​", "style": "IPY_MODEL_1f32165a5e9e47f4b438e2a7dd7b2127", "value": "special_tokens_map.json: 100%" } }, "b6130cbb281e4ef5b703fda57fd3722f": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_56e3d9a26b00431da1b86e5054a9dc70", "max": 409, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_bd84808c448e440c9808a8ee13105018", "value": 409 } }, "96235cd1bd0e4d689ffed199a5f7961b": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_97a24237256a417f80fc9565b23444b7", "placeholder": "​", "style": "IPY_MODEL_23ee2c81346d4e119a748bb4fc99f9d5", "value": " 409/409 [00:00<00:00, 29.9kB/s]" } }, "ccd64ee52a114f31b60183be8dd4dd44": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "1fb43523676d4bf0933755e0cb378776": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", 
"_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "1f32165a5e9e47f4b438e2a7dd7b2127": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "56e3d9a26b00431da1b86e5054a9dc70": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "bd84808c448e440c9808a8ee13105018": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "97a24237256a417f80fc9565b23444b7": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, 
"grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "23ee2c81346d4e119a748bb4fc99f9d5": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "b10c589bfcd9473fbbe32a165d9984a9": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_f66816c9b3a64f52b2711a1d4d704b69", "IPY_MODEL_6fe65841b8604e58a6e838b59e554c70", "IPY_MODEL_124b40be3d5b4e1b891ba2a84f4c1086" ], "layout": "IPY_MODEL_c4f45da24d18425687b09ff05e4a4148" } }, "f66816c9b3a64f52b2711a1d4d704b69": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_fb784b2bcf344e48bebb14e5d1abe0a8", "placeholder": "​", "style": "IPY_MODEL_63820c8a4cfc418493387853b399ace9", "value": "tokenizer.json: 100%" } }, "6fe65841b8604e58a6e838b59e554c70": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_575379db79eb46bdae141858567937a4", "max": 2399357, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_04d5767da95c4393b6f632de9de6a8e4", "value": 2399357 } }, "124b40be3d5b4e1b891ba2a84f4c1086": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_c7da57ea831a4ff38868e0d90efe88ef", "placeholder": "​", "style": "IPY_MODEL_eabdf3b2e02d49e99e6f90b7c0d651ed", "value": " 2.40M/2.40M [00:00<00:00, 12.7MB/s]" } }, 
"c4f45da24d18425687b09ff05e4a4148": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "fb784b2bcf344e48bebb14e5d1abe0a8": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "63820c8a4cfc418493387853b399ace9": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "575379db79eb46bdae141858567937a4": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": 
null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "04d5767da95c4393b6f632de9de6a8e4": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "c7da57ea831a4ff38868e0d90efe88ef": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "eabdf3b2e02d49e99e6f90b7c0d651ed": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "8cc627f0f2c44d6aac739435d84d18e0": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_590e2d1c32f744a9bd2eca814f24930d", "IPY_MODEL_c4caa616ef0c4fb2b27b7b0dc8007694", "IPY_MODEL_b3baa81e3b92443ca101369ee61cdc59" ], "layout": "IPY_MODEL_d526edfee33d4107b3b9bf8ce2a6a5de" } }, "590e2d1c32f744a9bd2eca814f24930d": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_1800d33557ac4f31877d8aa9ccd2843c", "placeholder": "​", "style": "IPY_MODEL_e28e5e0deec042599f3a098f7de344df", 
"value": "config.json: 100%" } }, "c4caa616ef0c4fb2b27b7b0dc8007694": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_d7cda865776142ddb5fa0e6199f9ece4", "max": 576, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_cdf884e8d5874cf39b6c4e36d75e357f", "value": 576 } }, "b3baa81e3b92443ca101369ee61cdc59": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_d0010cba64424a30ae74afbe449f495c", "placeholder": "​", "style": "IPY_MODEL_bb90845af9ef4c7a910e9ed5153960b4", "value": " 576/576 [00:00<00:00, 25.9kB/s]" } }, "d526edfee33d4107b3b9bf8ce2a6a5de": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "1800d33557ac4f31877d8aa9ccd2843c": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, 
"padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "e28e5e0deec042599f3a098f7de344df": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "d7cda865776142ddb5fa0e6199f9ece4": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "cdf884e8d5874cf39b6c4e36d75e357f": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "d0010cba64424a30ae74afbe449f495c": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "bb90845af9ef4c7a910e9ed5153960b4": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": 
null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "a5855c924dc743c39b0a60c155fdda96": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_aa9ff6b5be1948b9a59bf677607598f3", "IPY_MODEL_00c024acefd9448cb973a520329b5bda", "IPY_MODEL_75ecc2bc46b443ddb3d5b0317c89ab62" ], "layout": "IPY_MODEL_f9db21e92be04beca4ab5f6b7f0d289d" } }, "aa9ff6b5be1948b9a59bf677607598f3": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_7816f3b7270d42cd95a69cda53288dca", "placeholder": "​", "style": "IPY_MODEL_d59b61a44a1d4e6cb846d9768903a987", "value": "model.safetensors: 100%" } }, "00c024acefd9448cb973a520329b5bda": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_692282b87f4740deaa4dbdc740497541", "max": 3511950624, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_843b7f3e0f5b4403b69e5c694b79aed6", "value": 3511950624 } }, "75ecc2bc46b443ddb3d5b0317c89ab62": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_81727e5eab5149d2a24007f9d59b517b", "placeholder": "​", "style": "IPY_MODEL_39585b15c40b4476889dc98ed110f6ff", "value": " 3.51G/3.51G [01:23<00:00, 42.1MB/s]" } }, "f9db21e92be04beca4ab5f6b7f0d289d": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, 
"justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "7816f3b7270d42cd95a69cda53288dca": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "d59b61a44a1d4e6cb846d9768903a987": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "692282b87f4740deaa4dbdc740497541": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "843b7f3e0f5b4403b69e5c694b79aed6": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "81727e5eab5149d2a24007f9d59b517b": { "model_module": 
"@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "39585b15c40b4476889dc98ed110f6ff": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "9b2cb8292d53406d97fd89bb0d53aaaa": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_e5b077fdb79d4461819413f2d75b00f4", "IPY_MODEL_2cd0959597d84b0184fc9e4b9c5a119e", "IPY_MODEL_36a8d82b88374c72aa6b5eb35d22b1f6" ], "layout": "IPY_MODEL_03262042969345e78d0c722934844214" } }, "e5b077fdb79d4461819413f2d75b00f4": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_bad4cb7eb51042c5b683953ac4ab373b", "placeholder": "​", "style": "IPY_MODEL_bcb524c0e5084a5f8a0eac92b33e386e", "value": "tokenizer_config.json: 100%" } }, "2cd0959597d84b0184fc9e4b9c5a119e": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_8227b4e0fa924ed6bb3f91e72163004f", "max": 50570, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_863a287788ea48558e72e15259db9650", "value": 50570 } }, "36a8d82b88374c72aa6b5eb35d22b1f6": { "model_module": 
"@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_865df9bf621647e7ab51044919c5ac83", "placeholder": "​", "style": "IPY_MODEL_df3450c5249441c2960ce40b72177127", "value": " 50.6k/50.6k [00:00<00:00, 3.01MB/s]" } }, "03262042969345e78d0c722934844214": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "bad4cb7eb51042c5b683953ac4ab373b": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "bcb524c0e5084a5f8a0eac92b33e386e": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "8227b4e0fa924ed6bb3f91e72163004f": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": 
"LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "863a287788ea48558e72e15259db9650": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "865df9bf621647e7ab51044919c5ac83": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "df3450c5249441c2960ce40b72177127": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "2900e012356645278c77b9d2c2a1f7ff": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_43d676701db24db88dabb37927f005a6", "IPY_MODEL_0696d2c6681c4c719f35d8c70818b8d8", "IPY_MODEL_a1f237be5f9a47f18adbecaedcde48a4" ], "layout": 
"IPY_MODEL_7666f625e2db42759540ed9b18cb04d6" } }, "43d676701db24db88dabb37927f005a6": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_aebaa183831e4ef2ab3bcd2fdc06d8fa", "placeholder": "​", "style": "IPY_MODEL_9bb60a5ea23b47d08369a002648068ab", "value": "tokenizer.json: 100%" } }, "0696d2c6681c4c719f35d8c70818b8d8": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_06fb48a30c3e46609179628588d2bd0d", "max": 9085657, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_48b9353cd83144b4a5b41c7f1a3c4f67", "value": 9085657 } }, "a1f237be5f9a47f18adbecaedcde48a4": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_d19f08ad784c4614a7612f81f5a55706", "placeholder": "​", "style": "IPY_MODEL_2dd3c04db9904878a2eb6d92878e6beb", "value": " 9.09M/9.09M [00:00<00:00, 46.0MB/s]" } }, "7666f625e2db42759540ed9b18cb04d6": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "aebaa183831e4ef2ab3bcd2fdc06d8fa": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, 
"align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "9bb60a5ea23b47d08369a002648068ab": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "06fb48a30c3e46609179628588d2bd0d": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "48b9353cd83144b4a5b41c7f1a3c4f67": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "d19f08ad784c4614a7612f81f5a55706": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, 
"justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "2dd3c04db9904878a2eb6d92878e6beb": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "8451f18d92e44e6f97b2293b6aa39d49": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_a63f7abc9c984796a3e3ce274f7fccef", "IPY_MODEL_2ff9281e97f34af1992af1052b66f611", "IPY_MODEL_6092b8069adf4b9690be177472ba245e" ], "layout": "IPY_MODEL_f2a8f7e2fd8b415c9d9c8c3ddcbfe771" } }, "a63f7abc9c984796a3e3ce274f7fccef": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_9d7d3f44d62f4d10866fd7fdd7901eac", "placeholder": "​", "style": "IPY_MODEL_8e3c110936d04d4aa3275c41820e3c9b", "value": "special_tokens_map.json: 100%" } }, "2ff9281e97f34af1992af1052b66f611": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_6182c5b765e049a49bc2383cd0e90c9c", "max": 345, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_27699bde1e9844a4823d1b0398c42a5f", "value": 345 } }, "6092b8069adf4b9690be177472ba245e": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_31aa337ae12545219135eddc6a0b3ce5", "placeholder": "​", "style": "IPY_MODEL_ceb9bc86a4d049f7a06708f045a07ecd", "value": " 345/345 [00:00<00:00, 27.7kB/s]" } }, "f2a8f7e2fd8b415c9d9c8c3ddcbfe771": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": 
"@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "9d7d3f44d62f4d10866fd7fdd7901eac": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "8e3c110936d04d4aa3275c41820e3c9b": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "6182c5b765e049a49bc2383cd0e90c9c": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, 
"overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "27699bde1e9844a4823d1b0398c42a5f": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "31aa337ae12545219135eddc6a0b3ce5": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "ceb9bc86a4d049f7a06708f045a07ecd": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "3556f5edde894b63ae368fd1fb816592": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_2a810a1a7bfd4afa93602fe9c5929b6c", "IPY_MODEL_194660582f7e4f48b280bc5665003a6b", "IPY_MODEL_06e6a7d2a72a4413a9db87976518404c" ], "layout": "IPY_MODEL_3b29bb51812f4013ac53fbf757809157" } }, "2a810a1a7bfd4afa93602fe9c5929b6c": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_2456f731f00b4a00ae179c246b27eef4", "placeholder": "​", "style": "IPY_MODEL_6d925927c88e40a1a0c11ebff9c18c82", "value": "config.json: 100%" } }, "194660582f7e4f48b280bc5665003a6b": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", 
"model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_10e8fcc6749940ddb404e1962ec7602f", "max": 926, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_5d543ccbcdb1439697a67f9d8dfc9962", "value": 926 } }, "06e6a7d2a72a4413a9db87976518404c": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_89f6a4183d8b4a09bfdf6d4468ce60f9", "placeholder": "​", "style": "IPY_MODEL_4318346385f543128617b4cdb566e552", "value": " 926/926 [00:00<00:00, 79.4kB/s]" } }, "3b29bb51812f4013ac53fbf757809157": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "2456f731f00b4a00ae179c246b27eef4": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "6d925927c88e40a1a0c11ebff9c18c82": { "model_module": 
"@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "10e8fcc6749940ddb404e1962ec7602f": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "5d543ccbcdb1439697a67f9d8dfc9962": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "89f6a4183d8b4a09bfdf6d4468ce60f9": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "4318346385f543128617b4cdb566e552": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, 
"8cc27d8c393f4d42949f45eff03460d7": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_41bc31ff01644dd9bc628a74e5986b37", "IPY_MODEL_99e07ad14b9c485c9cf758df4e2d3bc6", "IPY_MODEL_850fdb775c0646bf98ece317e754dbfe" ], "layout": "IPY_MODEL_90662b18c91749898cd7ed15274133be" } }, "41bc31ff01644dd9bc628a74e5986b37": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_ff2485421e1548039c2493796a208eff", "placeholder": "​", "style": "IPY_MODEL_4037f21950c04c96b60248cd62220a25", "value": "model.safetensors.index.json: 100%" } }, "99e07ad14b9c485c9cf758df4e2d3bc6": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_2afd5fd4f6a244afbbeb7500ffb8ee67", "max": 23950, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_962f80ff5b0f451eabeb4a8349071714", "value": 23950 } }, "850fdb775c0646bf98ece317e754dbfe": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_54c42da0e3ff44ed8a9f22e388db2341", "placeholder": "​", "style": "IPY_MODEL_c8a1352e5488458bbad1ecaba23dd1a8", "value": " 23.9k/23.9k [00:00<00:00, 2.13MB/s]" } }, "90662b18c91749898cd7ed15274133be": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": 
null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "ff2485421e1548039c2493796a208eff": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "4037f21950c04c96b60248cd62220a25": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "2afd5fd4f6a244afbbeb7500ffb8ee67": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "962f80ff5b0f451eabeb4a8349071714": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "54c42da0e3ff44ed8a9f22e388db2341": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", 
"_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "c8a1352e5488458bbad1ecaba23dd1a8": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "bab4df97ae5f45a19630bb99c2fc612a": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_c5b10f286a2443a48ff82b5b93a70d62", "IPY_MODEL_fe2fe435941848c5b5250b7260f486ff", "IPY_MODEL_fbf9b94ec50342a8ad5d640b61576a07" ], "layout": "IPY_MODEL_638e3190a8f6457d8c6f775032afbfd0" } }, "c5b10f286a2443a48ff82b5b93a70d62": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_dd2f5cd6d9a34c1b81ba2eb0ac5b5d0f", "placeholder": "​", "style": "IPY_MODEL_614e827d94b7467d93ff7f9a096e9e58", "value": "Downloading shards: 100%" } }, "fe2fe435941848c5b5250b7260f486ff": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_d6b8165336f647b09a433784e97e0e73", "max": 4, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_7c1724d20c424a008b54058ebb2076ec", "value": 4 } }, "fbf9b94ec50342a8ad5d640b61576a07": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", 
"_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_0b4f0424b16a4d408addc971df483f8c", "placeholder": "​", "style": "IPY_MODEL_883d9e3cb1a240d68e7e21d22de0da7d", "value": " 4/4 [06:21<00:00, 82.24s/it]" } }, "638e3190a8f6457d8c6f775032afbfd0": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "dd2f5cd6d9a34c1b81ba2eb0ac5b5d0f": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "614e827d94b7467d93ff7f9a096e9e58": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "d6b8165336f647b09a433784e97e0e73": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, 
"align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "7c1724d20c424a008b54058ebb2076ec": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "0b4f0424b16a4d408addc971df483f8c": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "883d9e3cb1a240d68e7e21d22de0da7d": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "907e2db2f8a443af81ad31a4e6b802dc": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_655c34951ec446e9ad915a0693ba453d", "IPY_MODEL_9fff9a20614045f99377169971092f53", "IPY_MODEL_e3a80619d7f74905bf71b38ffdef3dca" ], "layout": "IPY_MODEL_5e4de08259254b90a1833e4a52bdef07" } }, "655c34951ec446e9ad915a0693ba453d": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], 
"_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_2ed3276064c44a07a5881c478670d8e2", "placeholder": "​", "style": "IPY_MODEL_8de84515a5f8407eb2652bcf0a896d67", "value": "model-00001-of-00004.safetensors: 100%" } }, "9fff9a20614045f99377169971092f53": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_33063916070240c68df4d533ca4d2af6", "max": 4976698672, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_9fce9a73f16f4a448857490ca3e0cbe8", "value": 4976698672 } }, "e3a80619d7f74905bf71b38ffdef3dca": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_c183c90ab9b74fb3980f93fd3e49fbcf", "placeholder": "​", "style": "IPY_MODEL_ad4c28a5a14c41969c6243bfa5a7f5f5", "value": " 4.98G/4.98G [01:58<00:00, 42.4MB/s]" } }, "5e4de08259254b90a1833e4a52bdef07": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "2ed3276064c44a07a5881c478670d8e2": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, 
"grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "8de84515a5f8407eb2652bcf0a896d67": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "33063916070240c68df4d533ca4d2af6": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "9fce9a73f16f4a448857490ca3e0cbe8": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "c183c90ab9b74fb3980f93fd3e49fbcf": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, 
"order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "ad4c28a5a14c41969c6243bfa5a7f5f5": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "b64252230d0a49ba8d7a619b9a4dc2db": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_12dea66ec5fc4149a6d76861dbf2a515", "IPY_MODEL_e6c207bcb0a74d26aa41f39067bdab7d", "IPY_MODEL_2e6fc14164ff480f8a0533846ba2e649" ], "layout": "IPY_MODEL_1bea6a2714054136bc5600bf2ad64138" } }, "12dea66ec5fc4149a6d76861dbf2a515": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_ac0d3b7bc9f047fdaeb06cccbc9173df", "placeholder": "​", "style": "IPY_MODEL_075ff72eab074d86b90e1666f8b9d67e", "value": "model-00002-of-00004.safetensors: 100%" } }, "e6c207bcb0a74d26aa41f39067bdab7d": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_a5c8f180001040e7be951244e2a2f294", "max": 4999802720, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_53c8cb09101b49cc8cd72d303e5fcd80", "value": 4999802720 } }, "2e6fc14164ff480f8a0533846ba2e649": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_c0fbd2d628e84234beb4bf658348e52b", "placeholder": "​", "style": "IPY_MODEL_b24f46989aa249da951d75e64d5d4ee4", "value": " 5.00G/5.00G [01:58<00:00, 44.0MB/s]" } }, "1bea6a2714054136bc5600bf2ad64138": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": 
"1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "ac0d3b7bc9f047fdaeb06cccbc9173df": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "075ff72eab074d86b90e1666f8b9d67e": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "a5c8f180001040e7be951244e2a2f294": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "53c8cb09101b49cc8cd72d303e5fcd80": { 
"model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "c0fbd2d628e84234beb4bf658348e52b": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "b24f46989aa249da951d75e64d5d4ee4": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "815e9798057a4479b207785f98139e21": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_306f9a5f25d94c798ccb371d1b397063", "IPY_MODEL_5d808c053d5c4f37b5b165d0de680ce8", "IPY_MODEL_ac0bb5a8d881483eb65563b9243ad3a4" ], "layout": "IPY_MODEL_7be8f121c2814719981c893f9b74016e" } }, "306f9a5f25d94c798ccb371d1b397063": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_020fcf08f95e4eb78f8a955af164adff", "placeholder": "​", "style": "IPY_MODEL_23534a68fbb143599b0bb028f09ac074", "value": "model-00003-of-00004.safetensors: 100%" } }, "5d808c053d5c4f37b5b165d0de680ce8": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": 
"FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_79f5f27010b44a6a88f90fa9df939f48", "max": 4915916176, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_58c80fb3ad92451ba3851af1b1fad0e7", "value": 4915916176 } }, "ac0bb5a8d881483eb65563b9243ad3a4": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_24442baa68df453386b79acd74878c1d", "placeholder": "​", "style": "IPY_MODEL_65cd68dad7b14ee3b5532fb83968d3cd", "value": " 4.92G/4.92G [01:56<00:00, 41.9MB/s]" } }, "7be8f121c2814719981c893f9b74016e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "020fcf08f95e4eb78f8a955af164adff": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "23534a68fbb143599b0bb028f09ac074": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", 
"_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "79f5f27010b44a6a88f90fa9df939f48": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "58c80fb3ad92451ba3851af1b1fad0e7": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "24442baa68df453386b79acd74878c1d": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "65cd68dad7b14ee3b5532fb83968d3cd": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "cdfb0aeb78ae45b7a8371d55117eddcf": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { 
"_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_71406407dcd447d5b22b7dfb3cdbd439", "IPY_MODEL_70f977ee2a044b75bd8e248aced3096f", "IPY_MODEL_48d4a67ea29d4269ac014c04a7be5215" ], "layout": "IPY_MODEL_46644d29bb62462ab429581d1e8abe66" } }, "71406407dcd447d5b22b7dfb3cdbd439": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_ce16728b11b44418adc5e382a3384ee8", "placeholder": "​", "style": "IPY_MODEL_88df5fc3ade9434f9a716f770b02b6ca", "value": "model-00004-of-00004.safetensors: 100%" } }, "70f977ee2a044b75bd8e248aced3096f": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_6e69d6a4135741748ad7b0ca0db36c4d", "max": 1168138808, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_9680d371e3e84a3a97e1bb7b46d56601", "value": 1168138808 } }, "48d4a67ea29d4269ac014c04a7be5215": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_057a932a422f40a1b1d5d1355382809b", "placeholder": "​", "style": "IPY_MODEL_c6fc4e45e2fd4e02ac86af15ceb0360f", "value": " 1.17G/1.17G [00:27<00:00, 42.6MB/s]" } }, "46644d29bb62462ab429581d1e8abe66": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": 
null, "visibility": null, "width": null } }, "ce16728b11b44418adc5e382a3384ee8": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "88df5fc3ade9434f9a716f770b02b6ca": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "6e69d6a4135741748ad7b0ca0db36c4d": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "9680d371e3e84a3a97e1bb7b46d56601": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "057a932a422f40a1b1d5d1355382809b": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", 
"_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "c6fc4e45e2fd4e02ac86af15ceb0360f": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "43cbb633025845e7a6fec832cde23d1a": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_ec6aa86b63454691899a09e8da6a2b41", "IPY_MODEL_81607d81cc39426bbc8923a5ffa8975b", "IPY_MODEL_b03ead37155e43ab868eed2e204fe410" ], "layout": "IPY_MODEL_367ffb6144204d3b9c318d4a70affe44" } }, "ec6aa86b63454691899a09e8da6a2b41": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_e8eea123814a4efe9cfd0b61d8a5121a", "placeholder": "​", "style": "IPY_MODEL_4628b4533b8244c88c27169c8d63f533", "value": "Loading checkpoint shards: 100%" } }, "81607d81cc39426bbc8923a5ffa8975b": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_a8e4a52013d546d78f6eb03248012350", "max": 4, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_72485278675e47958ddbb4649f10ec50", "value": 4 } }, "b03ead37155e43ab868eed2e204fe410": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", 
"_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_1b9c1204bd58417db870566c51b6d791", "placeholder": "​", "style": "IPY_MODEL_53003428380f4a0498233c186c5bd63f", "value": " 4/4 [01:20<00:00, 17.19s/it]" } }, "367ffb6144204d3b9c318d4a70affe44": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "e8eea123814a4efe9cfd0b61d8a5121a": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "4628b4533b8244c88c27169c8d63f533": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "a8e4a52013d546d78f6eb03248012350": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, 
"grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "72485278675e47958ddbb4649f10ec50": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "1b9c1204bd58417db870566c51b6d791": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "53003428380f4a0498233c186c5bd63f": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "2f4d3f3967614cb69300ce77c4e41e4c": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_4823ef44903147168a3d412058570d9e", "IPY_MODEL_d9542330661048c185fce528770962cb", "IPY_MODEL_8a12f81141eb4d7b831d8a45b864c744" ], "layout": "IPY_MODEL_8fb152e385f2415681831138fd72c60f" } }, "4823ef44903147168a3d412058570d9e": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", 
"_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_aa3ec48011524c2fa0ef348824aec231", "placeholder": "​", "style": "IPY_MODEL_1467088a0b6e46e09d01ba113fdff46c", "value": "generation_config.json: 100%" } }, "d9542330661048c185fce528770962cb": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_4fde733c61124d04a0ba29691efd707d", "max": 230, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_d37b8916d8bd41beb18e75a4a454501a", "value": 230 } }, "8a12f81141eb4d7b831d8a45b864c744": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_d0a494b20ac54948b18e666f9097b346", "placeholder": "​", "style": "IPY_MODEL_cca31d76b9db449e9c20107d87a72ca1", "value": " 230/230 [00:00<00:00, 16.3kB/s]" } }, "8fb152e385f2415681831138fd72c60f": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "aa3ec48011524c2fa0ef348824aec231": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, 
"grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "1467088a0b6e46e09d01ba113fdff46c": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "4fde733c61124d04a0ba29691efd707d": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "d37b8916d8bd41beb18e75a4a454501a": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "d0a494b20ac54948b18e666f9097b346": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, 
"width": null } }, "cca31d76b9db449e9c20107d87a72ca1": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } } } }, "accelerator": "GPU" }, "nbformat": 4, "nbformat_minor": 0 }