diff --git "a/dummy_components_peft_lora.ipynb" "b/dummy_components_peft_lora.ipynb" new file mode 100644--- /dev/null +++ "b/dummy_components_peft_lora.ipynb" @@ -0,0 +1,4195 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "lIYdn1woOS1n", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "260bc8f3-f4e3-4ed0-c6cf-15a30210d9b9" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/1.8 MB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91m╸\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/1.8 MB\u001b[0m \u001b[31m876.1 kB/s\u001b[0m eta \u001b[36m0:00:03\u001b[0m\r\u001b[2K \u001b[91m━━━\u001b[0m\u001b[90m╺\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.1/1.8 MB\u001b[0m \u001b[31m2.0 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[91m╸\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m19.3 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m14.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m168.3/168.3 kB\u001b[0m \u001b[31m9.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m265.7/265.7 kB\u001b[0m \u001b[31m16.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m8.2/8.2 MB\u001b[0m \u001b[31m64.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25h" + ] + } + ], + "source": [ + "!pip install diffusers peft accelerate transformers -U -q" + ] + }, + { + "cell_type": "code", + "source": [ + "!huggingface-cli login" + ], + "metadata": { + "id": "W-kFSPuYTVSs", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "20b89ff3-fc2b-471c-95ab-f2fdfe2fe1c3" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\n", + " _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|\n", + " _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n", + " _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|\n", + " _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n", + " _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|\n", + "\n", + " To login, `huggingface_hub` requires a token generated from https://huggingface.co/settings/tokens .\n", + "Token: \n", + "Add token as git credential? 
(Y/n) Y\n", + "Token is valid (permission: write).\n", + "\u001b[1m\u001b[31mCannot authenticate through git-credential as no helper is defined on your machine.\n", + "You might have to re-authenticate when pushing to the Hugging Face Hub.\n", + "Run the following command in your terminal in case you want to set the 'store' credential helper as default.\n", + "\n", + "git config --global credential.helper store\n", + "\n", + "Read https://git-scm.com/book/en/v2/Git-Tools-Credential-Storage for more details.\u001b[0m\n", + "Token has not been saved to git credential helper.\n", + "Your token has been saved to /root/.cache/huggingface/token\n", + "Login successful\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "from diffusers import UNet2DConditionModel, AutoencoderKL, DDIMScheduler\n", + "from transformers import CLIPTextModel, CLIPTokenizer, CLIPTextModelWithProjection\n", + "import torch\n", + "from peft import LoraConfig\n", + "\n", + "\n", + "def get_kwargs_for_sd():\n", + " scheduler_kwargs = {\n", + " \"beta_start\": 0.00085,\n", + " \"beta_end\": 0.012,\n", + " \"beta_schedule\": \"scaled_linear\",\n", + " \"clip_sample\": False,\n", + " \"set_alpha_to_one\": False,\n", + " \"steps_offset\": 1,\n", + " }\n", + " unet_kwargs = {\n", + " \"block_out_channels\": (32, 64),\n", + " \"layers_per_block\": 2,\n", + " \"sample_size\": 32,\n", + " \"in_channels\": 4,\n", + " \"out_channels\": 4,\n", + " \"down_block_types\": (\"DownBlock2D\", \"CrossAttnDownBlock2D\"),\n", + " \"up_block_types\": (\"CrossAttnUpBlock2D\", \"UpBlock2D\"),\n", + " \"cross_attention_dim\": 32,\n", + " }\n", + " vae_kwargs = {\n", + " \"block_out_channels\": [32, 64],\n", + " \"in_channels\": 3,\n", + " \"out_channels\": 3,\n", + " \"down_block_types\": [\"DownEncoderBlock2D\", \"DownEncoderBlock2D\"],\n", + " \"up_block_types\": [\"UpDecoderBlock2D\", \"UpDecoderBlock2D\"],\n", + " \"latent_channels\": 4,\n", + " }\n", + " return scheduler_kwargs, unet_kwargs, vae_kwargs\n", + "\n", + "\n", + "def get_kwargs_for_sdxl():\n", + " scheduler_kwargs = {\n", + " \"beta_start\": 0.00085,\n", + " \"beta_end\": 0.012,\n", + " \"beta_schedule\": \"scaled_linear\",\n", + " \"timestep_spacing\": \"leading\",\n", + " \"steps_offset\": 1,\n", + " }\n", + " unet_kwargs = {\n", + " \"block_out_channels\": (32, 64),\n", + " \"layers_per_block\": 2,\n", + " \"sample_size\": 32,\n", + " \"in_channels\": 4,\n", + " \"out_channels\": 4,\n", + " \"down_block_types\": (\"DownBlock2D\", \"CrossAttnDownBlock2D\"),\n", + " \"up_block_types\": (\"CrossAttnUpBlock2D\", \"UpBlock2D\"),\n", + " \"attention_head_dim\": (2, 4),\n", + " \"use_linear_projection\": True,\n", + " \"addition_embed_type\": \"text_time\",\n", + " \"addition_time_embed_dim\": 8,\n", + " \"transformer_layers_per_block\": (1, 2),\n", + " \"projection_class_embeddings_input_dim\": 80, # 6 * 8 + 32\n", + " \"cross_attention_dim\": 64,\n", + " }\n", + " vae_kwargs = {\n", + " \"block_out_channels\": [32, 64],\n", + " \"in_channels\": 3,\n", + " \"out_channels\": 3,\n", + " \"down_block_types\": [\"DownEncoderBlock2D\", \"DownEncoderBlock2D\"],\n", + " \"up_block_types\": [\"UpDecoderBlock2D\", \"UpDecoderBlock2D\"],\n", + " \"latent_channels\": 4,\n", + " \"sample_size\": 128,\n", + " }\n", + "\n", + " return scheduler_kwargs, unet_kwargs, vae_kwargs\n", + "\n", + "def get_dummy_components(scheduler_kwargs, unet_kwargs, vae_kwargs, has_two_text_encoders=False):\n", + " scheduler_cls = DDIMScheduler\n", + " rank = 4\n", + "\n", + " torch.manual_seed(0)\n", + " 
unet = UNet2DConditionModel(**unet_kwargs)\n", + "\n", + " scheduler = scheduler_cls(**scheduler_kwargs)\n", + "\n", + " torch.manual_seed(0)\n", + " vae = AutoencoderKL(**vae_kwargs)\n", + "\n", + " text_encoder = CLIPTextModel.from_pretrained(\"peft-internal-testing/tiny-clip-text-2\")\n", + " tokenizer = CLIPTokenizer.from_pretrained(\"peft-internal-testing/tiny-clip-text-2\")\n", + "\n", + " if has_two_text_encoders:\n", + " text_encoder_2 = CLIPTextModelWithProjection.from_pretrained(\"peft-internal-testing/tiny-clip-text-2\")\n", + " tokenizer_2 = CLIPTokenizer.from_pretrained(\"peft-internal-testing/tiny-clip-text-2\")\n", + "\n", + " torch.manual_seed(0)\n", + " text_lora_config = LoraConfig(\n", + " r=rank,\n", + " lora_alpha=rank,\n", + " target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"out_proj\"],\n", + " init_lora_weights=False,\n", + " )\n", + "\n", + " torch.manual_seed(0)\n", + " unet_lora_config = LoraConfig(\n", + " r=rank, lora_alpha=rank, target_modules=[\"to_q\", \"to_k\", \"to_v\", \"to_out.0\"], init_lora_weights=False\n", + " )\n", + "\n", + " if has_two_text_encoders:\n", + " pipeline_components = {\n", + " \"unet\": unet,\n", + " \"scheduler\": scheduler,\n", + " \"vae\": vae,\n", + " \"text_encoder\": text_encoder,\n", + " \"tokenizer\": tokenizer,\n", + " \"text_encoder_2\": text_encoder_2,\n", + " \"tokenizer_2\": tokenizer_2,\n", + " \"image_encoder\": None,\n", + " \"feature_extractor\": None,\n", + " }\n", + " else:\n", + " pipeline_components = {\n", + " \"unet\": unet,\n", + " \"scheduler\": scheduler,\n", + " \"vae\": vae,\n", + " \"text_encoder\": text_encoder,\n", + " \"tokenizer\": tokenizer,\n", + " \"safety_checker\": None,\n", + " \"feature_extractor\": None,\n", + " \"image_encoder\": None,\n", + " }\n", + " return pipeline_components, text_lora_config, unet_lora_config" + ], + "metadata": { + "id": "kqOsfOR9Razp", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 86, + "referenced_widgets": [ + "625a479bef834f9c88abacb11919026a", + "070d6fcf15d343f093e4f9c83c22c747", + "9f0a68319c9a437bac618f7da009dbe7", + "6ac2546c33414e94abaf7139d1ffc81a", + "84a7ff76e3da4e9582f88eda72e3f3c5", + "44ad0022d5494f1baad9774e5c461d98", + "07f23b37fa5545dab2be516557ccd6d6", + "3ab367883dc04e4c85ad7b2296ef9074", + "b1e0f37591c3421dac23e3077da3b802", + "6177ce4c813e4db9acb0f66e909ae1fb", + "4f582c46881a41a2b0209c709f4cee1b" + ] + }, + "outputId": "42f34629-2e0e-4ce1-febc-8bbcf09f25b2" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. This is a one-time only operation. 
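`get_dummy_components` is the core of the notebook: the tiny UNet/VAE kwargs plus the Hub-hosted tiny CLIP give a pipeline small enough for CPU CI, and the two `LoraConfig`s target the standard attention projections (`to_q`/`to_k`/`to_v`/`to_out.0` in the UNet, `q_proj`/`k_proj`/`v_proj`/`out_proj` in the text encoder) with `init_lora_weights=False` so the adapters change outputs immediately. A rough sketch, not in the original cells, of how small these components are and how few parameters the adapters add; it assumes the helpers above have been executed:

```python
# Instantiate the SD-sized dummy components and inspect their sizes.
scheduler_kwargs, unet_kwargs, vae_kwargs = get_kwargs_for_sd()
components, text_lora_config, unet_lora_config = get_dummy_components(
    scheduler_kwargs, unet_kwargs, vae_kwargs
)
unet, text_encoder = components["unet"], components["text_encoder"]

print("UNet params:", sum(p.numel() for p in unet.parameters()))
print("Text encoder params:", sum(p.numel() for p in text_encoder.parameters()))

# Freeze the base weights, then inject the LoRA adapters; only the new
# low-rank matrices remain trainable.
unet.requires_grad_(False)
text_encoder.requires_grad_(False)
unet.add_adapter(unet_lora_config)
text_encoder.add_adapter(text_lora_config)

trainable = sum(p.numel() for p in unet.parameters() if p.requires_grad)
print("Trainable LoRA params in the UNet:", trainable)
```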
You can interrupt this and resume the migration later on by calling `transformers.utils.move_cache()`.\n" + ] + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "0it [00:00, ?it/s]" + ], + "application/vnd.jupyter.widget-view+json": { + "version_major": 2, + "version_minor": 0, + "model_id": "625a479bef834f9c88abacb11919026a" + } + }, + "metadata": {} + } + ] + }, + { + "cell_type": "code", + "source": [ + "from diffusers import StableDiffusionPipeline\n", + "\n", + "# Handle for SD first.\n", + "scheduler_kwargs, unet_kwargs, vae_kwargs = get_kwargs_for_sd()\n", + "pipeline_components, text_lora_config, unet_lora_config = get_dummy_components(scheduler_kwargs, unet_kwargs, vae_kwargs)\n", + "\n", + "sd_pipe = StableDiffusionPipeline(**pipeline_components)" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 278, + "referenced_widgets": [ + "9b1c6700d1884fe6b4798371e0bd8cc8", + "3be247f456f04ed989e0ba5a7f201304", + "09458438fd5a45649b001066072b5324", + "8fbe110631aa42a68ca31c18c8d6f9a9", + "19a1a4fad4ed40188360efa6cd138c5c", + "e37d76411bb44b09875b61fd3169fea8", + "47124da9d5164dc9b689495bfdf9221d", + "b1b9d07e4d4347d3bb050e511b7325bb", + "dac85c8ab3904d4a8930bc8ee8cbc4a1", + "e2677f13fe8f4c9a99cfab388db585f9", + "987f85eb437e4874a42607045dab8cca", + "d119779ef76c47f9a7845b234c9bdcff", + "ea0692741a004303ad2b819de6ed57a2", + "ce7cca42cc894489a787def02a26207d", + "0ace67da0d6c4bc09f86092d416bb017", + "8ae2b696b864454cab82e13038dd307e", + "548bec535dfe4967b156906d3a40ed14", + "657ecd8a1be0424e9d093e29453f9345", + "6fe16b42ba1349be8c359f13c5eabff7", + "3e196d34e54b47489012ca50b2274ae1", + "3d8f11142b0e4f45af44626a8fc3948d", + "6a404e4930b24603b798b694834f8a71", + "0967bc173e5c4f55add1f1e0ed516373", + "b6a02880ad7249c8915107b4836b2680", + "a1625b7f48b7440a9f173849cda24cfa", + "b30ae14f998b4bc395c853c0995e0c33", + "94497be397b941eca594f27a650be913", + "ed16ce7e58334bb89984b6cc2c66fa13", + "d1009482e5bb4ee19cfd4922e3d9a065", + "71e4ec4b8b4245caa3f82634c061d329", + "81930834a286440ca3cb0ed7ead484df", + "69a5084dafbc4da69aa99bc8d838e5a6", + "e150f7910df04687855e99215493f301", + "875d6a76499c48e39745b5bd64c9d4f0", + "91bda25e6f094233885c19323a6432fa", + "242da1bf2146466493d46d88d461e174", + "c029a8022a4348d9b137da8b9a1cdd7d", + "1e8f9c2ea4c745ab8276ad2ac8dd2fd4", + "16e72113d1884b598b994521f95e6a7d", + "44a259bb73ba4fb09857a0564ed235ed", + "d1b474f9ce1b445cb1b812a257924e69", + "1301f33f4f754d09b2e80e9303c0a9b3", + "a108e6a7584c4340b36f7eea9220aeb2", + "b8322d1204774b628156d0013cbf72ea", + "4bd8c9f53cdd48fe89693ff259bc298e", + "4b159528337647238cd0cabe9c5259bf", + "346ce041d7b64d46b73b98905753d34d", + "beb96d11211f4dff8949607c2d72660a", + "6cf41d9c36d1471ea0634fd44c22f115", + "bfe7865161454a7aac1c5b284e88a6ba", + "bf3fa72c5b814395a6f63c22064792e0", + "6ccfa15da3c54778a2a8848f7cbde4b3", + "494b4ce59565433da0bcfec96669d873", + "e05a0c25f95d45e895a875a9b38ddf05", + "36189b3d862b4235a956f275fc63b769", + "a6fcafe4f30f4a4abfbfbf765dc5a66f", + "dc77dbde4870402ea9cd0945e9aa624e", + "bcbc3745f35c40bc8123c206280497e4", + "a7605d866c4646ac8f5b4002f384ce49", + "2b257f0180424bbb8898a2938f72c720", + "06bafa4d80cc480fabeac220b75fc972", + "93d50a3b4d184cedb669899b83b04a02", + "182f109744984d6a99ba1b0518faecb5", + "f57d6d3a88774029af5448a7dce49d90", + "19321ebd48814731b0474e10e1a81553", + "d12a97c4689e4cad94189e9ed1ebac79", + "eb2975aa8a8544a98a0343488c2f43e9", + "6997dac2023e41ffa7943907a7e21f57", + "ec17fd37335a413e8de6b127585842e9", + 
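Before any LoRA is attached, the freshly assembled `sd_pipe` can be smoke-tested on CPU in a few seconds. A small sketch, not in the original notebook; the prompt and step count are arbitrary:

```python
import torch

# Two denoising steps are enough to confirm the dummy components fit together.
# With sample_size=32 and a 2-block VAE this yields a 64x64 RGB array, not a
# meaningful image.
out = sd_pipe(
    "a photo of a cat",
    num_inference_steps=2,
    guidance_scale=6.0,
    generator=torch.manual_seed(0),
    output_type="np",
)
print(out.images[0].shape)  # (64, 64, 3)
```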
"c7a63c93fe754d97a38ea13488cf8e91", + "c95621c6f29f432cbec4799f0cb9e50e", + "cb99472eeadd4857ab94ef7961b942de", + "78f040a2195e41c793e04026c4436c00", + "3d7f935a31704e2aadb299b063ab05aa", + "4c3f26bbdbfd4da28acbd9babdd9e82f", + "dcbb8cedff6548e0bcd459048827992f", + "bfa9c751fcbc4ad89bcd7156d0712d80" + ] + }, + "id": "1JT3iqE4S-A_", + "outputId": "928bbb14-c1e2-4eab-f7f8-42900c05fc1f" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "display_data", + "data": { + "text/plain": [ + "config.json: 0%| | 0.00/548 [00:00 by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "sd_pipe.push_to_hub(\"hf-internal-testing/tiny-sd-pipe\")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + }, + "id": "8_UHCMoUT9Iy", + "outputId": "ab058031-30b3-46e3-b4e6-8caeee82286c" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "'https://huggingface.co/hf-internal-testing/tiny-sd-pipe/tree/main/'" + ], + "application/vnd.google.colaboratory.intrinsic+json": { + "type": "string" + } + }, + "metadata": {}, + "execution_count": 5 + } + ] + }, + { + "cell_type": "code", + "source": [ + "from peft.utils import get_peft_model_state_dict\n", + "from diffusers.utils import convert_state_dict_to_diffusers\n", + "\n", + "sd_pipe.text_encoder.add_adapter(text_lora_config)\n", + "sd_pipe.unet.add_adapter(unet_lora_config)\n", + "\n", + "text_encoder_state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(sd_pipe.text_encoder))\n", + "unet_state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(sd_pipe.unet))\n", + "\n", + "output_dir = \"tiny-sd-lora-peft\"\n", + "StableDiffusionPipeline.save_lora_weights(\n", + " save_directory=output_dir,\n", + " unet_lora_layers=unet_state_dict,\n", + " text_encoder_lora_layers=text_encoder_state_dict,\n", + ")" + ], + "metadata": { + "id": "uiZPJr5cUNbY" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "from diffusers import StableDiffusionXLPipeline\n", + "\n", + "scheduler_kwargs, unet_kwargs, vae_kwargs = get_kwargs_for_sdxl()\n", + "pipeline_components, text_lora_config, unet_lora_config = get_dummy_components(scheduler_kwargs, unet_kwargs, vae_kwargs, has_two_text_encoders=True)\n", + "\n", + "sdxl_pipe = StableDiffusionXLPipeline(**pipeline_components)" + ], + "metadata": { + "id": "LAuR7D4nU4i8" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "sdxl_pipe.push_to_hub(\"hf-internal-testing/tiny-sdxl-pipe\")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + }, + "id": "mmsAjEOXV05-", + "outputId": "0ddcba8e-c298-4e7d-9ebe-3c47a120268c" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "'https://huggingface.co/hf-internal-testing/tiny-sdxl-pipe/tree/main/'" + ], + "application/vnd.google.colaboratory.intrinsic+json": 
{ + "type": "string" + } + }, + "metadata": {}, + "execution_count": 8 + } + ] + }, + { + "cell_type": "code", + "source": [ + "sdxl_pipe.text_encoder.add_adapter(text_lora_config)\n", + "sdxl_pipe.text_encoder_2.add_adapter(text_lora_config)\n", + "sdxl_pipe.unet.add_adapter(unet_lora_config)\n", + "\n", + "text_encoder_state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(sdxl_pipe.text_encoder))\n", + "text_encoder_2_state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(sdxl_pipe.text_encoder_2))\n", + "unet_state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(sdxl_pipe.unet))\n", + "\n", + "output_dir = \"tiny-sdxl-lora-peft\"\n", + "\n", + "StableDiffusionXLPipeline.save_lora_weights(\n", + " save_directory=output_dir,\n", + " unet_lora_layers=unet_state_dict,\n", + " text_encoder_lora_layers=text_encoder_state_dict,\n", + " text_encoder_2_lora_layers=text_encoder_2_state_dict,\n", + ")" + ], + "metadata": { + "id": "3LaTMU-zVVM3" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "!ls -lh \"tiny-sd-lora-peft\"" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "damqUwbVWOF7", + "outputId": "73f5faa2-f1ad-44ee-cd02-de2752337c52" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "total 132K\n", + "-rw-r--r-- 1 root root 130K Dec 31 14:26 pytorch_lora_weights.safetensors\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "!ls -lh \"tiny-sdxl-lora-peft\"" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "QLFe_lGxWSA9", + "outputId": "6754d061-c64f-49f8-d2e3-29d562bdc2a3" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "total 272K\n", + "-rw-r--r-- 1 root root 271K Dec 31 14:26 pytorch_lora_weights.safetensors\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "from huggingface_hub import create_repo, upload_folder\n", + "\n", + "folder_path = \"tiny-sd-lora-peft\"\n", + "repo_id = create_repo(repo_id=f\"hf-internal-testing/{folder_path}\", exist_ok=True).repo_id\n", + "upload_folder(\n", + " repo_id=repo_id, folder_path=folder_path\n", + ")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 67, + "referenced_widgets": [ + "0d8b31c484a949ec8e272b7a979b61c8", + "04bd33e17887489a8fbf8c7c4e8423d2", + "cb9b73e5233442c1b8b589a7eb71bbed", + "ab38e51fefb949f2985f4340aea22f61", + "7a1818077a554962996b2c63d863cd91", + "46fee76f1e744fff9072df9333e8d1c4", + "b10fc99e43fa4107baa6945ba20218d2", + "c25cea4c5fcd4ab5bfc750aa3c55391e", + "209394b5f71142c98a9a938ca834c210", + "6821c45bff38439abdd0d269e7f72acc", + "98c9aabeb5db48d98e1065296a03f4ee" + ] + }, + "id": "52iH_7s0WZ8A", + "outputId": "d9d9f380-cb3a-4939-a041-654c666a5391" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "display_data", + "data": { + "text/plain": [ + "pytorch_lora_weights.safetensors: 0%| | 0.00/132k [00:00