diff --git "a/TIES_LoRa_Merge_Script.ipynb" "b/TIES_LoRa_Merge_Script.ipynb" --- "a/TIES_LoRa_Merge_Script.ipynb" +++ "b/TIES_LoRa_Merge_Script.ipynb" @@ -3,8 +3,7 @@ "nbformat_minor": 0, "metadata": { "colab": { - "provenance": [], - "gpuType": "T4" + "provenance": [] }, "kernelspec": { "name": "python3", @@ -12,8 +11,7 @@ }, "language_info": { "name": "python" - }, - "accelerator": "GPU" + } }, "cells": [ { @@ -41,7 +39,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "aa3877d3-088e-423d-96b7-78befeab2734" + "outputId": "1eda0d91-edb2-4c2a-ddb0-aa2174c25519" }, "execution_count": 1, "outputs": [ @@ -67,129 +65,9 @@ "import numpy as np\n", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "\n", - "# filter_and_save\n", - "# Use this method to change the scale = (rank/alpha) value of a given _lora\n", - "# This method will also eliminate noise. All values < resolution * e-6 will be set to 0\n", - "# in the delta_W of this LoRa. The processed LoRa will be saved as a .safetensor file in fp16\n", - "# The rank of the LoRa affect the file size. At rank 32 the filesize is 300 MB , at rank 16 the filesize is 150 MB and so on.\n", - "#\n", - "# When merging LoRa , it is important that\n", - "# a) the scale of all the merged LoRas are the same. I use the scale = (alpha/rank) = 0.5 at all times.\n", - "# For rank 32 , the alpha must be 16 , for example.\n", - "#\n", - "# b) The rank of the merged LoRas should be 32 or below , any larger values might trigger a 'Out of Memory' error on Google Colab GPU:s\n", - "# --------------\n", - "# _lora - The lora which you wish to process\n", - "# savefile_name - The name of the savefile to be created. Make sure the savefile_name ends with the '.safetensors' suffix\n", - "# new_rank - The rank you wish to set the LoRa to\n", - "# new_alpha - The alpha value you wish to set the LoRa to. For proper scaling ,\n", - "# set the alpha value to half the value of the rank so (alpha/rank) = 0.5\n", - "# This is a very common scale for trained LoRa\n", - "#\n", - "# resolution - All values < resolution * e-6 will be set to 0\n", - "# in the delta_W of this LoRa. This is useful to eliminate 'junk' in the output of the\n", - "# Lora when scaling it to strength above 0.8. 
-        "# count_zeros\n",
-        "# Use this method to gauge how large a resolution you should set for a given LoRa.\n",
-        "# This function can serve as a 'preview' prior to running either the filter_and_save or\n",
-        "# merge_and_save methods. Since it does not use SVD to re-pack the LoRa,\n",
-        "# you can run this method on a non-GPU instance on Colab.\n",
-        "#-----------\n",
-        "# _lora      - the LoRa which you wish to process\n",
-        "# resolution - all values < resolution * 1e-6 will be set to 0\n",
-        "def count_zeros(_lora, resolution):\n",
-        "  count = 0\n",
-        "  for key in _lora:count = count + 1\n",
-        "  NUM_ITEMS = count\n",
-        "  count = 0\n",
-        "  #-----#\n",
-        "  thresh = resolution*0.000001 # 1e-6\n",
-        "\n",
-        "  print(f'at resolution = {resolution}e-6 :')\n",
-        "  for key in _lora:\n",
-        "    if f'{key}'.find('alpha') > -1:\n",
-        "      count = count + 1\n",
-        "      continue\n",
-        "    #------#\n",
-        "    if not f'{key}'.find('lora_down') > -1: continue\n",
-        "    up = f'{key}'.replace('lora_down' , 'lora_up')\n",
-        "    down = f'{key}'\n",
-        "    #-------#\n",
-        "    delta_W = torch.matmul(_lora[up],_lora[down]).to(device = device , dtype=torch.float32)\n",
-        "    N = delta_W.numel()\n",
-        "    y = delta_W.flatten().to(device = device , dtype=torch.float32)\n",
-        "    values,indices = torch.sort(y, descending = False) # smallest -> largest elements\n",
-        "    y = torch.ones(y.shape).to(device = device , dtype=torch.float32)\n",
-        "    y[indices[values>thresh]] = 0\n",
-        "    neg_pcnt = round((100*torch.sum(y) / N).item(),2)\n",
-        "    y[indices[values<-thresh]] = 0\n",
-        "    count = count + 2\n",
-        "    pcnt = round((100*torch.sum(y) / N).item(),2)\n",
-        "    neg_pcnt = round(neg_pcnt - pcnt,2) # remove zero % from neg_pcnt\n",
-        "    pos_pcnt = round(100 - pcnt - neg_pcnt,2)\n",
-        "    print(f'at {count} / {NUM_ITEMS} : {pcnt} % zeros , {pos_pcnt} % pos. , {neg_pcnt} % neg ')\n",
-        "  #------#\n",
-        "#-----#\n",
-        "\n",
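A possible preview loop for the removed `count_zeros` (hypothetical usage; `'my_lora.safetensors'` is a placeholder path), sweeping a few resolutions before committing to one:

```python
from safetensors.torch import load_file
import torch

# Cast to fp32 first, as the notebook's driver code does
lora = {k: v.to(dtype=torch.float32)
        for k, v in load_file('my_lora.safetensors').items()}

for resolution in (50, 100, 200, 400):
    count_zeros(lora, resolution)   # prints % zeros / % pos. / % neg per layer
```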
         "# This method rescales a _lora to a given ratio. I haven't tested it\n",
-        "# But this is more or less how it works\n",
-        "def rescale_and_save(_lora , savefile_name, new_ratio):\n",
+        "# yet but this is more or less how it works\n",
+        "def rescale_and_save(_lora , savefile_name, new_ratio , rank):\n",
         "  count = 0\n",
         "  lora = {}\n",
         "  for key in _lora:count = count + 1\n",
@@ -199,13 +77,13 @@
         "  for key in _lora:\n",
         "    if not f'{key}'.find('alpha') > -1: continue\n",
         "    alpha = f'{key}'\n",
-        "    up = f'{key}'.replace('alpha' , 'lora_up')\n",
-        "    down = f'{key}'.replace('alpha' , 'lora_down')\n",
+        "    up = f'{key}'.replace('alpha' , 'lora_up.weight')\n",
+        "    down = f'{key}'.replace('alpha' , 'lora_down.weight')\n",
         "    #------#\n",
-        "    rank = torch.matmul(_lora[up]*0,_lora[down]*0).shape[0]\n",
         "    new_alpha = torch.tensor(new_ratio*rank).to(device = device , dtype=torch.float32)\n",
         "    lora[up] = torch.round(torch.sqrt(_lora[alpha]/new_alpha)*_lora[up], decimals = decimals).to(device = device , dtype=torch.float32)\n",
         "    lora[down] = torch.round(torch.sqrt(_lora[alpha]/new_alpha)*_lora[down], decimals = decimals).to(device = device , dtype=torch.float32)\n",
+        "    #-----#\n",
         "    lora[alpha] = (new_alpha/_lora[alpha])*_lora[alpha].to(device = device , dtype=torch.float32)\n",
         "    count = count + 3\n",
         "    print(f'{count} / {NUM_ITEMS}')\n",
@@ -219,775 +97,1737 @@
         "  save_file(lora , f'{savefile_name}')\n",
         "  #-----------#\n",
         "\n",
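The identity the rescale relies on can be checked in isolation: the effective update is W_eff = (alpha/rank) * lora_up @ lora_down, so multiplying both factors by sqrt(alpha/new_alpha) while setting alpha = new_alpha leaves W_eff unchanged. A standalone sanity check with made-up shapes:

```python
import torch

rank, alpha, new_ratio = 4, 4.0, 0.5
new_alpha = new_ratio * rank                      # 2.0

up, down = torch.randn(768, rank), torch.randn(rank, 768)
w_eff_old = (alpha / rank) * (up @ down)

s = (alpha / new_alpha) ** 0.5                    # factor applied to each matrix
w_eff_new = (new_alpha / rank) * ((s * up) @ (s * down))

# (new_alpha/rank) * s^2 = (new_alpha/rank) * (alpha/new_alpha) = alpha/rank
print(torch.allclose(w_eff_old, w_eff_new, atol=1e-5))  # True
```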
-        "# merge_and_save\n",
-        "# This method uses a general neural-net merging method known as TIES - a loose abbreviation of\n",
-        "# 'Trim, Elect Sign & Merge' according to the paper : https://arxiv.org/pdf/2306.01708\n",
-        "#------------#\n",
-        "# _lora1 - the first LoRa you wish to merge\n",
-        "# _lora2 - the second LoRa you wish to merge\n",
-        "# _lora3 - the third LoRa you wish to merge\n",
-        "\n",
-        "# NOTE about loras :\n",
-        "# _lora1 , _lora2 and _lora3 can have different ranks.\n",
-        "# Make sure the scale of all three loras is the same.\n",
-        "\n",
-        "# The scale is defined as (alpha/rank) and should be 0.5.\n",
-        "# If the alpha value is too high or too low , for example if (alpha/rank) = 1 ,\n",
-        "# then run rescale_and_save(_lora , savefile_name, new_ratio) with new_ratio = 0.5.\n",
-        "# For example , a LoRa of rank 32 must have an alpha value of 16 for scale = (alpha/rank) = 0.5 to hold.\n",
-        "\n",
-        "# However , make sure each lora rank is equal to or below 32 ,\n",
-        "# or that the sum of ranks does not exceed 3*32 = 96 , to not exceed GPU limits on Google Colab. Slightly higher values might be fine.\n",
-        "# Haven't tested it since I prefer merging LoRa at rank 32.\n",
-        "\n",
-        "# savefile_name - the name of the savefile to be created. Make sure the savefile_name ends with the '.safetensors' suffix.\n",
-        "# new_rank      - the rank you wish to set the merged LoRa to\n",
-        "# new_alpha     - the alpha value you wish to set the merged LoRa to. For proper scaling ,\n",
-        "#                 set the alpha value to half the value of the rank so (alpha/rank) = 0.5.\n",
-        "#                 This is a very common scale for trained LoRas.\n",
-        "#\n",
-        "# resolution    - all values < resolution * 1e-6 will be set to 0 in the delta_W of this LoRa.\n",
-        "#                 This is useful to eliminate 'junk' in the output of the LoRa when scaling it to strengths above 0.8.\n",
-        "#                 A high resolution will also make the LoRa more compatible with other LoRas ,\n",
-        "#                 at the expense of making the LoRa less true to the originally trained image output.\n",
-        "def merge_and_save(_lora1 , _lora2 , _lora3, savefile_name, new_rank , new_alpha, resolution):\n",
-        "  lora = {}\n",
-        "  count = 0\n",
-        "  for key in _lora1:count = count + 1\n",
-        "  NUM_ITEMS = count\n",
-        "  count = 0\n",
-        "  thresh = resolution*0.000001 # 1e-6\n",
-        "  decimals = 6\n",
-        "\n",
-        "  #-------#\n",
-        "  for key in _lora1:\n",
-        "    if f'{key}'.find('alpha') > -1:\n",
-        "      lora[f'{key}'] = torch.tensor(new_alpha).to(device = device , dtype = torch.float32)\n",
-        "      count = count + 1\n",
-        "      print(f'{count} / {NUM_ITEMS}')\n",
-        "      continue\n",
-        "    #------#\n",
-        "    if not f'{key}'.find('lora_down') > -1: continue\n",
-        "    up = f'{key}'.replace('lora_down' , 'lora_up')\n",
-        "    down = f'{key}'\n",
-        "    #-------#\n",
-        "\n",
-        "    # Setup\n",
-        "    delta_W = torch.matmul(_lora1[up]*0,_lora1[down]*0).to(device = device, dtype=torch.float32)\n",
-        "    tgt_shape = delta_W.shape\n",
-        "    N = delta_W.numel()\n",
-        "    delta_W = torch.zeros(N).to(device = device , dtype=torch.float32)\n",
-        "    #-----#\n",
-        "\n",
-        "    # Positives\n",
-        "    Y = torch.zeros(3,N).to(device = device , dtype=torch.float32)\n",
-        "    Y[0] = torch.matmul(_lora1[up],_lora1[down]).flatten().to(device = device , dtype=torch.float32)\n",
-        "    Y[1] = torch.matmul(_lora2[up],_lora2[down]).flatten().to(device = device , dtype=torch.float32)\n",
-        "    Y[2] = torch.matmul(_lora3[up],_lora3[down]).flatten().to(device = device , dtype=torch.float32)\n",
-        "    Y[torch.abs(Y)<thresh] = 0\n",
-        "    num = torch.sum(Y>0,dim=0) + 0.001\n",
-        "    elect = torch.sum(Y<0,dim=0) + 0.001\n",
-        "    elect = (num>=elect)\n",
-        "    Y[Y<0] = 0\n",
-        "    Y = torch.sum(Y, dim=0).to(device = device , dtype=torch.float32)\n",
-        "    delta_W[elect] = torch.round((Y[elect]/num[elect]),decimals=decimals).to(device = device , dtype=torch.float32)\n",
-        "    #-----#\n",
-        "\n",
-        "    # Negatives\n",
-        "    Y = torch.zeros(3,N).to(device = device , dtype=torch.float32)\n",
-        "    Y[0] = torch.matmul(_lora1[up],_lora1[down]).flatten().to(device = device , dtype=torch.float32)\n",
-        "    Y[1] = torch.matmul(_lora2[up],_lora2[down]).flatten().to(device = device , dtype=torch.float32)\n",
-        "    Y[2] = torch.matmul(_lora3[up],_lora3[down]).flatten().to(device = device , dtype=torch.float32)\n",
-        "    Y[torch.abs(Y)<thresh] = 0\n",
-        "    num = torch.sum(Y<0,dim=0) + 0.001\n",
-        "    elect = torch.sum(Y>0,dim=0) + 0.001\n",
-        "    elect = (elect<num)\n",
-        "    Y[Y>0] = 0\n",
-        "    Y = torch.sum(Y, dim=0).to(device = device , dtype=torch.float32)\n",
-        "    delta_W[elect] = torch.round(Y[elect]/num[elect],decimals=decimals).to(device = device , dtype=torch.float32)\n",
-        "    #----#\n",
-        "\n",
-        "    # Free up memory prior to SVD\n",
-        "    delta_W = delta_W.unflatten(0,tgt_shape).to(device = device , dtype=torch.float32)\n",
-        "    delta_W = delta_W.clone().detach()\n",
-        "    Y = {}\n",
-        "    num = {}\n",
-        "    elect = {}\n",
-        "    #-----#\n",
-        "\n",
-        "    # Run SVD (Singular Value Decomposition)\n",
-        "    # to get the new lora_up and lora_down for delta_W\n",
-        "    tmp={}\n",
-        "    tmp['u'], tmp['s'], tmp['Vh'] = torch.svd(delta_W)\n",
-        "    tmp['u'] = tmp['u'][:,: new_rank]\n",
-        "    tmp['s'] = tmp['s'][: new_rank]\n",
-        "    tmp['u'] = torch.matmul(tmp['u'], torch.diag(tmp['s']))\n",
-        "    tmp['Vh'] = tmp['Vh'].t()[: new_rank,:]\n",
-        "    for key in tmp:tmp[f'{key}'] = tmp[f'{key}'].contiguous()\n",
-        "    lora[up] = torch.round(tmp['u'],decimals=decimals).to(device = device , dtype=torch.float32)\n",
-        "    lora[down] = torch.round(tmp['Vh'],decimals=decimals).to(device = device , dtype=torch.float32)\n",
-        "    #-------#\n",
-        "\n",
-        "    count = count + 2\n",
-        "    print(f'{count} / {NUM_ITEMS}')\n",
-        "  #----#\n",
-        "  #--------#\n",
-        "  print(f'done!')\n",
-        "  print(f'casting params to fp16....')\n",
-        "  for key in lora: lora[f'{key}'] = lora[f'{key}'].to(device = device , dtype=torch.float16)\n",
-        "  #-------#\n",
-        "  print(f'done!')\n",
-        "  print(f'saving {savefile_name}...')\n",
-        "  save_file(lora , f'{savefile_name}')\n",
-        "#------#\n",
-        "\n",
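Since the removed function is the heart of the notebook, here is a compact sketch of its 'elect sign & merge' step on a toy (3, N) tensor. Values are made up; each column plays the role of one flattened delta_W entry across the three LoRas:

```python
import torch

thresh = 200 * 1e-6   # resolution = 200
Y = torch.tensor([[ 0.30, -0.20, 0.00005],
                  [ 0.10, -0.40, 0.20   ],
                  [-0.10, -0.10, 0.10   ]])
Y[Y.abs() < thresh] = 0                       # trim near-zero entries

pos = torch.sum(Y > 0, dim=0).float() + 0.001
neg = torch.sum(Y < 0, dim=0).float() + 0.001
merged = torch.zeros(Y.shape[1])

elect_pos = pos >= neg                        # positive sign wins the election
merged[elect_pos] = Y.clamp(min=0).sum(dim=0)[elect_pos] / pos[elect_pos]

elect_neg = neg > pos                         # negative sign wins
merged[elect_neg] = Y.clamp(max=0).sum(dim=0)[elect_neg] / neg[elect_neg]

print(merged)   # approx [0.200, -0.233, 0.150]: mean of the elected-sign values
```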
-        "new_rank = 32\n",
-        "new_alpha = math.floor(new_rank/2)\n",
-        "resolution = 200\n",
-        "name = 'scale'\n",
-        "yeero = load_file('/content/drive/MyDrive/Saved from Chrome/scale.safetensors')\n",
-        "euro = load_file('/content/drive/MyDrive/Saved from Chrome/euro_100_r32_16alpha.safetensors')\n",
-        "puff = load_file('/content/drive/MyDrive/Saved from Chrome/buff_200_r32_16alpha.safetensors')\n",
-        "savefile_name = f'{name}_{resolution}_r{new_rank}_a{new_alpha}.safetensors'\n",
+        "tgt = load_file('/content/drive/MyDrive/Saved from Chrome/window-voyeur- F.safetensors')\n",
+        "for key in tgt:\n",
+        "  if f'{key}'.find('alpha')>-1: print(tgt[f'{key}'])\n",
+        "  print(f\" {key} : {tgt[f'{key}'].shape}\")\n",
         "\n",
-        "for key in yeero:\n",
-        "  yeero[f'{key}'] = yeero[f'{key}'].to(device = device , dtype = torch.float32)\n",
-        "  euro[f'{key}'] = euro[f'{key}'].to(device = device , dtype = torch.float32)\n",
-        "  puff[f'{key}'] = puff[f'{key}'].to(device = device , dtype = torch.float32)\n",
-        "#-----#\n",
-        "print(f'for {name}.safetensors at scale = (rank/alpha) = 0.5')\n",
-        "#merge_and_save(yeero , euro , puff, savefile_name, new_rank , new_alpha, resolution)\n",
+        "name = 'window'\n",
+        "savefile_name = f'{name}.safetensors'\n",
+        "new_ratio = 0.5\n",
+        "rank = 4\n",
         "\n",
-        "filter_and_save(yeero , savefile_name, new_rank , new_alpha, resolution)\n"
+        "rescale_and_save(tgt , savefile_name, new_ratio , rank)\n",
+        "# scale = (alpha/rank) = 0.5\n",
+        "\n"
       ],
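One thing the new call gives up is the old automatic rank detection (the removed `rank = torch.matmul(...)` line in `rescale_and_save`). If hard-coding `rank = 4` feels fragile, the rank could instead be read off any kohya-style `lora_down` weight, whose first dimension is the rank. A hypothetical helper, consistent with the `torch.Size([4, 768])` shapes printed below:

```python
def infer_rank(lora):
    # kohya-style keys: '<module>.lora_down.weight' has shape (rank, in_features)
    for key in lora:
        if key.endswith('lora_down.weight'):
            return lora[key].shape[0]
    return None  # no LoRA weight keys found

# rank = infer_rank(tgt)   # -> 4 for the file inspected in the cell above
```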
"92d90a82-65e0-45ac-be1c-18d96ff95459" + } }, - "execution_count": 6, + "execution_count": 19, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "for scale.safetensors at scale = (rank/alpha) = 0.5\n", - "1 / 912\n", - "3 / 912\n", - "4 / 912\n", - "6 / 912\n", - "7 / 912\n", - "9 / 912\n", - "10 / 912\n", - "12 / 912\n", - "13 / 912\n", - "15 / 912\n", - "16 / 912\n", - "18 / 912\n", - "19 / 912\n", - "21 / 912\n", - "22 / 912\n", - "24 / 912\n", - "25 / 912\n", - "27 / 912\n", - "28 / 912\n", - "30 / 912\n", - "31 / 912\n", - "33 / 912\n", - "34 / 912\n", - "36 / 912\n", - "37 / 912\n", - "39 / 912\n", - "40 / 912\n", - "42 / 912\n", - "43 / 912\n", - "45 / 912\n", - "46 / 912\n", - "48 / 912\n", - "49 / 912\n", - "51 / 912\n", - "52 / 912\n", - "54 / 912\n", - "55 / 912\n", - "57 / 912\n", - "58 / 912\n", - "60 / 912\n", - "61 / 912\n", - "63 / 912\n", - "64 / 912\n", - "66 / 912\n", - "67 / 912\n", - "69 / 912\n", - "70 / 912\n", - "72 / 912\n", - "73 / 912\n", - "75 / 912\n", - "76 / 912\n", - "78 / 912\n", - "79 / 912\n", - "81 / 912\n", - "82 / 912\n", - "84 / 912\n", - "85 / 912\n", - "87 / 912\n", - "88 / 912\n", - "90 / 912\n", - "91 / 912\n", - "93 / 912\n", - "94 / 912\n", - "96 / 912\n", - "97 / 912\n", - "99 / 912\n", - "100 / 912\n", - "102 / 912\n", - "103 / 912\n", - "105 / 912\n", - "106 / 912\n", - "108 / 912\n", - "109 / 912\n", - "111 / 912\n", - "112 / 912\n", - "114 / 912\n", - "115 / 912\n", - "117 / 912\n", - "118 / 912\n", - "120 / 912\n", - "121 / 912\n", - "123 / 912\n", - "124 / 912\n", - "126 / 912\n", - "127 / 912\n", - "129 / 912\n", - "130 / 912\n", - "132 / 912\n", - "133 / 912\n", - "135 / 912\n", - "136 / 912\n", - "138 / 912\n", - "139 / 912\n", - "141 / 912\n", - "142 / 912\n", - "144 / 912\n", - "145 / 912\n", - "147 / 912\n", - "148 / 912\n", - "150 / 912\n", - "151 / 912\n", - "153 / 912\n", - "154 / 912\n", - "156 / 912\n", - "157 / 912\n", - "159 / 912\n", - "160 / 912\n", - "162 / 912\n", - "163 / 912\n", - "165 / 912\n", - "166 / 912\n", - "168 / 912\n", - "169 / 912\n", - "171 / 912\n", - "172 / 912\n", - "174 / 912\n", - "175 / 912\n", - "177 / 912\n", - "178 / 912\n", - "180 / 912\n", - "181 / 912\n", - "183 / 912\n", - "184 / 912\n", - "186 / 912\n", - "187 / 912\n", - "189 / 912\n", - "190 / 912\n", - "192 / 912\n", - "193 / 912\n", - "195 / 912\n", - "196 / 912\n", - "198 / 912\n", - "199 / 912\n", - "201 / 912\n", - "202 / 912\n", - "204 / 912\n", - "205 / 912\n", - "207 / 912\n", - "208 / 912\n", - "210 / 912\n", - "211 / 912\n", - "213 / 912\n", - "214 / 912\n", - "216 / 912\n", - "217 / 912\n", - "219 / 912\n", - "220 / 912\n", - "222 / 912\n", - "223 / 912\n", - "225 / 912\n", - "226 / 912\n", - "228 / 912\n", - "229 / 912\n", - "231 / 912\n", - "232 / 912\n", - "234 / 912\n", - "235 / 912\n", - "237 / 912\n", - "238 / 912\n", - "240 / 912\n", - "241 / 912\n", - "243 / 912\n", - "244 / 912\n", - "246 / 912\n", - "247 / 912\n", - "249 / 912\n", - "250 / 912\n", - "252 / 912\n", - "253 / 912\n", - "255 / 912\n", - "256 / 912\n", - "258 / 912\n", - "259 / 912\n", - "261 / 912\n", - "262 / 912\n", - "264 / 912\n", - "265 / 912\n", - "267 / 912\n", - "268 / 912\n", - "270 / 912\n", - "271 / 912\n", - "273 / 912\n", - "274 / 912\n", - "276 / 912\n", - "277 / 912\n", - "279 / 912\n", - "280 / 912\n", - "282 / 912\n", - "283 / 912\n", - "285 / 912\n", - "286 / 912\n", - "288 / 912\n", - "289 / 912\n", - "291 / 912\n", - "292 / 912\n", - "294 / 912\n", - "295 / 912\n", - "297 / 912\n", - "298 / 912\n", - "300 / 912\n", - 
"301 / 912\n", - "303 / 912\n", - "304 / 912\n", - "306 / 912\n", - "307 / 912\n", - "309 / 912\n", - "310 / 912\n", - "312 / 912\n", - "313 / 912\n", - "315 / 912\n", - "316 / 912\n", - "318 / 912\n", - "319 / 912\n", - "321 / 912\n", - "322 / 912\n", - "324 / 912\n", - "325 / 912\n", - "327 / 912\n", - "328 / 912\n", - "330 / 912\n", - "331 / 912\n", - "333 / 912\n", - "334 / 912\n", - "336 / 912\n", - "337 / 912\n", - "339 / 912\n", - "340 / 912\n", - "342 / 912\n", - "343 / 912\n", - "345 / 912\n", - "346 / 912\n", - "348 / 912\n", - "349 / 912\n", - "351 / 912\n", - "352 / 912\n", - "354 / 912\n", - "355 / 912\n", - "357 / 912\n", - "358 / 912\n", - "360 / 912\n", - "361 / 912\n", - "363 / 912\n", - "364 / 912\n", - "366 / 912\n", - "367 / 912\n", - "369 / 912\n", - "370 / 912\n", - "372 / 912\n", - "373 / 912\n", - "375 / 912\n", - "376 / 912\n", - "378 / 912\n", - "379 / 912\n", - "381 / 912\n", - "382 / 912\n", - "384 / 912\n", - "385 / 912\n", - "387 / 912\n", - "388 / 912\n", - "390 / 912\n", - "391 / 912\n", - "393 / 912\n", - "394 / 912\n", - "396 / 912\n", - "397 / 912\n", - "399 / 912\n", - "400 / 912\n", - "402 / 912\n", - "403 / 912\n", - "405 / 912\n", - "406 / 912\n", - "408 / 912\n", - "409 / 912\n", - "411 / 912\n", - "412 / 912\n", - "414 / 912\n", - "415 / 912\n", - "417 / 912\n", - "418 / 912\n", - "420 / 912\n", - "421 / 912\n", - "423 / 912\n", - "424 / 912\n", - "426 / 912\n", - "427 / 912\n", - "429 / 912\n", - "430 / 912\n", - "432 / 912\n", - "433 / 912\n", - "435 / 912\n", - "436 / 912\n", - "438 / 912\n", - "439 / 912\n", - "441 / 912\n", - "442 / 912\n", - "444 / 912\n", - "445 / 912\n", - "447 / 912\n", - "448 / 912\n", - "450 / 912\n", - "451 / 912\n", - "453 / 912\n", - "454 / 912\n", - "456 / 912\n", - "457 / 912\n", - "459 / 912\n", - "460 / 912\n", - "462 / 912\n", - "463 / 912\n", - "465 / 912\n", - "466 / 912\n", - "468 / 912\n", - "469 / 912\n", - "471 / 912\n", - "472 / 912\n", - "474 / 912\n", - "475 / 912\n", - "477 / 912\n", - "478 / 912\n", - "480 / 912\n", - "481 / 912\n", - "483 / 912\n", - "484 / 912\n", - "486 / 912\n", - "487 / 912\n", - "489 / 912\n", - "490 / 912\n", - "492 / 912\n", - "493 / 912\n", - "495 / 912\n", - "496 / 912\n", - "498 / 912\n", - "499 / 912\n", - "501 / 912\n", - "502 / 912\n", - "504 / 912\n", - "505 / 912\n", - "507 / 912\n", - "508 / 912\n", - "510 / 912\n", - "511 / 912\n", - "513 / 912\n", - "514 / 912\n", - "516 / 912\n", - "517 / 912\n", - "519 / 912\n", - "520 / 912\n", - "522 / 912\n", - "523 / 912\n", - "525 / 912\n", - "526 / 912\n", - "528 / 912\n", - "529 / 912\n", - "531 / 912\n", - "532 / 912\n", - "534 / 912\n", - "535 / 912\n", - "537 / 912\n", - "538 / 912\n", - "540 / 912\n", - "541 / 912\n", - "543 / 912\n", - "544 / 912\n", - "546 / 912\n", - "547 / 912\n", - "549 / 912\n", - "550 / 912\n", - "552 / 912\n", - "553 / 912\n", - "555 / 912\n", - "556 / 912\n", - "558 / 912\n", - "559 / 912\n", - "561 / 912\n", - "562 / 912\n", - "564 / 912\n", - "565 / 912\n", - "567 / 912\n", - "568 / 912\n", - "570 / 912\n", - "571 / 912\n", - "573 / 912\n", - "574 / 912\n", - "576 / 912\n", - "577 / 912\n", - "579 / 912\n", - "580 / 912\n", - "582 / 912\n", - "583 / 912\n", - "585 / 912\n", - "586 / 912\n", - "588 / 912\n", - "589 / 912\n", - "591 / 912\n", - "592 / 912\n", - "594 / 912\n", - "595 / 912\n", - "597 / 912\n", - "598 / 912\n", - "600 / 912\n", - "601 / 912\n", - "603 / 912\n", - "604 / 912\n", - "606 / 912\n", - "607 / 912\n", - "609 / 912\n", - "610 / 912\n", - "612 / 912\n", - "613 / 912\n", - 
"615 / 912\n", - "616 / 912\n", - "618 / 912\n", - "619 / 912\n", - "621 / 912\n", - "622 / 912\n", - "624 / 912\n", - "625 / 912\n", - "627 / 912\n", - "628 / 912\n", - "630 / 912\n", - "631 / 912\n", - "633 / 912\n", - "634 / 912\n", - "636 / 912\n", - "637 / 912\n", - "639 / 912\n", - "640 / 912\n", - "642 / 912\n", - "643 / 912\n", - "645 / 912\n", - "646 / 912\n", - "648 / 912\n", - "649 / 912\n", - "651 / 912\n", - "652 / 912\n", - "654 / 912\n", - "655 / 912\n", - "657 / 912\n", - "658 / 912\n", - "660 / 912\n", - "661 / 912\n", - "663 / 912\n", - "664 / 912\n", - "666 / 912\n", - "667 / 912\n", - "669 / 912\n", - "670 / 912\n", - "672 / 912\n", - "673 / 912\n", - "675 / 912\n", - "676 / 912\n", - "678 / 912\n", - "679 / 912\n", - "681 / 912\n", - "682 / 912\n", - "684 / 912\n", - "685 / 912\n", - "687 / 912\n", - "688 / 912\n", - "690 / 912\n", - "691 / 912\n", - "693 / 912\n", - "694 / 912\n", - "696 / 912\n", - "697 / 912\n", - "699 / 912\n", - "700 / 912\n", - "702 / 912\n", - "703 / 912\n", - "705 / 912\n", - "706 / 912\n", - "708 / 912\n", - "709 / 912\n", - "711 / 912\n", - "712 / 912\n", - "714 / 912\n", - "715 / 912\n", - "717 / 912\n", - "718 / 912\n", - "720 / 912\n", - "721 / 912\n", - "723 / 912\n", - "724 / 912\n", - "726 / 912\n", - "727 / 912\n", - "729 / 912\n", - "730 / 912\n", - "732 / 912\n", - "733 / 912\n", - "735 / 912\n", - "736 / 912\n", - "738 / 912\n", - "739 / 912\n", - "741 / 912\n", - "742 / 912\n", - "744 / 912\n", - "745 / 912\n", - "747 / 912\n", - "748 / 912\n", - "750 / 912\n", - "751 / 912\n", - "753 / 912\n", - "754 / 912\n", - "756 / 912\n", - "757 / 912\n", - "759 / 912\n", - "760 / 912\n", - "762 / 912\n", - "763 / 912\n", - "765 / 912\n", - "766 / 912\n", - "768 / 912\n", - "769 / 912\n", - "771 / 912\n", - "772 / 912\n", - "774 / 912\n", - "775 / 912\n", - "777 / 912\n", - "778 / 912\n", - "780 / 912\n", - "781 / 912\n", - "783 / 912\n", - "784 / 912\n", - "786 / 912\n", - "787 / 912\n", - "789 / 912\n", - "790 / 912\n", - "792 / 912\n", - "793 / 912\n", - "795 / 912\n", - "796 / 912\n", - "798 / 912\n", - "799 / 912\n", - "801 / 912\n", - "802 / 912\n", - "804 / 912\n", - "805 / 912\n", - "807 / 912\n", - "808 / 912\n", - "810 / 912\n", - "811 / 912\n", - "813 / 912\n", - "814 / 912\n", - "816 / 912\n", - "817 / 912\n", - "819 / 912\n", - "820 / 912\n", - "822 / 912\n", - "823 / 912\n", - "825 / 912\n", - "826 / 912\n", - "828 / 912\n", - "829 / 912\n", - "831 / 912\n", - "832 / 912\n", - "834 / 912\n", - "835 / 912\n", - "837 / 912\n", - "838 / 912\n", - "840 / 912\n", - "841 / 912\n", - "843 / 912\n", - "844 / 912\n", - "846 / 912\n", - "847 / 912\n", - "849 / 912\n", - "850 / 912\n", - "852 / 912\n", - "853 / 912\n", - "855 / 912\n", - "856 / 912\n", - "858 / 912\n", - "859 / 912\n", - "861 / 912\n", - "862 / 912\n", - "864 / 912\n", - "865 / 912\n", - "867 / 912\n", - "868 / 912\n", - "870 / 912\n", - "871 / 912\n", - "873 / 912\n", - "874 / 912\n", - "876 / 912\n", - "877 / 912\n", - "879 / 912\n", - "880 / 912\n", - "882 / 912\n", - "883 / 912\n", - "885 / 912\n", - "886 / 912\n", - "888 / 912\n", - "889 / 912\n", - "891 / 912\n", - "892 / 912\n", - "894 / 912\n", - "895 / 912\n", - "897 / 912\n", - "898 / 912\n", - "900 / 912\n", - "901 / 912\n", - "903 / 912\n", - "904 / 912\n", - "906 / 912\n", - "907 / 912\n", - "909 / 912\n", - "910 / 912\n", - "912 / 912\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_te1_text_model_encoder_layers_0_mlp_fc1.alpha : torch.Size([])\n", + " 
+            "tensor(1., dtype=torch.bfloat16)\n",
+            " lora_te1_text_model_encoder_layers_0_mlp_fc1.alpha : torch.Size([])\n",
+            " lora_te1_text_model_encoder_layers_0_mlp_fc1.lora_down.weight : torch.Size([4, 768])\n",
+            " lora_te1_text_model_encoder_layers_0_mlp_fc1.lora_up.weight : torch.Size([3072, 4])\n",
+            "tensor(1., dtype=torch.bfloat16)\n",
+            " lora_te1_text_model_encoder_layers_0_mlp_fc2.alpha : torch.Size([])\n",
+            " lora_te1_text_model_encoder_layers_0_mlp_fc2.lora_down.weight : torch.Size([4, 3072])\n",
+            " lora_te1_text_model_encoder_layers_0_mlp_fc2.lora_up.weight : torch.Size([768, 4])\n",
+            "tensor(1., dtype=torch.bfloat16)\n",
+            " lora_te1_text_model_encoder_layers_0_self_attn_k_proj.alpha : torch.Size([])\n",
+            " lora_te1_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight : torch.Size([4, 768])\n",
+            " lora_te1_text_model_encoder_layers_0_self_attn_k_proj.lora_up.weight : torch.Size([768, 4])\n",
[... the same alpha = 1 / lora_down / lora_up triplet (rank 4) repeats for the out_proj, q_proj and v_proj of layer 0 and for every te1 encoder layer 1-11; omitted ...]
+            "tensor(1., dtype=torch.bfloat16)\n",
+            " lora_unet_single_blocks_0_linear1.alpha : torch.Size([])\n",
+            " lora_unet_single_blocks_0_linear1.lora_down.weight : torch.Size([4, 3072])\n",
+            " lora_unet_single_blocks_0_linear1.lora_up.weight : torch.Size([21504, 4])\n",
+            "tensor(1., dtype=torch.bfloat16)\n",
+            " lora_unet_single_blocks_0_linear2.alpha : torch.Size([])\n",
+            " lora_unet_single_blocks_0_linear2.lora_down.weight : torch.Size([4, 15360])\n",
+            " lora_unet_single_blocks_0_linear2.lora_up.weight : torch.Size([3072, 4])\n",
+            "tensor(1., dtype=torch.bfloat16)\n",
+            " lora_unet_single_blocks_0_modulation_lin.alpha : torch.Size([])\n",
+            " lora_unet_single_blocks_0_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n",
+            " lora_unet_single_blocks_0_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n",
[... the same linear1 / linear2 / modulation_lin triplet repeats for lora_unet_single_blocks 1 through 26; the hunk is cut off mid-listing after block 26 ...]
+            "tensor(1., 
dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_27_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_27_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_27_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_27_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_27_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_27_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_27_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_27_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_27_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_28_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_28_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_28_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_28_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_28_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_28_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_28_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_28_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_28_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_29_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_29_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_29_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_29_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_29_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_29_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_29_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_29_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_29_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_2_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_2_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_2_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_2_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_2_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_2_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_2_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_2_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_2_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_30_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_30_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " 
lora_unet_single_blocks_30_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_30_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_30_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_30_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_30_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_30_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_30_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_31_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_31_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_31_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_31_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_31_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_31_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_31_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_31_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_31_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_32_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_32_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_32_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_32_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_32_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_32_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_32_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_32_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_32_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_33_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_33_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_33_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_33_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_33_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_33_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_33_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_33_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_33_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_34_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_34_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_34_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_34_linear2.alpha : torch.Size([])\n", + " 
lora_unet_single_blocks_34_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_34_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_34_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_34_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_34_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_35_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_35_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_35_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_35_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_35_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_35_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_35_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_35_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_35_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_36_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_36_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_36_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_36_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_36_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_36_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_36_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_36_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_36_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_37_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_37_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_37_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_37_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_37_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_37_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_37_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_37_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_37_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_3_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_3_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_3_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_3_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_3_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_3_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + 
" lora_unet_single_blocks_3_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_3_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_3_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_4_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_4_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_4_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_4_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_4_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_4_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_4_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_4_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_4_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_5_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_5_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_5_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_5_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_5_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_5_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_5_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_5_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_5_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_6_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_6_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_6_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_6_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_6_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_6_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_6_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_6_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_6_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_7_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_7_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_7_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_7_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_7_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_7_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_7_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_7_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_7_modulation_lin.lora_up.weight : 
torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_8_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_8_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_8_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_8_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_8_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_8_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_8_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_8_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_8_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_9_linear1.alpha : torch.Size([])\n", + " lora_unet_single_blocks_9_linear1.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_9_linear1.lora_up.weight : torch.Size([21504, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_9_linear2.alpha : torch.Size([])\n", + " lora_unet_single_blocks_9_linear2.lora_down.weight : torch.Size([4, 15360])\n", + " lora_unet_single_blocks_9_linear2.lora_up.weight : torch.Size([3072, 4])\n", + "tensor(1., dtype=torch.bfloat16)\n", + " lora_unet_single_blocks_9_modulation_lin.alpha : torch.Size([])\n", + " lora_unet_single_blocks_9_modulation_lin.lora_down.weight : torch.Size([4, 3072])\n", + " lora_unet_single_blocks_9_modulation_lin.lora_up.weight : torch.Size([9216, 4])\n", + "3 / 558\n", + "6 / 558\n", + "9 / 558\n", + "12 / 558\n", + "15 / 558\n", + "18 / 558\n", + "21 / 558\n", + "24 / 558\n", + "27 / 558\n", + "30 / 558\n", + "33 / 558\n", + "36 / 558\n", + "39 / 558\n", + "42 / 558\n", + "45 / 558\n", + "48 / 558\n", + "51 / 558\n", + "54 / 558\n", + "57 / 558\n", + "60 / 558\n", + "63 / 558\n", + "66 / 558\n", + "69 / 558\n", + "72 / 558\n", + "75 / 558\n", + "78 / 558\n", + "81 / 558\n", + "84 / 558\n", + "87 / 558\n", + "90 / 558\n", + "93 / 558\n", + "96 / 558\n", + "99 / 558\n", + "102 / 558\n", + "105 / 558\n", + "108 / 558\n", + "111 / 558\n", + "114 / 558\n", + "117 / 558\n", + "120 / 558\n", + "123 / 558\n", + "126 / 558\n", + "129 / 558\n", + "132 / 558\n", + "135 / 558\n", + "138 / 558\n", + "141 / 558\n", + "144 / 558\n", + "147 / 558\n", + "150 / 558\n", + "153 / 558\n", + "156 / 558\n", + "159 / 558\n", + "162 / 558\n", + "165 / 558\n", + "168 / 558\n", + "171 / 558\n", + "174 / 558\n", + "177 / 558\n", + "180 / 558\n", + "183 / 558\n", + "186 / 558\n", + "189 / 558\n", + "192 / 558\n", + "195 / 558\n", + "198 / 558\n", + "201 / 558\n", + "204 / 558\n", + "207 / 558\n", + "210 / 558\n", + "213 / 558\n", + "216 / 558\n", + "219 / 558\n", + "222 / 558\n", + "225 / 558\n", + "228 / 558\n", + "231 / 558\n", + "234 / 558\n", + "237 / 558\n", + "240 / 558\n", + "243 / 558\n", + "246 / 558\n", + "249 / 558\n", + "252 / 558\n", + "255 / 558\n", + "258 / 558\n", + "261 / 558\n", + "264 / 558\n", + "267 / 558\n", + "270 / 558\n", + "273 / 558\n", + "276 / 558\n", + "279 / 558\n", + "282 / 558\n", + "285 / 558\n", + "288 / 558\n", + "291 / 558\n", + "294 / 558\n", + "297 / 558\n", + "300 / 558\n", + "303 / 558\n", + "306 / 558\n", + "309 / 558\n", + "312 / 558\n", + "315 / 558\n", + "318 / 558\n", + "321 / 558\n", + "324 / 558\n", + "327 / 558\n", + "330 / 558\n", + "333 / 558\n", + "336 / 558\n", 
+ "339 / 558\n", + "342 / 558\n", + "345 / 558\n", + "348 / 558\n", + "351 / 558\n", + "354 / 558\n", + "357 / 558\n", + "360 / 558\n", + "363 / 558\n", + "366 / 558\n", + "369 / 558\n", + "372 / 558\n", + "375 / 558\n", + "378 / 558\n", + "381 / 558\n", + "384 / 558\n", + "387 / 558\n", + "390 / 558\n", + "393 / 558\n", + "396 / 558\n", + "399 / 558\n", + "402 / 558\n", + "405 / 558\n", + "408 / 558\n", + "411 / 558\n", + "414 / 558\n", + "417 / 558\n", + "420 / 558\n", + "423 / 558\n", + "426 / 558\n", + "429 / 558\n", + "432 / 558\n", + "435 / 558\n", + "438 / 558\n", + "441 / 558\n", + "444 / 558\n", + "447 / 558\n", + "450 / 558\n", + "453 / 558\n", + "456 / 558\n", + "459 / 558\n", + "462 / 558\n", + "465 / 558\n", + "468 / 558\n", + "471 / 558\n", + "474 / 558\n", + "477 / 558\n", + "480 / 558\n", + "483 / 558\n", + "486 / 558\n", + "489 / 558\n", + "492 / 558\n", + "495 / 558\n", + "498 / 558\n", + "501 / 558\n", + "504 / 558\n", + "507 / 558\n", + "510 / 558\n", + "513 / 558\n", + "516 / 558\n", + "519 / 558\n", + "522 / 558\n", + "525 / 558\n", + "528 / 558\n", + "531 / 558\n", + "534 / 558\n", + "537 / 558\n", + "540 / 558\n", + "543 / 558\n", + "546 / 558\n", + "549 / 558\n", + "552 / 558\n", + "555 / 558\n", + "558 / 558\n", "done!\n", "casting params to fp16....\n", "done!\n", - "saving scale_200_r32_a16.safetensors...\n" + "saving window.safetensors...\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "tgt = load_file('/content/doggy.safetensors')\n", + "for key in tgt:\n", + " if f'{key}'.find('alpha')>-1: print(tgt[f'{key}'])\n", + " print(tgt[f'{key}'].shape)" + ], + "metadata": { + "id": "qk0Lbf27vBjl", + "outputId": "109eaa9f-6941-4cb7-d084-ca54eac1ac6c", + "colab": { + "base_uri": "https://localhost:8080/" + } + }, + "execution_count": 14, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + 
"torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([3072, 32])\n", + "tensor(16., 
dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 768])\n", + "torch.Size([768, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 
3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + 
"torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", 
+ "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + 
"torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([21504, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "tensor(16., dtype=torch.float16)\n", + "torch.Size([])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n" ] } ] @@ -995,38 +1835,241 @@ { "cell_type": "code", "source": [ + "\n", + "import torch\n", + "from safetensors.torch import load_file, save_file\n", + "import torch.nn as nn\n", + "from torch import linalg as LA\n", + "import os\n", + "import math\n", + "import random\n", + "import 
+        "import numpy as np\n",
+        "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
+        "\n",
+        "# filter_and_save\n",
+        "# Re-scales a lora to (new_rank , new_alpha) and removes noise : every value in\n",
+        "# each layer delta_W whose magnitude is below resolution * 1e-6 is set to 0\n",
+        "# before the SVD re-factorization , then the filtered lora is saved\n",
+        "# as a .safetensors file in fp16\n",
+        "def filter_and_save(_lora , savefile_name, new_rank , new_alpha, resolution):\n",
+        "  lora = {}\n",
+        "  count = 0\n",
+        "  for key in _lora:count = count + 1\n",
+        "  NUM_ITEMS = count\n",
+        "  count = 0\n",
+        "  thresh = resolution*0.000001 # 1e-6\n",
+        "  #-------#\n",
+        "  for key in _lora:\n",
+        "    if f'{key}'.find('alpha') > -1:\n",
+        "      lora[f'{key}'] = torch.tensor(new_alpha).to(device = device , dtype = torch.float32)\n",
+        "      count = count + 1\n",
+        "      print(f'{count} / {NUM_ITEMS}')\n",
+        "      continue\n",
+        "    #------#\n",
+        "    if not f'{key}'.find('lora_down') > -1: continue\n",
+        "    up = f'{key}'.replace('lora_down' , 'lora_up')\n",
+        "    down = f'{key}'\n",
+        "    #-------#\n",
+        "    delta_W = torch.matmul(_lora[up],_lora[down]).to(device = device , dtype=torch.float32)\n",
+        "    #---#\n",
+        "    N = delta_W.numel()\n",
+        "    y = delta_W.flatten().to(device = device , dtype=torch.float32)\n",
+        "    values,indices = torch.sort(y, descending = False) # smallest -> largest elements\n",
+        "    y = torch.zeros(y.shape).to(device = device , dtype=torch.float32)\n",
+        "    y[indices[values>thresh]] = 1\n",
+        "    y[indices[values<-thresh]] = 1\n",
+        "    y = y.unflatten(0,delta_W.shape).to(device = device , dtype=torch.float32)\n",
+        "    delta_W = torch.mul(delta_W,y).to(device = device , dtype=torch.float32)\n",
+        "    #------#\n",
+        "    tmp={}\n",
+        "    tmp['u'], tmp['s'], tmp['Vh'] = torch.svd(delta_W)\n",
+        "    tmp['u'] = tmp['u'][:,: new_rank]\n",
+        "    tmp['s'] = tmp['s'][: new_rank]\n",
+        "    #-------#\n",
+        "    tmp['u'] = torch.round(torch.matmul(tmp['u'], torch.diag(tmp['s'])),decimals=6)\n",
+        "    tmp['Vh'] = torch.round(tmp['Vh'].t()[: new_rank,:],decimals=6)\n",
+        "    #-------#\n",
+        "    for key in tmp:tmp[f'{key}'] = tmp[f'{key}'].contiguous()\n",
+        "    lora[up] = tmp['u'].to(device = device , dtype=torch.float32)\n",
+        "    lora[down] = tmp['Vh'].to(device = device , dtype=torch.float32)\n",
+        "    #-------#\n",
+        "    count = count + 2\n",
+        "    print(f'{count} / {NUM_ITEMS}')\n",
+        "  #-------#\n",
+        "  print(f'done!')\n",
+        "  print(f'casting params to fp16....')\n",
+        "  for key in _lora: lora[f'{key}'] = lora[f'{key}'].to(device = device , dtype=torch.float16)\n",
+        "  #-------#\n",
+        "  print(f'done!')\n",
+        "  print(f'saving {savefile_name}...')\n",
+        "  save_file(lora , f'{savefile_name}')\n",
+        "#--------#\n",
+        "\n",
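+        "# Minimal sketch (toy values ; illustration only , not used below) of the\n",
+        "# thresholding that filter_and_save applies : at resolution = 200 the cutoff\n",
+        "# is 200 * 1e-6 = 2e-4 , and entries at or below it in magnitude become 0.\n",
+        "def _mask_sketch(resolution = 200):\n",
+        "  toy = torch.tensor([[3e-4, -1e-4], [-5e-4, 1e-5]])\n",
+        "  thresh = resolution*0.000001\n",
+        "  mask = (torch.abs(toy) > thresh).to(dtype = torch.float32)\n",
+        "  return torch.mul(toy, mask) # tensor([[3e-4, 0.], [-5e-4, 0.]])\n",
+        "#--------#\n",
+        "\n",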
+        "# count_zeros\n",
+        "# Preview helper : prints , per layer delta_W , the share of values that are\n",
+        "# zeroed , positive and negative at the given resolution. Run this to pick a\n",
+        "# resolution before calling filter_and_save or merge_and_save.\n",
+        "def count_zeros(_lora, resolution):\n",
+        "  count = 0\n",
+        "  for key in _lora:count = count + 1\n",
+        "  NUM_ITEMS = count\n",
+        "  count = 0\n",
+        "  #-----#\n",
+        "  thresh = resolution*0.000001 # 1e-6\n",
+        "\n",
+        "  print(f'at resolution = {resolution}e-6 :')\n",
+        "  for key in _lora:\n",
+        "    if f'{key}'.find('alpha') > -1:\n",
+        "      count = count + 1\n",
+        "      continue\n",
+        "    #------#\n",
+        "    if not f'{key}'.find('lora_down') > -1: continue\n",
+        "    up = f'{key}'.replace('lora_down' , 'lora_up')\n",
+        "    down = f'{key}'\n",
+        "    #-------#\n",
+        "    delta_W = torch.matmul(_lora[up],_lora[down]).to(device = device , dtype=torch.float32)\n",
+        "    N = delta_W.numel()\n",
+        "    y = delta_W.flatten().to(device = device , dtype=torch.float32)\n",
+        "    values,indices = torch.sort(y, descending = False) # smallest -> largest elements\n",
+        "    y = torch.ones(y.shape).to(device = device , dtype=torch.float32)\n",
+        "    y[indices[values>thresh]] = 0\n",
+        "    neg_pcnt = round((100*torch.sum(y) / N).item(),2)\n",
+        "    y[indices[values<-thresh]] = 0\n",
+        "    count = count + 2\n",
+        "    pcnt = round((100*torch.sum(y) / N).item(),2)\n",
+        "    neg_pcnt = round(neg_pcnt - pcnt,2) # remove zero % from neg_pcnt\n",
+        "    pos_pcnt = round(100- pcnt - neg_pcnt,2)\n",
+        "    print(f'at {count} / {NUM_ITEMS} : {pcnt} % zeros ,{pos_pcnt} % pos. , {neg_pcnt} % neg ')\n",
+        "    #------#\n",
+        "#-----#\n",
+        "\n",
+        "# merge_and_save\n",
+        "# TIES-style merge of three loras : each element of delta_W gets a sign vote\n",
+        "# across the three loras , the mean of the winning sign's values is kept ,\n",
+        "# and the result is re-factored to (new_rank , new_alpha) with SVD.\n",
+        "def merge_and_save(_lora1 , _lora2 , _lora3, savefile_name, new_rank , new_alpha, resolution):\n",
+        "  lora = {}\n",
+        "  count = 0\n",
+        "  for key in _lora1:count = count + 1\n",
+        "  NUM_ITEMS = count\n",
+        "  count = 0\n",
+        "  thresh = resolution*0.000001 # 1e-6\n",
+        "\n",
+        "  #-------#\n",
+        "  for key in _lora1:\n",
+        "    if f'{key}'.find('alpha') > -1:\n",
+        "      lora[f'{key}'] = torch.tensor(new_alpha).to(device = device , dtype = torch.float32)\n",
+        "      count = count + 1\n",
+        "      print(f'{count} / {NUM_ITEMS}')\n",
+        "      continue\n",
+        "    #------#\n",
+        "    #if count<462:\n",
+        "    #  count = count + 2\n",
+        "    #  continue\n",
+        "    if not f'{key}'.find('lora_down') > -1: continue\n",
+        "    up = f'{key}'.replace('lora_down' , 'lora_up')\n",
+        "    down = f'{key}'\n",
+        "    #-------#\n",
+        "\n",
+        "    # Setup : a zero tensor with the full delta_W shape\n",
+        "    delta_W = torch.matmul(_lora1[up]*0,_lora1[down]*0).to(device = device, dtype=torch.float32)\n",
+        "    tgt_shape = delta_W.shape\n",
+        "    N = delta_W.numel()\n",
+        "    delta_W = torch.zeros(N).to(device = device , dtype=torch.float32)\n",
+        "    #-----#\n",
+        "\n",
+        "    #Positives\n",
+        "    Y = torch.zeros(3,N).to(device = device , dtype=torch.float32)\n",
+        "    Y[0] = torch.matmul(_lora1[up],_lora1[down]).flatten().to(device = device , dtype=torch.float32)\n",
+        "    Y[1] = torch.matmul(_lora2[up],_lora2[down]).flatten().to(device = device , dtype=torch.float32)\n",
+        "    Y[2] = torch.matmul(_lora3[up],_lora3[down]).flatten().to(device = device , dtype=torch.float32)\n",
+        "    Y[torch.abs(Y)<thresh] = 0\n",
+        "    Y = Y.t() # shape (N , 3) : one column per lora\n",
+        "    num = torch.sum(Y>0,dim=1) + 0.001\n",
+        "    elect = torch.sum(Y<0,dim=1) + 0.001\n",
+        "    elect = (num>=elect)\n",
+        "    Y[Y<0] = 0\n",
+        "    Y = torch.sum(Y, dim=1).to(device = device , dtype=torch.float32)\n",
+        "    delta_W[elect] = torch.round((Y[elect]/num[elect]),decimals=6).to(device = device , dtype=torch.float32)\n",
+        "    #-----#\n",
+        "\n",
+        "    #Negatives\n",
+        "    Y = torch.zeros(3,N).to(device = device , dtype=torch.float32)\n",
+        "    Y[0] = torch.matmul(_lora1[up],_lora1[down]).flatten().to(device = device , dtype=torch.float32)\n",
+        "    Y[1] = torch.matmul(_lora2[up],_lora2[down]).flatten().to(device = device , dtype=torch.float32)\n",
+        "    Y[2] = torch.matmul(_lora3[up],_lora3[down]).flatten().to(device = device , dtype=torch.float32)\n",
+        "    Y[torch.abs(Y)<thresh] = 0\n",
+        "    Y = Y.t()\n",
+        "    num = torch.sum(Y<0,dim=1) + 0.001\n",
+        "    elect = torch.sum(Y>0,dim=1) + 0.001\n",
+        "    elect = (elect<num)\n",
+        "    Y[Y>0] = 0\n",
+        "    Y = torch.sum(Y, dim=1).to(device = device , dtype=torch.float32)\n",
+        "    delta_W[elect] = torch.round(Y[elect]/num[elect],decimals=6).to(device = device , dtype=torch.float32)\n",
+        "    #----#\n",
+        "\n",
+        "    # Free up memory prior to SVD\n",
+        "    delta_W = delta_W.unflatten(0,tgt_shape).to(device = device , dtype=torch.float32)\n",
+        "    delta_W = delta_W.clone().detach()\n",
+        "    Y = {}\n",
+        "    num = {}\n",
+        "    elect = {}\n",
+        "    #-----#\n",
+        "\n",
+        "    # Run SVD (Singular Value Decomposition)\n",
+        "    # to get the new lora_up and lora_down for delta_W\n",
+        "    tmp={}\n",
+        "    tmp['u'], tmp['s'], tmp['Vh'] = torch.svd(delta_W)\n",
+        "    tmp['u'] = tmp['u'][:,: new_rank]\n",
+        "    tmp['s'] = tmp['s'][: new_rank]\n",
+        "    tmp['u'] = torch.matmul(tmp['u'], torch.diag(tmp['s']))\n",
+        "    tmp['Vh'] = tmp['Vh'].t()[: new_rank,:]\n",
+        "    for key in tmp:tmp[f'{key}'] = tmp[f'{key}'].contiguous()\n",
+        "    lora[up] = torch.round(tmp['u'],decimals=6).to(device = device , dtype=torch.float32)\n",
+        "    lora[down] = torch.round(tmp['Vh'],decimals=6).to(device = device , dtype=torch.float32)\n",
+        "    #-------#\n",
+        "\n",
+        "    count = count + 2\n",
+        "    print(f'{count} / {NUM_ITEMS}')\n",
+        "    #----#\n",
+        "  #--------#\n",
+        "  print(f'done!')\n",
+        "  print(f'casting params to fp16....')\n",
+        "  for key in lora: lora[f'{key}'] = lora[f'{key}'].to(device = device , dtype=torch.float16)\n",
+        "  #-------#\n",
+        "  print(f'done!')\n",
+        "  print(f'saving {savefile_name}...')\n",
+        "  save_file(lora , f'{savefile_name}')\n",
+        "#------#\n",
+        "\n",
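+        "# Minimal sketch (toy values ; illustration only , not called by the merge)\n",
+        "# of the sign election above : each of the three loras votes with the sign of\n",
+        "# its delta_W value , and the winning sign keeps the mean of its agreeing values.\n",
+        "def _election_sketch():\n",
+        "  Y = torch.tensor([[0.2, 0.1, -0.3], [-0.1, -0.2, -0.1], [0.3, -0.4, 0.5]]) # (N=3 , 3 loras)\n",
+        "  merged = torch.zeros(3)\n",
+        "  num = torch.sum(Y>0,dim=1) + 0.001\n",
+        "  elect = (num >= torch.sum(Y<0,dim=1) + 0.001) # True where positives win\n",
+        "  P = Y.clone(); P[P<0] = 0\n",
+        "  merged[elect] = (torch.sum(P,dim=1)/num)[elect] # ~0.15 and ~0.40 at positions 0 , 2\n",
+        "  num = torch.sum(Y<0,dim=1) + 0.001\n",
+        "  NEG = Y.clone(); NEG[NEG>0] = 0\n",
+        "  merged[~elect] = (torch.sum(NEG,dim=1)/num)[~elect] # ~-0.133 at position 1\n",
+        "  return merged\n",
+        "#--------#\n",
+        "\n",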
         "new_rank = 32\n",
         "new_alpha = math.floor(new_rank/2)\n",
         "resolution = 200\n",
-        "name = 'star_euro_scale'\n",
-        "a = load_file('/content/drive/MyDrive/Saved from Chrome/star_100_r32_16alpha.safetensors')\n",
-        "b = load_file('/content/drive/MyDrive/Saved from Chrome/euro_100_r32_16alpha.safetensors')\n",
-        "c = load_file('/content/scale_200_r32_a16.safetensors')\n",
+        "name = 'yeero_euro_puff'\n",
+        "yeero = load_file('/content/drive/MyDrive/Saved from Chrome/yeero_100_r32_16alpha.safetensors')\n",
+        "euro = load_file('/content/drive/MyDrive/Saved from Chrome/euro_100_r32_16alpha.safetensors')\n",
+        "puff = load_file('/content/drive/MyDrive/Saved from Chrome/puff_200_r32_16alpha.safetensors')\n",
         "savefile_name = f'{name}_{resolution}_r{new_rank}_a{new_alpha}.safetensors'\n",
         "\n",
         "#tgt = load_file(f'/kaggle/input/flux-loras/{name}_{resolution}_r32_16alpha.safetensors')\n",
         "for key in yeero:\n",
-        "  a[f'{key}'] = a[f'{key}'].to(device = device , dtype = torch.float32)\n",
-        "  b[f'{key}'] = b[f'{key}'].to(device = device , dtype = torch.float32)\n",
-        "  c[f'{key}'] = c[f'{key}'].to(device = device , dtype = torch.float32)\n",
+        "  yeero[f'{key}'] = yeero[f'{key}'].to(device = device , dtype = torch.float32)\n",
+        "  euro[f'{key}'] = euro[f'{key}'].to(device = device , dtype = torch.float32)\n",
+        "  puff[f'{key}'] = puff[f'{key}'].to(device = device , dtype = torch.float32)\n",
         "#-----#\n",
         "print(f'for {name}.safetensors at scale = (rank/alpha) = 0.5')\n",
-        "merge_and_save(a,b,c, savefile_name, new_rank , new_alpha, resolution)"
+        "merge_and_save(yeero , euro , puff, savefile_name, new_rank , new_alpha, resolution)\n",
+        "\n",
+        "\n",
+        "#Yeero + Scale + Puff\n",
+        "#filter_and_save(tgt , f'{name}_{resolution}_r{new_rank}_{new_alpha}alpha.safetensors' , new_rank , new_alpha, resolution)\n"
       ],
       "metadata": {
-        "id": "l9RX4PLtqzkZ",
-        "outputId": "261ab4f5-972f-451e-a097-9ca9c14c9539",
+        "id": "SKYzFxehkfG8",
         "colab": {
           "base_uri": "https://localhost:8080/"
-        }
+        },
+        "outputId": "70f308e8-6632-42ca-e3ce-607e56813778",
+        "collapsed": true
       },
-      "execution_count": 7,
+      "execution_count": null,
       "outputs": [
         {
           "output_type": "stream",
           "name": "stdout",
           "text": [
-            "for star_euro_scale.safetensors at scale = (rank/alpha) = 0.5\n",
+            "for yeero_euro_puff.safetensors at scale = (rank/alpha) = 0.5\n",
             "1 / 912\n",
             "3 / 912\n",
             "4 / 912\n",
@@ -1638,7 +2681,7 @@
             "done!\n",
             "casting params to fp16....\n",
             "done!\n",
-            "saving star_euro_scale_200_r32_a16.safetensors...\n"
+            "saving yeero_euro_puff_200_r32_a16.safetensors...\n"
           ]
         }
       ]