{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1742303655056},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740768524003},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740657473013},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739796923572},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739735627072}],"gpuType":"T4"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU"},"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"Dwr7gk5OwuGC"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","source":["!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","!pip install peft bitsandbytes\n","from huggingface_hub import InferenceClient\n","from torch import nn\n","from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM\n","import torch\n","import torch.amp.autocast_mode\n","from PIL import Image\n","import os\n","import torchvision.transforms.functional as TVF\n","\n","CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n","MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B-bnb-4bit\"\n","CAPTION_TYPE_MAP = {\n","    (\"descriptive\", \"formal\", False, False): [\"Describe the image in 400 words\"],\n","    (\"descriptive\", \"formal\", False, True): [\"Write a descriptive caption for this image in a formal tone within {word_count} words.\"],\n","    (\"descriptive\", \"formal\", True, False): [\"Write a {length} descriptive caption for this image in a formal tone.\"],\n","    (\"descriptive\", \"informal\", False, False): [\"Write a descriptive caption for this image in a casual tone.\"],\n","    (\"descriptive\", \"informal\", False, True): [\"Write a descriptive caption for this image in a casual tone within {word_count} words.\"],\n","    (\"descriptive\", \"informal\", True, False): [\"Write a {length} descriptive caption for this image in a casual tone.\"],\n","    (\"training_prompt\", \"formal\", False, False): [\"Write a stable diffusion prompt for this image.\"],\n","    (\"training_prompt\", \"formal\", False, True): [\"Write a stable 
diffusion prompt for this image within {word_count} words.\"],\n","    (\"training_prompt\", \"formal\", True, False): [\"Write a {length} stable diffusion prompt for this image.\"],\n","    (\"rng-tags\", \"formal\", False, False): [\"Write a list of Booru tags for this image.\"],\n","    (\"rng-tags\", \"formal\", False, True): [\"Write a list of Booru tags for this image within {word_count} words.\"],\n","    (\"rng-tags\", \"formal\", True, False): [\"Write a {length} list of Booru tags for this image.\"],\n","}\n","\n","class ImageAdapter(nn.Module):\n","\tdef __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):\n","\t\tsuper().__init__()\n","\t\tself.deep_extract = deep_extract\n","\t\tif self.deep_extract:\n","\t\t\tinput_features = input_features * 5\n","\t\tself.linear1 = nn.Linear(input_features, output_features)\n","\t\tself.activation = nn.GELU()\n","\t\tself.linear2 = nn.Linear(output_features, output_features)\n","\t\tself.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)\n","\t\tself.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))\n","\t\tself.other_tokens = nn.Embedding(3, output_features)\n","\t\tself.other_tokens.weight.data.normal_(mean=0.0, std=0.02)   # Matches HF's implementation of llama3\n","\tdef forward(self, vision_outputs: torch.Tensor):\n","\t\tif self.deep_extract:\n","\t\t\tx = torch.concat((\n","\t\t\t\tvision_outputs[-2],\n","\t\t\t\tvision_outputs[3],\n","\t\t\t\tvision_outputs[7],\n","\t\t\t\tvision_outputs[13],\n","\t\t\t\tvision_outputs[20],\n","\t\t\t), dim=-1)\n","\t\t\tassert len(x.shape) == 3, f\"Expected 3, got {len(x.shape)}\"  # batch, tokens, features\n","\t\t\tassert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f\"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}\"\n","\t\telse:\n","\t\t\tx = vision_outputs[-2]\n","\t\tx = self.ln1(x)\n","\t\tif self.pos_emb is not None:\n","\t\t\tassert x.shape[-2:] == self.pos_emb.shape, f\"Expected {self.pos_emb.shape}, got {x.shape[-2:]}\"\n","\t\t\tx = x + self.pos_emb\n","\t\tx = self.linear1(x)\n","\t\tx = self.activation(x)\n","\t\tx = self.linear2(x)\n","\t\tother_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))\n","\t\tassert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f\"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}\"\n","\t\tx = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)\n","\t\treturn x\n","\tdef get_eot_embedding(self):\n","\t\treturn self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)\n","\n","clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n","clip_model = AutoModel.from_pretrained(CLIP_PATH)\n","clip_model = clip_model.vision_model\n","checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n","checkpoint = {k.replace(\"_orig_mod.module.\", \"\"): v for k, v in checkpoint.items()}\n","clip_model.load_state_dict(checkpoint)\n","# del checkpoint\n","clip_model.eval()\n","clip_model.requires_grad_(False)\n","clip_model.to(\"cuda\")\n","tokenizer = AutoTokenizer.from_pretrained(f'{MODEL_PATH}')\n","#tokenizer = AutoTokenizer.from_pretrained(\"unsloth/Meta-Llama-3.1-8B-bnb-4bit\")\n","assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f\"Tokenizer is of type {type(tokenizer)}\"\n","text_model = 
AutoModelForCausalLM.from_pretrained(MODEL_PATH, load_in_8bit=True, device_map=\"auto\", torch_dtype=torch.bfloat16)\n","text_model.load_adapter(\"/content/joy/text_model\")\n","text_model.eval()\n","image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)\n","image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", map_location=\"cpu\"))\n","image_adapter.eval()\n","image_adapter.to(\"cuda\")\n","\n","@torch.no_grad()\n","def stream_chat(input_image: Image.Image, caption_type: str, caption_tone: str, caption_length: str | int) -> str:\n","    torch.cuda.empty_cache()\n","    length = None if caption_length == \"any\" else caption_length\n","    if isinstance(length, str):\n","        try:\n","            length = int(length)\n","        except ValueError:\n","            pass\n","    if caption_type == \"rng-tags\" or caption_type == \"training_prompt\":\n","        caption_tone = \"formal\"\n","    prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))\n","    if prompt_key not in CAPTION_TYPE_MAP:\n","        raise ValueError(f\"Invalid caption type: {prompt_key}\")\n","    prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)\n","    print(f\"Prompt: {prompt_str}\")\n","    image = input_image.resize((384, 384), Image.LANCZOS)\n","    pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0\n","    pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])\n","    pixel_values = pixel_values.to('cuda')\n","    prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n","    with torch.amp.autocast_mode.autocast('cuda', enabled=True):\n","        vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)\n","        image_features = vision_outputs.hidden_states\n","        embedded_images = image_adapter(image_features)\n","        embedded_images = embedded_images.to('cuda')\n","    prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))\n","    assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n","    embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))\n","    eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)\n","    inputs_embeds = torch.cat([\n","        embedded_bos.expand(embedded_images.shape[0], -1, -1),\n","        embedded_images.to(dtype=embedded_bos.dtype),\n","        prompt_embeds.expand(embedded_images.shape[0], -1, -1),\n","        eot_embed.expand(embedded_images.shape[0], -1, -1),\n","    ], dim=1)\n","    input_ids = torch.cat([\n","        torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n","        torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n","        prompt,\n","        torch.tensor([[tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")]], dtype=torch.long),\n","    ], dim=1).to('cuda')\n","    attention_mask = torch.ones_like(input_ids)\n","    generate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=300, do_sample=True, suppress_tokens=None)   # Uses the default which is temp=0.6, top_p=0.9\n","    generate_ids = generate_ids[:, input_ids.shape[1]:]\n","    if generate_ids[0][-1] == 
tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids(\"<|eot_id|>\"):\n","        generate_ids = generate_ids[:, :-1]\n","    caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n","    caption = f'{caption.strip()}'.replace('Prompt: Describe the image in 400 words','')\n","    return caption"],"metadata":{"id":"0zaheBIsw_dc"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import os\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n","  if os.path.exists(folder)==False:\n","    os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","my_mkdirs(f'{tgt_folder}')\n","\n","\n","src_folder = '/content/'\n","suffixes = ['.png', '.jpeg' , '.webp' , '.jpg']\n","num = 1\n","for filename in os.listdir(src_folder):\n","  for suffix in suffixes:\n","    if not filename.find(suffix)>-1: continue\n","    print(filename)\n","    %cd {src_folder}\n","    input_image = Image.open(f\"{filename}\").convert('RGB')\n","    caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n","    print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n","    print(caption)\n","    #---------#\n","    %cd {tgt_folder}\n","    f = open(f\"{num}.txt\", \"w\")\n","    f.write(f'{caption}')\n","    f.close()\n","    input_image.save(f'{num}.jpeg', \"JPEG\")\n","    num = num+1"],"metadata":{"id":"J811UZU6xZEo"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import shutil\n","shutil.make_archive('/content/tmp', 'zip', '/content/tmp')"],"metadata":{"id":"5EztLCjkPq4U"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["\n","from google.colab import runtime\n","#runtime.unassign() #Disconnect from runtime"],"metadata":{"id":"kM4TpfdB1amt"},"execution_count":null,"outputs":[]}]}
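,
{"cell_type":"code","source":[
"# Optional: copy the finished archive to Google Drive (mounted in the first cell).\n",
"# A minimal sketch, assuming the Colab paths used above; the Drive folder name\n",
"# 'joycaption_output' is a hypothetical example -- rename it to taste.\n",
"import os, shutil\n",
"drive_folder = '/content/drive/MyDrive/joycaption_output'  # hypothetical target folder\n",
"os.makedirs(drive_folder, exist_ok=True)\n",
"shutil.copy(f'{home_directory}tmp.zip', drive_folder)"
],"metadata":{"id":"copy2drive"},"execution_count":null,"outputs":[]}
]}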