import spaces
import gradio as gr
from huggingface_hub import InferenceClient
from torch import nn
from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM
from pathlib import Path
import torch
import torch.amp.autocast_mode
from PIL import Image
import os
import torchvision.transforms.functional as TVF
from dotenv import load_dotenv

# Load environment variables from .env file
load_dotenv()
USERNAME = os.getenv("USERNAME")
PASSWORD = os.getenv("PASSWORD")
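
# Credentials for the demo login gate; they are checked against user input in login() below.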
CLIP_PATH = "google/siglip-so400m-patch14-384"
MODEL_PATH = "meta-llama/Meta-Llama-3.1-8B"
CHECKPOINT_PATH = Path("9em124t2-499968")
TITLE = "<h1><center>JoyCaption Alpha One (2024-09-20a)</center></h1>"

CAPTION_TYPE_MAP = {
    ("descriptive", "formal", False, False): ["Write a descriptive caption for this image in a formal tone."],
    ("descriptive", "formal", False, True): ["Write a descriptive caption for this image in a formal tone within {word_count} words."],
    ("descriptive", "formal", True, False): ["Write a {length} descriptive caption for this image in a formal tone."],
    ("descriptive", "informal", False, False): ["Write a descriptive caption for this image in a casual tone."],
    ("descriptive", "informal", False, True): ["Write a descriptive caption for this image in a casual tone within {word_count} words."],
    ("descriptive", "informal", True, False): ["Write a {length} descriptive caption for this image in a casual tone."],
    ("training_prompt", "formal", False, False): ["Write a stable diffusion prompt for this image."],
    ("training_prompt", "formal", False, True): ["Write a stable diffusion prompt for this image within {word_count} words."],
    ("training_prompt", "formal", True, False): ["Write a {length} stable diffusion prompt for this image."],
    ("rng-tags", "formal", False, False): ["Write a list of Booru tags for this image."],
    ("rng-tags", "formal", False, True): ["Write a list of Booru tags for this image within {word_count} words."],
    ("rng-tags", "formal", True, False): ["Write a {length} list of Booru tags for this image."],
    ("style_prompt", "formal", False, False): ["Generate a detailed style prompt for this image, including lens type, film stock, composition notes, lighting aspects, and any special photographic techniques."],
    ("style_prompt", "formal", False, True): ["Generate a detailed style prompt for this image within {word_count} words, including lens type, film stock, composition notes, lighting aspects, and any special photographic techniques."],
    ("style_prompt", "formal", True, False): ["Generate a {length} detailed style prompt for this image, including lens type, film stock, composition notes, lighting aspects, and any special photographic techniques."],
}

HF_TOKEN = os.environ.get("HF_TOKEN", None)
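
# CAPTION_TYPE_MAP keys are (caption_type, caption_tone, is_word_length, is_numeric_length);
# stream_chat() builds the same tuple from the UI selections to pick a prompt template.


# ImageAdapter projects the vision tower's hidden states into the text model's embedding space
# and brackets the image tokens with learned <|image_start|>/<|image_end|> embeddings
# (a third learned embedding is used for <|eot_id|>, see get_eot_embedding()).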
class ImageAdapter(nn.Module):
    def __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):
        super().__init__()
        self.deep_extract = deep_extract

        if self.deep_extract:
            input_features = input_features * 5

        self.linear1 = nn.Linear(input_features, output_features)
        self.activation = nn.GELU()
        self.linear2 = nn.Linear(output_features, output_features)
        self.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)
        self.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))

        # Mode token
        #self.mode_token = nn.Embedding(n_modes, output_features)
        #self.mode_token.weight.data.normal_(mean=0.0, std=0.02)  # Matches HF's implementation of llama3

        # Other tokens (<|image_start|>, <|image_end|>, <|eot_id|>)
        self.other_tokens = nn.Embedding(3, output_features)
        self.other_tokens.weight.data.normal_(mean=0.0, std=0.02)  # Matches HF's implementation of llama3

    def forward(self, vision_outputs: torch.Tensor):
        if self.deep_extract:
            x = torch.concat((
                vision_outputs[-2],
                vision_outputs[3],
                vision_outputs[7],
                vision_outputs[13],
                vision_outputs[20],
            ), dim=-1)
            assert len(x.shape) == 3, f"Expected 3, got {len(x.shape)}"  # batch, tokens, features
            assert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}"
        else:
            x = vision_outputs[-2]

        x = self.ln1(x)

        if self.pos_emb is not None:
            assert x.shape[-2:] == self.pos_emb.shape, f"Expected {self.pos_emb.shape}, got {x.shape[-2:]}"
            x = x + self.pos_emb

        x = self.linear1(x)
        x = self.activation(x)
        x = self.linear2(x)

        # Mode token
        #mode_token = self.mode_token(mode)
        #assert mode_token.shape == (x.shape[0], mode_token.shape[1], x.shape[2]), f"Expected {(x.shape[0], 1, x.shape[2])}, got {mode_token.shape}"
        #x = torch.cat((x, mode_token), dim=1)

        # <|image_start|>, IMAGE, <|image_end|>
        other_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))
        assert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}"
        x = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)

        return x

    def get_eot_embedding(self):
        return self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)
# Load CLIP
print("Loading CLIP")
clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)
clip_model = AutoModel.from_pretrained(CLIP_PATH)
clip_model = clip_model.vision_model

if (CHECKPOINT_PATH / "clip_model.pt").exists():
    print("Loading VLM's custom vision model")
    checkpoint = torch.load(CHECKPOINT_PATH / "clip_model.pt", map_location='cpu')
    checkpoint = {k.replace("_orig_mod.module.", ""): v for k, v in checkpoint.items()}
    clip_model.load_state_dict(checkpoint)
    del checkpoint

clip_model.eval()
clip_model.requires_grad_(False)
clip_model.to("cuda")

# Tokenizer
print("Loading tokenizer")
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, use_fast=False)
assert isinstance(tokenizer, (PreTrainedTokenizer, PreTrainedTokenizerFast)), f"Tokenizer is of type {type(tokenizer)}"
# LLM
print("Loading LLM")
if (CHECKPOINT_PATH / "text_model").exists():
    print("Loading VLM's custom text model")
    text_model = AutoModelForCausalLM.from_pretrained(CHECKPOINT_PATH / "text_model", device_map=0, torch_dtype=torch.bfloat16)
else:
    text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto", torch_dtype=torch.bfloat16)

text_model.eval()
# Image Adapter
print("Loading image adapter")
image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)
image_adapter.load_state_dict(torch.load(CHECKPOINT_PATH / "image_adapter.pt", map_location="cpu", weights_only=True))
image_adapter.eval()
image_adapter.to("cuda")
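
# Note: the adapter above is built with ln1=False, pos_emb=False, num_image_tokens=38, deep_extract=False;
# these flags have to line up with how image_adapter.pt was trained, otherwise load_state_dict() will fail.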
def preprocess_image(input_image: Image.Image) -> torch.Tensor:
    """
    Preprocess the input image for the CLIP (SigLIP) vision model:
    resize to 384x384 and normalize pixel values to [-1, 1].
    """
    image = input_image.convert("RGB").resize((384, 384), Image.LANCZOS)  # ensure 3-channel RGB input
    pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0
    pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])
    return pixel_values.to('cuda')
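
# generate_caption() assembles the sequence directly in embedding space:
# [BOS] + projected image tokens + prompt tokens + <|eot_id|>, then lets the LLM continue from there.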
def generate_caption(text_model, tokenizer, image_features, prompt_str: str, max_new_tokens: int = 300) -> str:
    """
    Generate a caption based on the image features and prompt.
    """
    prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)

    prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))
    embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))
    eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)

    inputs_embeds = torch.cat([
        embedded_bos.expand(image_features.shape[0], -1, -1),
        image_features.to(dtype=embedded_bos.dtype),
        prompt_embeds.expand(image_features.shape[0], -1, -1),
        eot_embed.expand(image_features.shape[0], -1, -1),
    ], dim=1)

    input_ids = torch.cat([
        torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),
        torch.zeros((1, image_features.shape[1]), dtype=torch.long),
        prompt,
        torch.tensor([[tokenizer.convert_tokens_to_ids("<|eot_id|>")]], dtype=torch.long),
    ], dim=1).to('cuda')
    attention_mask = torch.ones_like(input_ids)

    generate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=max_new_tokens, do_sample=True, suppress_tokens=None)

    generate_ids = generate_ids[:, input_ids.shape[1]:]
    if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids("<|eot_id|>"):
        generate_ids = generate_ids[:, :-1]

    return tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0].strip()
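
# stream_chat() is the single entry point used by the UI: it builds the prompt from the UI selections,
# runs the image through the vision model and ImageAdapter, and asks the LLM for the caption.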
def stream_chat(input_image: Image.Image, caption_type: str, caption_tone: str, caption_length: str | int, lens_type: str = "", film_stock: str = "", composition_style: str = "", lighting_aspect: str = "", special_technique: str = "", color_effect: str = "") -> str:
    """
    Generate a caption, training prompt, tags, or a style prompt for image generation based on the input image and parameters.
    """
    # Check if an image has been uploaded
    if input_image is None:
        return "Error: Please upload an image before generating a caption."

    torch.cuda.empty_cache()

    # Normalize the requested length: "any" -> None, numeric strings -> int,
    # word-style lengths ("very short", "short", ...) stay as strings.
    length = None if caption_length == "any" else caption_length
    if isinstance(length, str):
        try:
            length = int(length)
        except ValueError:
            pass

    if caption_type in ["rng-tags", "training_prompt", "style_prompt"]:
        caption_tone = "formal"

    prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))
    if prompt_key not in CAPTION_TYPE_MAP:
        raise ValueError(f"Invalid caption type: {prompt_key}")
    if caption_type == "style_prompt":
        # For style prompt, we'll create a custom prompt for the LLM
        base_prompt = "Analyze the given image and create a detailed Stable Diffusion prompt for generating a new, creative image inspired by it. "
        base_prompt += "The prompt should describe the main elements, style, and mood of the image, "
        base_prompt += "but also introduce creative variations or enhancements. "
        base_prompt += "Include specific details about the composition, lighting, and overall atmosphere. "

        # Add custom settings to the prompt
        if lens_type:
            lens_type_key = lens_type.split(":")[0].strip()
            base_prompt += f"Incorporate the effect of a {lens_type_key} lens ({lens_types_info[lens_type_key]}). "
        if film_stock:
            film_stock_key = film_stock.split(":")[0].strip()
            base_prompt += f"Apply the characteristics of {film_stock_key} film stock ({film_stocks_info[film_stock_key]}). "
        if composition_style:
            composition_style_key = composition_style.split(":")[0].strip()
            base_prompt += f"Use a {composition_style_key} composition style ({composition_styles_info[composition_style_key]}). "
        if lighting_aspect:
            lighting_aspect_key = lighting_aspect.split(":")[0].strip()
            base_prompt += f"Implement {lighting_aspect_key} lighting ({lighting_aspects_info[lighting_aspect_key]}). "
        if special_technique:
            special_technique_key = special_technique.split(":")[0].strip()
            base_prompt += f"Apply the {special_technique_key} technique ({special_techniques_info[special_technique_key]}). "
        if color_effect:
            color_effect_key = color_effect.split(":")[0].strip()
            base_prompt += f"Use a {color_effect_key} color effect ({color_effects_info[color_effect_key]}). "

        if length:
            base_prompt += f"The final prompt should be approximately {length} words long. "
        base_prompt += "Format the output as a single paragraph without numbering or bullet points."
        prompt_str = base_prompt
    else:
        prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)

    # Debugging: Print the constructed prompt string
    print(f"Constructed Prompt: {prompt_str}")
    pixel_values = preprocess_image(input_image)

    with torch.amp.autocast_mode.autocast('cuda', enabled=True):
        vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)
        image_features = vision_outputs.hidden_states
        embedded_images = image_adapter(image_features)
        embedded_images = embedded_images.to('cuda')
    # Reuse the globally loaded text model instead of reloading it from MODEL_PATH on every request.
    # Debugging: Print the prompt string before passing to generate_caption
    print(f"Prompt passed to generate_caption: {prompt_str}")

    caption = generate_caption(text_model, tokenizer, embedded_images, prompt_str)
    return caption
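
# Custom CSS: left-align text, use the full page width, and left-align dropdown/checkbox/radio labels.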
css = """
h1, h2, h3, h4, h5, h6, p, li, ul, ol, a, img {
    text-align: left;
}
img {
    display: inline-block;
    vertical-align: middle;
    margin-right: 10px;
    max-width: 100%;
    height: auto;
}
.centered-image {
    display: block;
    margin-left: auto;
    margin-right: auto;
    max-width: 100%;
    height: auto;
}
ul, ol {
    padding-left: 20px;
}
.gradio-container {
    max-width: 100% !important;
    padding: 0 !important;
}
.gradio-row {
    margin-left: 0 !important;
    margin-right: 0 !important;
}
.gradio-column {
    padding-left: 0 !important;
    padding-right: 0 !important;
}
/* Left-align dropdown text */
.gradio-dropdown > div {
    text-align: left !important;
}
/* Left-align checkbox labels */
.gradio-checkbox label {
    text-align: left !important;
}
/* Left-align radio button labels */
.gradio-radio label {
    text-align: left !important;
}
"""
# Add detailed descriptions for each option
lens_types_info = {
    "Standard": "A versatile lens with a field of view similar to human vision.",
    "Wide-angle": "Captures a wider field of view, great for landscapes and architecture. Applies moderate to strong lens effect with image warp.",
    "Telephoto": "Used for distant subjects, gives an 'award-winning' or 'National Geographic' look. Creates interesting effects when prompted.",
    "Macro": "For extreme close-up photography, revealing tiny details.",
    "Fish-eye": "Ultra-wide-angle lens that creates a strong bubble-like distortion. Generates panoramic photos with the entire image warping into a bubble.",
    "Tilt-shift": "Allows adjusting the plane of focus, creating a 'miniature' effect. Known for the 'diorama miniature look'.",
    "Zoom lens": "Variable focal length lens. Often zooms in on the subject, perfect for creating a base for inpainting. Interesting effect on landscapes with motion blur.",
    "GoPro": "Wide-angle lens with clean digital look. Excludes film grain and most filter effects, resulting in natural colors and regular saturation.",
    "Pinhole camera": "Creates a unique, foggy, low-detail, historic photograph look. Used since the 1850s, with peak popularity in the 1930s."
}

film_stocks_info = {
    "Kodak Portra": "Professional color negative film known for its natural skin tones and low contrast.",
    "Fujifilm Velvia": "Slide film known for vibrant colors and high saturation, popular among landscape photographers.",
    "Ilford Delta": "Black and white film known for its fine grain and high sharpness.",
    "Kodak Tri-X": "Classic high-speed black and white film, known for its distinctive grain and wide exposure latitude.",
    "Fujifilm Provia": "Color reversal film known for its natural color reproduction and fine grain.",
    "Cinestill": "Color photos with fine/low grain and higher than average resolution. Colors are slightly oversaturated or slightly desaturated.",
    "Ektachrome": "Color photos with fine/low to moderate grain. Colors on the colder part of spectrum or regular, with normal or slightly higher saturation.",
    "Ektar": "Modern Kodak film. Color photos with little to no grain. Results look like regular modern photography with artistic angles.",
    "Film Washi": "Mostly black and white photos with fine/low to moderate grain. Occasionally gives colored photos with low saturation. Distinct style with high black contrast and soft camera lens effect.",
    "Fomapan": "Black and white photos with fine/low to moderate grain, highly artistic exposure and angles. Adds very soft lens effect without distortion, dark photo vignette.",
    "Fujicolor": "Color photos with fine/low to moderate grain. Colors are either very oversaturated or slightly desaturated, with entire color hue shifted in a very distinct manner.",
    "Holga": "Color photos with high grain. Colors are either very oversaturated or slightly desaturated. Distinct contrast of black. Often applies photographic vignette.",
    "Instax": "Instant color photos similar to Polaroid but clearer. Near perfect colors, regular saturation, fine/low to medium grain.",
    "Lomography": "Color photos with high grain. Colors are either very oversaturated or slightly desaturated. Distinct contrast of black. Often applies photographic vignette.",
    "Kodachrome": "Color photos with moderate grain. Colors on either colder part of spectrum or regular, with normal or slightly higher saturation.",
    "Rollei": "Mostly black and white photos, sometimes color with fine/low grain. Can be sepia colored or have unusual hues and desaturation. Great for landscapes."
}

composition_styles_info = {
    "Rule of Thirds": "Divides the frame into a 3x3 grid, placing key elements along the lines or at their intersections.",
    "Golden Ratio": "Uses a spiral based on the golden ratio to create a balanced and aesthetically pleasing composition.",
    "Symmetry": "Creates a mirror-like balance in the image, often used for architectural or nature photography.",
    "Leading Lines": "Uses lines within the frame to draw the viewer's eye to the main subject or through the image.",
    "Framing": "Uses elements within the scene to create a frame around the main subject.",
    "Minimalism": "Simplifies the composition to its essential elements, often with a lot of negative space.",
    "Fill the Frame": "The main subject dominates the entire frame, leaving little to no background.",
    "Negative Space": "Uses empty space around the subject to create a sense of simplicity or isolation.",
    "Centered Composition": "Places the main subject in the center of the frame, creating a sense of stability or importance.",
    "Diagonal Lines": "Uses diagonal elements to create a sense of movement or dynamic tension in the image.",
    "Triangular Composition": "Arranges elements in the frame to form a triangle, creating a sense of stability and harmony.",
    "Radial Balance": "Arranges elements in a circular pattern around a central point, creating a sense of movement or completeness."
}

lighting_aspects_info = {
    "Natural light": "Uses available light from the sun or sky, often creating soft, even illumination.",
    "Studio lighting": "Controlled artificial lighting setup, allowing for precise manipulation of light and shadow.",
    "Back light": "Light source behind the subject, creating silhouettes or rim lighting effects.",
    "Split light": "Strong light source at 90-degree angle, lighting one half of the subject while leaving the other in shadow.",
    "Broad light": "Light source at an angle to the subject, producing well-lit photographs with soft to moderate shadows.",
    "Dim light": "Weak or distant light source, creating lower than average brightness and often dramatic images.",
    "Flash photography": "Uses a brief, intense burst of light. Can be fill flash (even lighting) or harsh flash (strong contrasts).",
    "Sunlight": "Direct light from the sun, often creating strong contrasts and warm tones.",
    "Moonlight": "Soft, cool light from the moon, often creating a mysterious or romantic atmosphere.",
    "Spotlight": "Focused beam of light illuminating a specific area, creating high contrast between light and shadow.",
    "High-key lighting": "Bright, even lighting with minimal shadows, creating a light and airy feel.",
    "Low-key lighting": "Predominantly dark tones with selective lighting, creating a moody or dramatic atmosphere.",
    "Rembrandt lighting": "Classic portrait lighting technique creating a triangle of light on the cheek of the subject."
}

special_techniques_info = {
    "Double exposure": "Superimposes two exposures to create a single image, often resulting in a dreamy or surreal effect.",
    "Long exposure": "Uses a long shutter speed to capture motion over time, often creating smooth, blurred effects for moving elements.",
    "Multiple exposure": "Superimposes multiple exposures, multiplying the subject or its key elements across the image.",
    "HDR": "High Dynamic Range imaging, combining multiple exposures to capture a wider range of light and dark tones.",
    "Bokeh effect": "Creates a soft, out-of-focus background, often with circular highlights.",
    "Silhouette": "Captures the outline of a subject against a brighter background, creating a dramatic contrast.",
    "Panning": "Follows a moving subject with the camera, creating a sharp subject with a blurred background.",
    "Light painting": "Uses long exposure and moving light sources to 'paint' with light in the image.",
    "Infrared photography": "Captures light in the infrared spectrum, often resulting in surreal, otherworldly images.",
    "Ultraviolet photography": "Captures light in the ultraviolet spectrum, often revealing hidden patterns or creating a strong violet glow.",
    "Kirlian photography": "High-voltage photographic technique that captures corona discharges around objects, creating a glowing effect.",
    "Thermography": "Captures infrared radiation to create images based on temperature differences, resulting in false-color heat maps.",
    "Astrophotography": "Specialized technique for capturing astronomical objects and celestial events, often resulting in stunning starry backgrounds.",
    "Underwater photography": "Captures images beneath the surface of water, often in pools, seas, or aquariums.",
    "Aerial photography": "Captures images from an elevated position, such as from drones, helicopters, or planes.",
    "Macro photography": "Extreme close-up photography, revealing tiny details not visible to the naked eye."
}

color_effects_info = {
    "Black and white": "Removes all color, leaving only shades of gray.",
    "Sepia": "Reddish-brown monochrome effect, often associated with vintage photography.",
    "Monochrome": "Uses variations of a single color.",
    "Vintage color": "Muted or faded color palette reminiscent of old photographs.",
    "Cross-processed": "Deliberate processing of film in the wrong chemicals, creating unusual color shifts.",
    "Desaturated": "Reduces the intensity of all colors in the image.",
    "Vivid colors": "Increases the saturation and intensity of colors.",
    "Pastel colors": "Soft, pale colors with a light and airy feel.",
    "High contrast": "Emphasizes the difference between light and dark areas in the image.",
    "Low contrast": "Reduces the difference between light and dark areas, creating a softer look.",
    "Color splash": "Converts most of the image to black and white while leaving one or more elements in color."
}
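
# Dropdown entries are rendered as "Name: description"; stream_chat() splits on the first ":" to recover the key.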
def get_dropdown_choices(info_dict):
    return [f"{key}: {value}" for key, value in info_dict.items()]

def login(username, password):
    if username == USERNAME and password == PASSWORD:
        return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(value="Login successful! You can now access the QR Code Art Generator tab.", visible=True)
    else:
        return gr.update(visible=False), gr.update(visible=True), gr.update(visible=True), gr.update(value="Invalid username or password. Please try again.", visible=True)
# Gradio interface
with gr.Blocks(theme="Hev832/Applio", css=css, fill_width=True, fill_height=True) as demo:
    with gr.Tab("Welcome"):
        with gr.Row():
            with gr.Column(scale=2):
                gr.Markdown(
                    """
<img src="https://cdn-uploads.huggingface.co/production/uploads/64740cf7485a7c8e1bd51ac9/LVZnwLV43UUvKu3HORqSs.webp" alt="UDG" width="250" style="max-width: 100%; height: auto;" class="centered-image">

# 🎨 Underground Digital's Caption Captain: AI-Powered Art Inspiration

## Accelerate Your Creative Workflow with Intelligent Image Analysis

This innovative tool empowers Yamamoto's artists to quickly generate descriptive captions,<br>
training prompts, and tags from existing artwork, fueling the creative process for GenAI models.

## 🚀 How It Works:
1. **Upload Your Inspiration**: Drop in an image (e.g., a charcoal horse picture) that embodies your desired style.
2. **Choose Your Output**: Select from descriptive captions, training prompts, or tags.
3. **Customize the Results**: Adjust tone, length, and other parameters to fine-tune the output.
4. **Generate and Iterate**: Click 'Caption' to analyze your image and use the results to inspire new creations.
                    """
                )
            with gr.Column(scale=1):
                with gr.Row():
                    gr.Markdown(
                        """
Login below using the Yamamoto internal<br>
username and password to access the full app.<br>
Once logged in, a new tab named<br>
"QR Code Art Generator" will appear.
                        """
                    )
                with gr.Row():
                    username = gr.Textbox(label="Username", placeholder="Enter your username", value="ugd")
                with gr.Row():
                    password = gr.Textbox(label="Password", type="password", placeholder="Enter your password", value="ugd!")
                with gr.Row():
                    login_button = gr.Button("Login", size="sm")
                login_message = gr.Markdown(visible=False)
    with gr.Tab("Caption Captain") as app_container:
        with gr.Accordion("How to Use Caption Captain", open=False):
            gr.Markdown("""
# How to Use Caption Captain

<img src="https://cdn-uploads.huggingface.co/production/uploads/64740cf7485a7c8e1bd51ac9/Ce_Z478iOXljvpZ_Fr_Y7.png" alt="Captain" width="100" style="max-width: 100%; height: auto;">

Hello, artist! Let's make some fun captions for your pictures. Here's how:

1. **Pick a Picture**: Find a cool picture you want to talk about and upload it.
2. **Choose What You Want**:
   - **Caption Type**:
     * "Descriptive" tells you what's in the picture
     * "Training Prompt" helps computers make similar pictures
     * "RNG-Tags" gives you short words about the picture
     * "Style Prompt" creates detailed prompts for image generation
3. **Pick a Tone** (for "Descriptive" captions only):
   - "Formal" sounds like a teacher talking
   - "Informal" sounds like a friend chatting
4. **Decide How Long**:
   - "Any" lets the computer decide
   - Or pick a size from "very short" to "very long"
   - You can even choose a specific number of words!
5. **Advanced Options** (for "Style Prompt" only):
   - Choose lens type, film stock, composition, and lighting details
6. **Make the Caption**: Click the "Make My Caption!" button and watch the magic happen!

Remember, have fun and be creative with your captions!

## Tips for Great Captions:
- Try different types to see what you like best
- Experiment with formal and informal tones for fun variations
- Adjust the length to get just the right amount of detail
- For "Style Prompt", play with the advanced options for more specific results
- If you don't like a caption, just click "Make My Caption!" again for a new one

Have a great time captioning your art!
            """)
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(type="pil", label="Input Image")

                caption_type = gr.Dropdown(
                    choices=["descriptive", "training_prompt", "rng-tags", "style_prompt"],
                    label="Caption Type",
                    value="descriptive",
                )
                caption_tone = gr.Dropdown(
                    choices=["formal", "informal"],
                    label="Caption Tone",
                    value="formal",
                )
                caption_length = gr.Dropdown(
                    choices=["any", "very short", "short", "medium-length", "long", "very long"] +
                            [str(i) for i in range(20, 261, 10)],
                    label="Caption Length",
                    value="any",
                )
                gr.Markdown("**Note:** Caption tone doesn't affect `rng-tags`, `training_prompt`, or `style_prompt`.")

            with gr.Column():
                error_message = gr.Markdown(visible=False)  # Hidden error banner, shown only when generation fails
                output_caption = gr.Textbox(label="Generated Caption")
                run_button = gr.Button("Make My Caption!")
                # Container for advanced options
                with gr.Column(visible=False) as advanced_options:
                    gr.Markdown("### Advanced Options for Style Prompt")
                    lens_type = gr.Dropdown(
                        choices=get_dropdown_choices(lens_types_info),
                        label="Lens Type",
                        info="Select a lens type to define the perspective and field of view of the image."
                    )
                    film_stock = gr.Dropdown(
                        choices=get_dropdown_choices(film_stocks_info),
                        label="Film Stock",
                        info="Choose a film stock to determine the color, grain, and overall look of the image."
                    )
                    composition_style = gr.Dropdown(
                        choices=get_dropdown_choices(composition_styles_info),
                        label="Composition Style",
                        info="Select a composition style to guide the arrangement of elements in the image."
                    )
                    lighting_aspect = gr.Dropdown(
                        choices=get_dropdown_choices(lighting_aspects_info),
                        label="Lighting Aspect",
                        info="Choose a lighting style to define the mood and atmosphere of the image."
                    )
                    special_technique = gr.Dropdown(
                        choices=get_dropdown_choices(special_techniques_info),
                        label="Special Technique",
                        info="Select a special photographic technique to add unique effects to the image."
                    )
                    color_effect = gr.Dropdown(
                        choices=get_dropdown_choices(color_effects_info),
                        label="Color Effect",
                        info="Choose a color effect to alter the overall color palette of the image."
                    )
        def update_style_options(caption_type):
            return gr.update(visible=caption_type == "style_prompt")

        caption_type.change(update_style_options, inputs=[caption_type], outputs=[advanced_options])

        def process_and_handle_errors(input_image, caption_type, caption_tone, caption_length, lens_type, film_stock, composition_style, lighting_aspect, special_technique, color_effect):
            try:
                result = stream_chat(input_image, caption_type, caption_tone, caption_length, lens_type, film_stock, composition_style, lighting_aspect, special_technique, color_effect)
                return gr.update(visible=False), result
            except Exception as e:
                return gr.update(visible=True, value=f"Error: {str(e)}"), ""

        run_button.click(
            fn=process_and_handle_errors,
            inputs=[input_image, caption_type, caption_tone, caption_length, lens_type, film_stock, composition_style, lighting_aspect, special_technique, color_effect],
            outputs=[error_message, output_caption]
        )

if __name__ == "__main__":
    demo.launch()