BullseyeMxP committed
Commit fb6bf54 • 1 Parent(s): 271b85d

Create backup_app_solution.py

Files changed (1)
  1. backup_app_solution.py +477 -0
backup_app_solution.py ADDED
@@ -0,0 +1,477 @@
+ import spaces
+ import gradio as gr
+ import triton
+ from huggingface_hub import InferenceClient
+ from torch import nn
+ from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM, BitsAndBytesConfig
+ from pathlib import Path
+ import torch
+ import torch.amp.autocast_mode
+ from PIL import Image
+ import os
+ import torchvision.transforms.functional as TVF
+ import gc
+
+ CLIP_PATH = "google/siglip-so400m-patch14-384"
+ CHECKPOINT_PATH = Path("/content/joy-caption-alpha-two/cgrkzexw-599808")
+ TITLE = """
+ <div style="text-align: center; max-width: 700px; margin: 0 auto;">
+     <h1 style="color: #FF00FF; font-size: 3em; margin-bottom: 0.5em;">Bullseye's JoyCaption Alpha Two</h1>
+     <p style="color: #00FFFF; font-size: 1.2em;">Unleash the power of AI-driven image captioning!</p>
+ </div>
+ """
+
+ CAPTION_TYPE_MAP = {
+     "Descriptive": ["Write a descriptive caption for this image in a formal tone.", "Write a descriptive caption for this image in a formal tone within {word_count} words.", "Write a {length} descriptive caption for this image in a formal tone."],
+     "Descriptive (Informal)": ["Write a descriptive caption for this image in a casual tone.", "Write a descriptive caption for this image in a casual tone within {word_count} words.", "Write a {length} descriptive caption for this image in a casual tone."],
+     "Training Prompt": ["Write a stable diffusion prompt for this image.", "Write a stable diffusion prompt for this image within {word_count} words.", "Write a {length} stable diffusion prompt for this image."],
+     "MidJourney": ["Write a MidJourney prompt for this image.", "Write a MidJourney prompt for this image within {word_count} words.", "Write a {length} MidJourney prompt for this image."],
+     "Booru tag list": ["Write a list of Booru tags for this image.", "Write a list of Booru tags for this image within {word_count} words.", "Write a {length} list of Booru tags for this image."],
+     "Booru-like tag list": ["Write a list of Booru-like tags for this image.", "Write a list of Booru-like tags for this image within {word_count} words.", "Write a {length} list of Booru-like tags for this image."],
+     "Art Critic": ["Analyze this image like an art critic would with information about its composition, style, symbolism, the use of color, light, any artistic movement it might belong to, etc.", "Analyze this image like an art critic would with information about its composition, style, symbolism, the use of color, light, any artistic movement it might belong to, etc. Keep it within {word_count} words.", "Analyze this image like an art critic would with information about its composition, style, symbolism, the use of color, light, any artistic movement it might belong to, etc. Keep it {length}."],
+     "Product Listing": ["Write a caption for this image as though it were a product listing.", "Write a caption for this image as though it were a product listing. Keep it under {word_count} words.", "Write a {length} caption for this image as though it were a product listing."],
+     "Social Media Post": ["Write a caption for this image as if it were being used for a social media post.", "Write a caption for this image as if it were being used for a social media post. Limit the caption to {word_count} words.", "Write a {length} caption for this image as if it were being used for a social media post."],
+ }
+
+ HF_TOKEN = os.environ.get("HF_TOKEN", None)
+
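+ # ImageAdapter projects the SigLIP vision hidden states into the text model's embedding space
+ # and brackets them with learned <|image_start|>/<|image_end|> embeddings.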
+ class ImageAdapter(nn.Module):
+     def __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):
+         super().__init__()
+         self.deep_extract = deep_extract
+
+         if self.deep_extract:
+             input_features = input_features * 5
+
+         self.linear1 = nn.Linear(input_features, output_features)
+         self.activation = nn.GELU()
+         self.linear2 = nn.Linear(output_features, output_features)
+         self.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)
+         self.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))
+
+         # Other tokens (<|image_start|>, <|image_end|>, <|eot_id|>)
+         self.other_tokens = nn.Embedding(3, output_features)
+         self.other_tokens.weight.data.normal_(mean=0.0, std=0.02)  # Matches HF's implementation of llama3
+
+     def forward(self, vision_outputs: torch.Tensor):
+         if self.deep_extract:
+             x = torch.concat((
+                 vision_outputs[-2],
+                 vision_outputs[3],
+                 vision_outputs[7],
+                 vision_outputs[13],
+                 vision_outputs[20],
+             ), dim=-1)
+             assert len(x.shape) == 3, f"Expected 3, got {len(x.shape)}"  # batch, tokens, features
+             assert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}"
+         else:
+             x = vision_outputs[-2]
+
+         x = self.ln1(x)
+
+         if self.pos_emb is not None:
+             assert x.shape[-2:] == self.pos_emb.shape, f"Expected {self.pos_emb.shape}, got {x.shape[-2:]}"
+             x = x + self.pos_emb
+
+         x = self.linear1(x)
+         x = self.activation(x)
+         x = self.linear2(x)
+
+         # <|image_start|>, IMAGE, <|image_end|>
+         other_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))
+         assert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}"
+         x = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)
+
+         return x
+
+     def get_eot_embedding(self):
+         return self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)
+
+ # Load CLIP
+ print("Loading CLIP")
+ clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)
+ clip_model = AutoModel.from_pretrained(CLIP_PATH, torch_dtype=torch.float16)
+ clip_model = clip_model.vision_model
+
+ assert (CHECKPOINT_PATH / "clip_model.pt").exists()
+ print("Loading VLM's custom vision model")
+ checkpoint = torch.load(CHECKPOINT_PATH / "clip_model.pt", map_location='cpu')
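+ # Strip the "_orig_mod.module." prefix that torch.compile / DataParallel wrappers add to
+ # state-dict keys so the weights line up with the plain vision model.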
+ checkpoint = {k.replace("_orig_mod.module.", ""): v for k, v in checkpoint.items()}
+ clip_model.load_state_dict(checkpoint)
+ del checkpoint
+
+ clip_model.eval()
+ clip_model.requires_grad_(False)
+ clip_model.to("cuda")
+ clip_model = torch.compile(clip_model)
+
+ # Tokenizer
+ print("Loading tokenizer")
+ tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT_PATH / "text_model", use_fast=True)
+ assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f"Tokenizer is of type {type(tokenizer)}"
+
+ # LLM
+ print("Loading LLM")
+ print("Loading VLM's custom text model")
+
+ # Configure 4-bit quantization with more aggressive settings
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.float16,
+     bnb_4bit_use_double_quant=True,
+     llm_int8_enable_fp32_cpu_offload=True
+ )
+
+ text_model = AutoModelForCausalLM.from_pretrained(
+     CHECKPOINT_PATH / "text_model",
+     device_map="auto",
+     quantization_config=bnb_config,
+     torch_dtype=torch.float16,
+     low_cpu_mem_usage=True
+ )
+
+ # Enable memory efficient attention
+ text_model.config.use_memory_efficient_attention = True
+ text_model.gradient_checkpointing_enable()
+ text_model.eval()
+ # text_model = torch.compile(text_model)  # Removed torch.compile for text_model
+
+ # Image Adapter
+ print("Loading image adapter")
+ image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)
+ image_adapter.load_state_dict(torch.load(CHECKPOINT_PATH / "image_adapter.pt", map_location="cpu"))
+ image_adapter.eval()
+ image_adapter.to("cuda")
+ image_adapter = torch.compile(image_adapter)
+
+ # Optimize CLIP model
+ clip_model = clip_model.half()  # Convert to FP16
+ clip_model.eval()
+ clip_model.requires_grad_(False)
+ clip_model = torch.compile(clip_model)
+
+ # Optimize image adapter
+ image_adapter = image_adapter.half()  # Convert to FP16
+ image_adapter.eval()
+ image_adapter.requires_grad_(False)
+ image_adapter = torch.compile(image_adapter)
+
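+ # On Hugging Face ZeroGPU Spaces, spaces.GPU() reserves a GPU for the duration of each call to this function.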
+ @spaces.GPU()
+ @torch.no_grad()
+ def stream_chat(input_image: Image.Image, caption_type: str, caption_length: str | int, extra_options: list[str], name_input: str, custom_prompt: str) -> tuple[str, str]:
+     # Clear memory at the start
+     torch.cuda.empty_cache()
+     gc.collect()
+
+     # Build prompt string
+     length = None if caption_length == "any" else caption_length
+     if isinstance(length, str):
+         try:
+             length = int(length)
+         except ValueError:
+             pass
+
+     # Build prompt
+     if length is None:
+         map_idx = 0
+     elif isinstance(length, int):
+         map_idx = 1
+     elif isinstance(length, str):
+         map_idx = 2
+     else:
+         raise ValueError(f"Invalid caption length: {length}")
+
+     prompt_str = CAPTION_TYPE_MAP[caption_type][map_idx]
+
+     # Add extra options
+     if len(extra_options) > 0:
+         prompt_str += " " + " ".join(extra_options)
+
+     # Add name, length, word_count
+     prompt_str = prompt_str.format(name=name_input, length=caption_length, word_count=caption_length)
+
+     if custom_prompt.strip() != "":
+         prompt_str = custom_prompt.strip()
+
+     # Resize image to exact dimensions needed
+     image = input_image.resize((384, 384), Image.LANCZOS)
+     image = image.convert('RGB')
+     pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0
+     pixel_values = TVF.normalize(pixel_values, [0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
+     pixel_values = pixel_values.to('cuda', dtype=torch.float16)
+
+     # Process image with optimized memory usage
+     with torch.amp.autocast('cuda', dtype=torch.float16):
+         vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)
+         embedded_images = image_adapter(vision_outputs.hidden_states)
+         embedded_images = embedded_images.to('cuda', dtype=torch.float16)
+
+     # Build the conversation with minimal overhead
+     convo = [
+         {"role": "system", "content": "You are a helpful image captioner."},
+         {"role": "user", "content": prompt_str},
+     ]
+
+     # Format and tokenize efficiently
+     convo_string = tokenizer.apply_chat_template(convo, tokenize=False, add_generation_prompt=True)
+     convo_tokens = tokenizer.encode(convo_string, return_tensors="pt", add_special_tokens=False, truncation=False)
+     prompt_tokens = tokenizer.encode(prompt_str, return_tensors="pt", add_special_tokens=False, truncation=False)
+
+     convo_tokens = convo_tokens.squeeze(0)
+     prompt_tokens = prompt_tokens.squeeze(0)
+
+     # Calculate injection point
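+     # The second <|eot_id|> closes the user turn; subtracting the prompt length gives the position
+     # right before the prompt tokens, which is where the image embeddings are spliced in.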
+     eot_id_indices = (convo_tokens == tokenizer.convert_tokens_to_ids("<|eot_id|>")).nonzero(as_tuple=True)[0].tolist()
+     preamble_len = eot_id_indices[1] - prompt_tokens.shape[0]
+
+     # Prepare input tensors efficiently
+     convo_tokens = convo_tokens.unsqueeze(0).to('cuda')
+     convo_embeds = text_model.model.embed_tokens(convo_tokens)
+
+     input_embeds = torch.cat([
+         convo_embeds[:, :preamble_len],
+         embedded_images,
+         convo_embeds[:, preamble_len:],
+     ], dim=1).to('cuda', dtype=torch.float16)
+
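+     # Zero token ids act as placeholders at the image positions so input_ids and attention_mask
+     # have the same length as inputs_embeds; only the embeddings carry the image information.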
+     input_ids = torch.cat([
+         convo_tokens[:, :preamble_len],
+         torch.zeros((1, embedded_images.shape[1]), dtype=torch.long, device='cuda'),
+         convo_tokens[:, preamble_len:],
+     ], dim=1)
+     attention_mask = torch.ones_like(input_ids)
+
+     # Generate with optimized settings
+     with torch.amp.autocast('cuda', dtype=torch.float16):
+         generate_ids = text_model.generate(
+             input_ids,
+             inputs_embeds=input_embeds,
+             attention_mask=attention_mask,
+             max_new_tokens=300,
+             do_sample=True,
+             use_cache=True,
+             pad_token_id=tokenizer.pad_token_id,
+             num_beams=1,  # Disable beam search for faster generation
+             temperature=0.7,  # Lower temperature for more focused generation
+             top_p=0.9,  # Nucleus sampling for efficiency
+             repetition_penalty=1.2,  # Prevent repetition
+         )
+
+     # Process output efficiently
+     generate_ids = generate_ids[:, input_ids.shape[1]:]
+     if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids("<|eot_id|>"):
+         generate_ids = generate_ids[:, :-1]
+
+     caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
+
+     # Clear memory
+     del vision_outputs, embedded_images, input_embeds, generate_ids
+     torch.cuda.empty_cache()
+     gc.collect()
+
+     return prompt_str, caption.strip()
+
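+ # Caption every supported image in a directory and write each caption next to the image as a .txt file.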
+ def process_directory(directory_path, caption_type, caption_length, extra_options, name_input, custom_prompt):
+     processed_images = []
+     captions = []
+
+     for filename in os.listdir(directory_path):
+         if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.bmp')):
+             img_path = os.path.join(directory_path, filename)
+             img = Image.open(img_path)
+
+             _, caption = stream_chat(img, caption_type, caption_length, extra_options, name_input, custom_prompt)
+
+             # Save caption to a .txt file
+             txt_filename = os.path.splitext(filename)[0] + '.txt'
+             txt_path = os.path.join(directory_path, txt_filename)
+             with open(txt_path, 'w', encoding='utf-8') as f:
+                 f.write(caption)
+
+             processed_images.append(img_path)
+             captions.append(caption)
+
+     return processed_images, "\n\n".join(captions)  # Join captions with double newline for readability
+
+ def process_and_display(images, caption_type, caption_length, extra_options, name_input, custom_prompt):
+     processed_images = []
+     captions = []
+
+     for img_file in images:
+         img = Image.open(img_file.name)
+         _, caption = stream_chat(img, caption_type, caption_length, extra_options, name_input, custom_prompt)
+         processed_images.append(img_file.name)
+         captions.append(caption)
+
+     return processed_images, "\n\n".join(captions)  # Join captions with double newline for readability
+
+ def process_input(input_images, directory_path, caption_type, caption_length, extra_options, name_input, custom_prompt):
+     if directory_path:
+         return process_directory(directory_path, caption_type, caption_length, extra_options, name_input, custom_prompt)
+     elif input_images:
+         return process_and_display(input_images, caption_type, caption_length, extra_options, name_input, custom_prompt)
+     else:
+         return [], ""
+
+ # Custom CSS for a futuristic, neon-inspired theme
+ custom_css = """
+ @import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700&display=swap');
+
+ body {
+     background-color: #000000;
+     color: #00FFFF;
+     font-family: 'Orbitron', sans-serif;
+ }
+
+ .gradio-container {
+     background: linear-gradient(45deg, #1a1a2e, #16213e);
+     border: 2px solid #FF00FF;
+     border-radius: 15px;
+     box-shadow: 0 0 20px #FF00FF;
+ }
+
+ .input-box, .output-box {
+     background-color: rgba(15, 52, 96, 0.7);
+     border: 1px solid #00FFFF;
+     border-radius: 10px;
+     padding: 15px;
+     margin: 10px 0;
+     box-shadow: 0 0 10px #00FFFF;
+ }
+
+ .input-box label, .output-box label {
+     color: #FF00FF;
+     font-weight: bold;
+     text-shadow: 0 0 5px #FF00FF;
+ }
+
+ .gr-button {
+     background: linear-gradient(45deg, #4a0e4e, #7a1e82);
+     border: none;
+     color: #FFFFFF;
+     font-weight: bold;
+     text-transform: uppercase;
+     transition: all 0.3s ease;
+ }
+
+ .gr-button:hover {
+     background: linear-gradient(45deg, #7a1e82, #4a0e4e);
+     box-shadow: 0 0 15px #FF00FF;
+     transform: scale(1.05);
+ }
+
+ .gr-dropdown {
+     background-color: #0f3460;
+     border: 1px solid #00FFFF;
+     color: #FFFFFF;
+ }
+
+ .gr-checkbox-group {
+     background-color: rgba(15, 52, 96, 0.7);
+     border: 1px solid #00FFFF;
+     border-radius: 10px;
+     padding: 10px;
+ }
+
+ .gr-checkbox-group label {
+     color: #FFFFFF;
+ }
+
+ .gr-form {
+     border: 1px solid #FF00FF;
+     border-radius: 10px;
+     padding: 20px;
+     margin: 10px 0;
+     background: rgba(26, 26, 46, 0.7);
+ }
+
+ .gr-input {
+     background-color: #0f3460;
+     border: 1px solid #00FFFF;
+     color: #FFFFFF;
+     border-radius: 5px;
+ }
+
+ .gr-input:focus {
+     box-shadow: 0 0 10px #00FFFF;
+ }
+
+ .gr-panel {
+     border: 1px solid #FF00FF;
+     border-radius: 10px;
+     background: rgba(22, 33, 62, 0.7);
+ }
+ """
+
+ with gr.Blocks(css=custom_css) as demo:
+     gr.HTML(TITLE)
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             input_images = gr.File(file_count="multiple", label="📸 Upload Images", elem_classes="input-box")
+             directory_input = gr.Textbox(label="📁 Or Enter Directory Path", elem_classes="input-box")
+
+         with gr.Column(scale=2):
+             with gr.Group():
+                 caption_type = gr.Dropdown(
+                     choices=list(CAPTION_TYPE_MAP.keys()),
+                     label="🎭 Caption Type",
+                     value="Descriptive",
+                     elem_classes="input-box"
+                 )
+
+                 caption_length = gr.Dropdown(
+                     choices=["any", "very short", "short", "medium-length", "long", "very long"] +
+                             [str(i) for i in range(20, 261, 10)],
+                     label="📏 Caption Length",
+                     value="long",
+                     elem_classes="input-box"
+                 )
+
+             with gr.Accordion("🔧 Advanced Options", open=False):
+                 extra_options = gr.CheckboxGroup(
+                     choices=[
+                         "If there is a person/character in the image you must refer to them as {name}.",
+                         "Do NOT include information about people/characters that cannot be changed (like ethnicity, gender, etc), but do still include changeable attributes (like hair style).",
+                         "Include information about lighting.",
+                         "Include information about camera angle.",
+                         "Include information about whether there is a watermark or not.",
+                         "Include information about whether there are JPEG artifacts or not.",
+                         "If it is a photo you MUST include information about what camera was likely used and details such as aperture, shutter speed, ISO, etc.",
+                         "Do NOT include anything sexual; keep it PG.",
+                         "Do NOT mention the image's resolution.",
+                         "You MUST include information about the subjective aesthetic quality of the image from low to very high.",
+                         "Include information on the image's composition style, such as leading lines, rule of thirds, or symmetry.",
+                         "Do NOT mention any text that is in the image.",
+                         "Specify the depth of field and whether the background is in focus or blurred.",
+                         "If applicable, mention the likely use of artificial or natural lighting sources.",
+                         "Do NOT use any ambiguous language.",
+                         "Include whether the image is sfw, suggestive, or nsfw.",
+                         "ONLY describe the most important elements of the image."
+                     ],
+                     label="Extra Options",
+                     elem_classes="input-box"
+                 )
+
+             name_input = gr.Textbox(label="👤 Person/Character Name (if applicable)", elem_classes="input-box")
+             gr.Markdown("**Note:** Name input is only used if an Extra Option is selected that requires it.")
+
+             custom_prompt = gr.Textbox(label="🎨 Custom Prompt (optional, will override all other settings)", elem_classes="input-box")
+             gr.Markdown("**Note:** Alpha Two is not a general instruction follower and will not follow prompts outside its training data well. Use this feature with caution.")
+
+     with gr.Row():
+         run_button = gr.Button("🚀 Generate Captions", elem_classes="gr-button")
+
+     with gr.Row():
+         output_gallery = gr.Gallery(label="Processed Images", elem_classes="output-box")
+         output_text = gr.Textbox(label="Generated Captions", elem_classes="output-box", lines=10)
+
+     run_button.click(
+         fn=process_input,
+         inputs=[input_images, directory_input, caption_type, caption_length, extra_options, name_input, custom_prompt],
+         outputs=[output_gallery, output_text]
+     )
+
+ if __name__ == "__main__":
+     demo.launch(share=True)