Update app.py
app.py
CHANGED
app.py (previous version; removed lines marked -)

@@ -10,13 +10,13 @@ import torch
 10 |   from PIL import Image
 11 |   from transformers import AutoProcessor, AutoModelForCausalLM, pipeline
 12 |
 13 | - #
 14 |   try:
 15 |       subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 16 |   except:
 17 |       print("Flash attention installation skipped")
 18 |
 19 | - # OpenAI
 20 |   openai_api_key = os.getenv("OPENAI_API")
 21 |   if openai_api_key:
 22 |       client = OpenAI(api_key=openai_api_key)

@@ -24,22 +24,22 @@ else:
 24 |       print("Warning: OPENAI_API key not found in environment variables")
 25 |       client = None
 26 |
 27 | - #
 28 |   translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
 29 |
 30 | - # Florence
 31 |   device = "cuda" if torch.cuda.is_available() else "cpu"
 32 |   florence_model = AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True).to(device).eval()
 33 |   florence_processor = AutoProcessor.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True)
 34 |
 35 | - #
 36 |   def translate_prompt(prompt):
 37 | -     if any("\uAC00" <= char <= "\uD7A3" for char in prompt): #
 38 |           translated = translator(prompt, max_length=512)[0]['translation_text']
 39 |           return translated
 40 |       return prompt
 41 |
 42 | - # Florence
 43 |   def florence_caption(image):
 44 |       if not isinstance(image, Image.Image):
 45 |           image = Image.fromarray(image)

@@ -61,7 +61,7 @@ def florence_caption(image):
 61 |       )
 62 |       return parsed_answer["<MORE_DETAILED_CAPTION>"]
 63 |
 64 | - # JSON
 65 |   def load_json_file(file_name):
 66 |       file_path = os.path.join("data", file_name)
 67 |       try:

@@ -71,7 +71,7 @@ def load_json_file(file_name):
 71 |           print(f"Warning: Could not load {file_name}. Using empty list.")
 72 |           return []
 73 |
 74 | - # JSON
 75 |   ARTFORM = load_json_file("artform.json")
 76 |   PHOTO_TYPE = load_json_file("photo_type.json")
 77 |   BODY_TYPES = load_json_file("body_types.json")

@@ -91,7 +91,7 @@ COMPOSITION = load_json_file("composition.json")
 91 |   POSE = load_json_file("pose.json")
 92 |   BACKGROUND = load_json_file("background.json")
 93 |
 94 | - #
 95 |   class PromptGenerator:
 96 |       def __init__(self, seed=None):
 97 |           self.rng = random.Random(seed)

@@ -171,7 +171,7 @@ class PromptGenerator:
171 |           components = []
172 |           custom = kwargs.get("custom", "")
173 |           if custom:
174 | -             custom = translate_prompt(custom) #
175 |               components.append(custom)
176 |           is_photographer = kwargs.get("artform", "").lower() == "photography" or (
177 |               kwargs.get("artform", "").lower() == "random"

@@ -350,7 +350,7 @@ class OpenAIGenerationNode:
350 |           if not self.client:
351 |               return "Error: OpenAI API key not found. Please set OPENAI_API environment variable."
352 |
353 | -         #
354 |           openai_model = "gpt-4.1-mini"
355 |
356 |           default_happy_prompt = """Create a detailed visually descriptive caption of this description, which will be used as a prompt for a text to image AI system (caption only, no instructions like "create an image").Remove any mention of digital artwork or artwork style. Give detailed visual descriptions of the character(s), including ethnicity, skin tone, expression etc. Imagine using keywords for a still for someone who has aphantasia. Describe the image style, e.g. any photographic or art styles / techniques utilized. Make sure to fully describe all aspects of the cinematography, with abundant technical details and visual descriptions. If there is more than one image, combine the elements and characters from all of the images creatively into a single cohesive composition with a single background, inventing an interaction between the characters. Be creative in combining the characters into a single cohesive scene. Focus on two primary characters (or one) and describe an interesting interaction between them, such as a hug, a kiss, a fight, giving an object, an emotional reaction / interaction. If there is more than one background in the images, pick the most appropriate one. Your output is only the caption itself, no comments or extra formatting. The caption is in a single long paragraph. If you feel the images are inappropriate, invent a new scene / characters inspired by these. Additionally, incorporate a specific movie director's visual style and describe the lighting setup in detail, including the type, color, and placement of light sources to create the desired mood and atmosphere. Always frame the scene, including details about the film grain, color grading, and any artifacts or characteristics specific."""

@@ -383,7 +383,7 @@ You are allowed to make up film and branding names, and do them like 80's, 90's
383 |           char_limit = compression_chars[compression_level]
384 |           base_prompt += f" Compress the output to be concise while retaining key visual details. MAX OUTPUT SIZE no more than {char_limit} characters."
385 |
386 | -         #
387 |           response = self.client.chat.completions.create(
388 |               model=openai_model,
389 |               messages=[

@@ -401,10 +401,10 @@ You are allowed to make up film and branding names, and do them like 80's, 90's
401 |               top_p=1
402 |           )
403 |
404 | -         #
405 |           output = response.choices[0].message.content
406 |
407 | -         #
408 |           if ": " in output:
409 |               output = output.split(": ", 1)[1].strip()
410 |           elif output.lower().startswith("here"):

@@ -412,7 +412,7 @@ You are allowed to make up film and branding names, and do them like 80's, 90's
412 |               if len(sentences) > 1:
413 |                   output = ". ".join(sentences[1:]).strip()
414 |
415 | -         #
416 |           self.save_prompt(output)
417 |
418 |           return output

@@ -421,7 +421,7 @@ You are allowed to make up film and branding names, and do them like 80's, 90's
421 |           print(f"An error occurred: {e}")
422 |           return f"Error occurred while processing the request: {str(e)}"
423 |
424 | - #
425 |   css = """
426 |   footer {
427 |       visibility: hidden;

@@ -438,7 +438,8 @@ footer {
438 |       display: flex;
439 |       justify-content: center;
440 |       gap: 15px;
441 | -     margin: 20px
442 |   }
443 |   .prompt-generator-container {
444 |       background: #f8f9fa;

@@ -487,16 +488,16 @@ def create_interface():
487 |       openai_node = OpenAIGenerationNode()
488 |
489 |       with gr.Blocks(theme="soft", css=css) as demo:
490 | -         #
491 |           with gr.Row(elem_classes="main-title"):
492 |               gr.HTML("""
493 |               <h1>🎨 Flux Prompt Generator</h1>
494 |               <p style="text-align: center; color: #7f8c8d; font-size: 1.1em;">
495 | -
496 |               </p>
497 |               """)
498 |
499 | -         #
500 |           with gr.Row(elem_classes="badge-container"):
501 |               gr.HTML("""
502 |               <a href="https://huggingface.co/spaces/openfree/Best-AI" target="_blank">

@@ -507,102 +508,102 @@ def create_interface():
507 |               </a>
508 |               """)
509 |
510 | -         #
511 |           with gr.Row():
512 | -             #
513 |               with gr.Column(scale=2):
514 | -                 gr.HTML('<div class="section-header">🎯
515 | -                 seed = gr.Slider(0, 30000, label='
516 | -                 custom = gr.Textbox(label="✏️
517 | -                 subject = gr.Textbox(label="📝
518 |
519 | -                 #
520 | -                 gr.HTML('<div class="section-header">⚡
521 |                   global_option = gr.Radio(
522 |                       ["Disabled", "Random", "No Figure Rand"],
523 | -                     label="
524 |                       value="Disabled",
525 | -                     info="
526 |                   )
527 |
528 | -                 #
529 | -                 with gr.Accordion("🎨
530 | -                     artform = gr.Dropdown(["disabled", "random"] + ARTFORM, label="
531 | -                     photo_type = gr.Dropdown(["disabled", "random"] + PHOTO_TYPE, label="
532 |
533 | -                 with gr.Accordion("👤
534 | -                     body_types = gr.Dropdown(["disabled", "random"] + BODY_TYPES, label="
535 | -                     default_tags = gr.Dropdown(["disabled", "random"] + DEFAULT_TAGS, label="
536 | -                     roles = gr.Dropdown(["disabled", "random"] + ROLES, label="
537 | -                     hairstyles = gr.Dropdown(["disabled", "random"] + HAIRSTYLES, label="
538 | -                     clothing = gr.Dropdown(["disabled", "random"] + CLOTHING, label="
539 |
540 | -                 with gr.Accordion("🏞️
541 | -                     place = gr.Dropdown(["disabled", "random"] + PLACE, label="
542 | -                     lighting = gr.Dropdown(["disabled", "random"] + LIGHTING, label="
543 | -                     composition = gr.Dropdown(["disabled", "random"] + COMPOSITION, label="
544 | -                     pose = gr.Dropdown(["disabled", "random"] + POSE, label="
545 | -                     background = gr.Dropdown(["disabled", "random"] + BACKGROUND, label="
546 |
547 | -                 with gr.Accordion("🌟
548 | -                     additional_details = gr.Dropdown(["disabled", "random"] + ADDITIONAL_DETAILS, label="
549 | -                     photography_styles = gr.Dropdown(["disabled", "random"] + PHOTOGRAPHY_STYLES, label="
550 | -                     device = gr.Dropdown(["disabled", "random"] + DEVICE, label="
551 | -                     photographer = gr.Dropdown(["disabled", "random"] + PHOTOGRAPHER, label="
552 | -                     artist = gr.Dropdown(["disabled", "random"] + ARTIST, label="
553 | -                     digital_artform = gr.Dropdown(["disabled", "random"] + DIGITAL_ARTFORM, label="
554 |
555 | -                 generate_button = gr.Button("🚀
556 |
557 | -             #
558 |               with gr.Column(scale=2):
559 | -                 with gr.Accordion("🖼️
560 | -                     input_image = gr.Image(label="
561 | -                     caption_output = gr.Textbox(label="
562 |                   with gr.Row():
563 | -                     create_caption_button = gr.Button("🔍
564 | -                     add_caption_button = gr.Button("➕
565 |
566 | -                 gr.HTML('<div class="section-header">📝
567 | -                 output = gr.Textbox(label="
568 | -                 with gr.Accordion("
569 |                       t5xxl_output = gr.Textbox(label="T5XXL", lines=2)
570 |                       clip_l_output = gr.Textbox(label="CLIP L", lines=2)
571 |                       clip_g_output = gr.Textbox(label="CLIP G", lines=2)
572 |
573 | -             #
574 |               with gr.Column(scale=2):
575 | -                 gr.HTML('<div class="section-header">🤖 OpenAI
576 | -                 gr.HTML("<p style='text-align: center; color: #95a5a6;'
577 |
578 |                   with gr.Row():
579 | -                     happy_talk = gr.Checkbox(label="😊 Happy Talk", value=True, info="
580 | -                     compress = gr.Checkbox(label="🗜️
581 |
582 |                   compression_level = gr.Radio(
583 |                       ["soft", "medium", "hard"],
584 | -                     label="
585 |                       value="hard",
586 |                       visible=True
587 |                   )
588 |
589 | -                 poster = gr.Checkbox(label="🎬
590 |
591 |                   custom_base_prompt = gr.Textbox(
592 | -                     label="🛠️
593 |                       lines=5,
594 | -                     placeholder="
595 |                   )
596 |
597 | -                 generate_text_button = gr.Button("✨
598 |
599 |                   text_output = gr.Textbox(
600 | -                     label="🎯 AI
601 |                       lines=10,
602 |                       elem_classes="output-container"
603 |                   )
604 |
605 | -         #
606 |           def create_caption(image):
607 |               if image is not None:
608 |                   return florence_caption(image)

@@ -635,14 +636,14 @@ def create_interface():
635 |               outputs=text_output
636 |           )
637 |
638 | -         #
639 |           compress.change(
640 |               lambda x: gr.update(visible=x),
641 |               inputs=[compress],
642 |               outputs=[compression_level]
643 |           )
644 |
645 | -         #
646 |           def update_all_options(choice):
647 |               updates = []
648 |               dropdown_list = [

@@ -657,7 +658,7 @@ def create_interface():
657 |               updates = [gr.update(value="random") for _ in dropdown_list]
658 |           else: # No Figure Random
659 |               updates = []
660 | -             #
661 |               character_dropdowns = [photo_type, body_types, default_tags, roles, hairstyles, clothing, pose, additional_details]
662 |               other_dropdowns = [artform, place, lighting, composition, background, photography_styles, device, photographer, artist, digital_artform]
663 |
app.py (updated version; added lines marked +)

 10 |   from PIL import Image
 11 |   from transformers import AutoProcessor, AutoModelForCausalLM, pipeline
 12 |
 13 | + # Skip installation process if not needed
 14 |   try:
 15 |       subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 16 |   except:
 17 |       print("Flash attention installation skipped")
 18 |
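One nuance in the block above: passing env= to subprocess.run replaces the child's entire environment, so the pip invocation loses PATH and every other inherited variable, which can make the install fail silently. A defensive variant (a sketch, not the Space's code) merges the flag into the current environment and skips the work when flash_attn is already importable:

    import importlib.util
    import os
    import subprocess

    # Install flash-attn only when it is not already importable
    # (hypothetical guard; the app above installs unconditionally).
    if importlib.util.find_spec("flash_attn") is None:
        subprocess.run(
            'pip install flash-attn --no-build-isolation',
            env={**os.environ, 'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"},  # keep PATH etc.
            shell=True,
            check=False,  # never abort app startup on a failed build
        )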
 19 | + # Initialize OpenAI client (API key from environment variable)
 20 |   openai_api_key = os.getenv("OPENAI_API")
 21 |   if openai_api_key:
 22 |       client = OpenAI(api_key=openai_api_key)

 24 |       print("Warning: OPENAI_API key not found in environment variables")
 25 |       client = None
 26 |
 27 | + # Add translation model
 28 |   translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
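Helsinki-NLP/opus-mt-ko-en is a MarianMT checkpoint; the translation pipeline returns a list of dicts, which is the interface translate_prompt relies on below. A quick smoke test of that shape (the exact output wording will vary with the model version):

    result = translator("아름다운 일몰", max_length=512)
    print(result[0]['translation_text'])  # roughly "A beautiful sunset"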
 29 |
 30 | + # Initialize Florence model
 31 |   device = "cuda" if torch.cuda.is_available() else "cpu"
 32 |   florence_model = AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True).to(device).eval()
 33 |   florence_processor = AutoProcessor.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True)
 34 |
 35 | + # Korean prompt translation function
 36 |   def translate_prompt(prompt):
 37 | +     if any("\uAC00" <= char <= "\uD7A3" for char in prompt): # If Korean is included
 38 |           translated = translator(prompt, max_length=512)[0]['translation_text']
 39 |           return translated
 40 |       return prompt
 41 |
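The guard in translate_prompt tests against the Hangul Syllables block (U+AC00 to U+D7A3), so any precomposed Korean syllable triggers translation while pure-ASCII prompts pass through untouched; standalone jamo (U+1100 to U+11FF) would not match, which is usually fine for real input. A quick check of the behavior:

    assert any("\uAC00" <= ch <= "\uD7A3" for ch in "귀여운 고양이")   # Korean -> translated
    assert not any("\uAC00" <= ch <= "\uD7A3" for ch in "a cute cat")  # English -> passthrough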
 42 | + # Florence caption function
 43 |   def florence_caption(image):
 44 |       if not isinstance(image, Image.Image):
 45 |           image = Image.fromarray(image)

 61 |       )
 62 |       return parsed_answer["<MORE_DETAILED_CAPTION>"]
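The hunk elides lines 46-60 of florence_caption. Based on the Florence-2 model card (an assumption, not the Space's exact code), the missing middle presumably builds inputs with the processor, generates, and post-processes with the <MORE_DETAILED_CAPTION> task token, roughly:

    task = "<MORE_DETAILED_CAPTION>"
    inputs = florence_processor(text=task, images=image, return_tensors="pt").to(device)
    generated_ids = florence_model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=1024,
        num_beams=3,
    )
    generated_text = florence_processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    parsed_answer = florence_processor.post_process_generation(
        generated_text, task=task, image_size=(image.width, image.height)
    )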
 63 |
 64 | + # JSON file load function
 65 |   def load_json_file(file_name):
 66 |       file_path = os.path.join("data", file_name)
 67 |       try:

 71 |           print(f"Warning: Could not load {file_name}. Using empty list.")
 72 |           return []
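Lines 68-70 are elided; given the try/except shape, the body is presumably the standard json.load pattern. A self-contained sketch of the likely whole function:

    import json
    import os

    def load_json_file(file_name):
        file_path = os.path.join("data", file_name)
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                return json.load(f)
        except (FileNotFoundError, json.JSONDecodeError):
            print(f"Warning: Could not load {file_name}. Using empty list.")
            return []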
 73 |
 74 | + # Load JSON data
 75 |   ARTFORM = load_json_file("artform.json")
 76 |   PHOTO_TYPE = load_json_file("photo_type.json")
 77 |   BODY_TYPES = load_json_file("body_types.json")

 91 |   POSE = load_json_file("pose.json")
 92 |   BACKGROUND = load_json_file("background.json")
 93 |
 94 | + # Prompt generation class
 95 |   class PromptGenerator:
 96 |       def __init__(self, seed=None):
 97 |           self.rng = random.Random(seed)
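Using a private random.Random(seed) rather than the module-level functions makes prompt generation reproducible per instance and keeps it isolated from other consumers of the global RNG; with seed=None it falls back to OS entropy. For example:

    import random

    a = random.Random(42)
    b = random.Random(42)
    options = ["portrait", "landscape", "macro"]
    # Same seed, same sequence of choices, regardless of global random state:
    assert [a.choice(options) for _ in range(5)] == [b.choice(options) for _ in range(5)]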
171 |           components = []
172 |           custom = kwargs.get("custom", "")
173 |           if custom:
174 | +             custom = translate_prompt(custom) # Apply translation
175 |               components.append(custom)
176 |           is_photographer = kwargs.get("artform", "").lower() == "photography" or (
177 |               kwargs.get("artform", "").lower() == "random"

350 |           if not self.client:
351 |               return "Error: OpenAI API key not found. Please set OPENAI_API environment variable."
352 |
353 | +         # Fixed model: gpt-4.1-mini
354 |           openai_model = "gpt-4.1-mini"
355 |
356 |           default_happy_prompt = """Create a detailed visually descriptive caption of this description, which will be used as a prompt for a text to image AI system (caption only, no instructions like "create an image").Remove any mention of digital artwork or artwork style. Give detailed visual descriptions of the character(s), including ethnicity, skin tone, expression etc. Imagine using keywords for a still for someone who has aphantasia. Describe the image style, e.g. any photographic or art styles / techniques utilized. Make sure to fully describe all aspects of the cinematography, with abundant technical details and visual descriptions. If there is more than one image, combine the elements and characters from all of the images creatively into a single cohesive composition with a single background, inventing an interaction between the characters. Be creative in combining the characters into a single cohesive scene. Focus on two primary characters (or one) and describe an interesting interaction between them, such as a hug, a kiss, a fight, giving an object, an emotional reaction / interaction. If there is more than one background in the images, pick the most appropriate one. Your output is only the caption itself, no comments or extra formatting. The caption is in a single long paragraph. If you feel the images are inappropriate, invent a new scene / characters inspired by these. Additionally, incorporate a specific movie director's visual style and describe the lighting setup in detail, including the type, color, and placement of light sources to create the desired mood and atmosphere. Always frame the scene, including details about the film grain, color grading, and any artifacts or characteristics specific."""

383 |           char_limit = compression_chars[compression_level]
384 |           base_prompt += f" Compress the output to be concise while retaining key visual details. MAX OUTPUT SIZE no more than {char_limit} characters."
385 |
386 | +         # Correct OpenAI API call format
387 |           response = self.client.chat.completions.create(
388 |               model=openai_model,
389 |               messages=[

401 |               top_p=1
402 |           )
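The "# Correct OpenAI API call format" comment presumably marks the migration to the openai>=1.0 client interface, which replaced the pre-1.0 module-level openai.ChatCompletion.create call. For contrast, a minimal standalone call in the same style, with placeholder key, messages, and sampling values:

    from openai import OpenAI

    client = OpenAI(api_key="sk-...")  # placeholder key
    response = client.chat.completions.create(
        model="gpt-4.1-mini",
        messages=[
            {"role": "system", "content": "You rewrite prompts for a text-to-image model."},
            {"role": "user", "content": "A cat sleeping on a windowsill at dawn."},
        ],
        top_p=1,
    )
    print(response.choices[0].message.content)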
403 |
404 | +         # Extract response
405 |           output = response.choices[0].message.content
406 |
407 | +         # Clean output
408 |           if ": " in output:
409 |               output = output.split(": ", 1)[1].strip()
410 |           elif output.lower().startswith("here"):

412 |               if len(sentences) > 1:
413 |                   output = ". ".join(sentences[1:]).strip()
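Line 411 falls between the two hunks; judging from the join on line 413 it presumably reads sentences = output.split(". "). The two branches strip a leading preamble such as "Here is the caption: ...". Extracted as a standalone helper for illustration (note the first branch also fires on any colon inside the caption itself, a known trade-off of this heuristic):

    def strip_preamble(output: str) -> str:
        # Drop everything up to the first ": " (e.g. "Here is the caption: ...")
        if ": " in output:
            return output.split(": ", 1)[1].strip()
        # Otherwise drop a leading "Here ..." sentence if one exists
        if output.lower().startswith("here"):
            sentences = output.split(". ")
            if len(sentences) > 1:
                return ". ".join(sentences[1:]).strip()
        return output

    assert strip_preamble("Here is the caption: a red fox at dusk") == "a red fox at dusk"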
414 |
415 | +         # Save prompt
416 |           self.save_prompt(output)
417 |
418 |           return output

421 |           print(f"An error occurred: {e}")
422 |           return f"Error occurred while processing the request: {str(e)}"
423 |
424 | + # Enhanced CSS
425 |   css = """
426 |   footer {
427 |       visibility: hidden;

438 |       display: flex;
439 |       justify-content: center;
440 |       gap: 15px;
441 | +     margin: 20px auto;
442 | +     max-width: 100%;
443 |   }
444 |   .prompt-generator-container {
445 |       background: #f8f9fa;

488 |       openai_node = OpenAIGenerationNode()
489 |
490 |       with gr.Blocks(theme="soft", css=css) as demo:
491 | +         # Header
492 |           with gr.Row(elem_classes="main-title"):
493 |               gr.HTML("""
494 |               <h1>🎨 Flux Prompt Generator</h1>
495 |               <p style="text-align: center; color: #7f8c8d; font-size: 1.1em;">
496 | +             Korean Input Support | AI-Based Prompt Generator
497 |               </p>
498 |               """)
499 |
500 | +         # Badges
501 |           with gr.Row(elem_classes="badge-container"):
502 |               gr.HTML("""
503 |               <a href="https://huggingface.co/spaces/openfree/Best-AI" target="_blank">

508 |               </a>
509 |               """)
510 |
511 | +         # Main container
512 |           with gr.Row():
513 | +             # Left panel - Settings
514 |               with gr.Column(scale=2):
515 | +                 gr.HTML('<div class="section-header">🎯 Basic Settings</div>')
516 | +                 seed = gr.Slider(0, 30000, label='Seed Value', step=1, value=random.randint(0,30000))
517 | +                 custom = gr.Textbox(label="✏️ Custom Prompt (Korean Available)", placeholder="Enter your description...")
518 | +                 subject = gr.Textbox(label="📝 Subject (Optional)", placeholder="e.g., beautiful woman, cute cat, etc...")
519 |
520 | +                 # Global option settings
521 | +                 gr.HTML('<div class="section-header">⚡ Quick Settings</div>')
522 |                   global_option = gr.Radio(
523 |                       ["Disabled", "Random", "No Figure Rand"],
524 | +                     label="Apply All Options at Once:",
525 |                       value="Disabled",
526 | +                     info="Change all settings at once"
527 |                   )
528 |
529 | +                 # Detailed settings
530 | +                 with gr.Accordion("🎨 Artform & Photo Type", open=False):
531 | +                     artform = gr.Dropdown(["disabled", "random"] + ARTFORM, label="Artform", value="disabled")
532 | +                     photo_type = gr.Dropdown(["disabled", "random"] + PHOTO_TYPE, label="Photo Type", value="disabled")
533 |
534 | +                 with gr.Accordion("👤 Character Settings", open=False):
535 | +                     body_types = gr.Dropdown(["disabled", "random"] + BODY_TYPES, label="Body Type", value="disabled")
536 | +                     default_tags = gr.Dropdown(["disabled", "random"] + DEFAULT_TAGS, label="Default Tags", value="disabled")
537 | +                     roles = gr.Dropdown(["disabled", "random"] + ROLES, label="Role", value="disabled")
538 | +                     hairstyles = gr.Dropdown(["disabled", "random"] + HAIRSTYLES, label="Hairstyle", value="disabled")
539 | +                     clothing = gr.Dropdown(["disabled", "random"] + CLOTHING, label="Clothing", value="disabled")
540 |
541 | +                 with gr.Accordion("🏞️ Scene Settings", open=False):
542 | +                     place = gr.Dropdown(["disabled", "random"] + PLACE, label="Place", value="disabled")
543 | +                     lighting = gr.Dropdown(["disabled", "random"] + LIGHTING, label="Lighting", value="disabled")
544 | +                     composition = gr.Dropdown(["disabled", "random"] + COMPOSITION, label="Composition", value="disabled")
545 | +                     pose = gr.Dropdown(["disabled", "random"] + POSE, label="Pose", value="disabled")
546 | +                     background = gr.Dropdown(["disabled", "random"] + BACKGROUND, label="Background", value="disabled")
547 |
548 | +                 with gr.Accordion("🌟 Style & Artist", open=False):
549 | +                     additional_details = gr.Dropdown(["disabled", "random"] + ADDITIONAL_DETAILS, label="Additional Details", value="disabled")
550 | +                     photography_styles = gr.Dropdown(["disabled", "random"] + PHOTOGRAPHY_STYLES, label="Photography Style", value="disabled")
551 | +                     device = gr.Dropdown(["disabled", "random"] + DEVICE, label="Camera/Device", value="disabled")
552 | +                     photographer = gr.Dropdown(["disabled", "random"] + PHOTOGRAPHER, label="Photographer", value="disabled")
553 | +                     artist = gr.Dropdown(["disabled", "random"] + ARTIST, label="Artist", value="disabled")
554 | +                     digital_artform = gr.Dropdown(["disabled", "random"] + DIGITAL_ARTFORM, label="Digital Artform", value="disabled")
555 |
556 | +                 generate_button = gr.Button("🚀 Generate Prompt", variant="primary", elem_classes="generate-button")
557 |
558 | +             # Middle panel - Image and output
559 |               with gr.Column(scale=2):
560 | +                 with gr.Accordion("🖼️ Image Caption Generation", open=False):
561 | +                     input_image = gr.Image(label="Upload Image (Optional)", type="pil")
562 | +                     caption_output = gr.Textbox(label="Generated Caption", lines=3)
563 |                       with gr.Row():
564 | +                         create_caption_button = gr.Button("🔍 Generate Caption", variant="secondary")
565 | +                         add_caption_button = gr.Button("➕ Add to Prompt", variant="secondary")
566 |
567 | +                 gr.HTML('<div class="section-header">📝 Generated Prompts</div>')
568 | +                 output = gr.Textbox(label="Main Prompt", lines=4)
569 | +                 with gr.Accordion("Advanced Output Options", open=False):
570 |                       t5xxl_output = gr.Textbox(label="T5XXL", lines=2)
571 |                       clip_l_output = gr.Textbox(label="CLIP L", lines=2)
572 |                       clip_g_output = gr.Textbox(label="CLIP G", lines=2)
573 |
574 | +             # Right panel - OpenAI
575 |               with gr.Column(scale=2):
576 | +                 gr.HTML('<div class="section-header">🤖 OpenAI Prompt Enhancement</div>')
577 | +                 gr.HTML("<p style='text-align: center; color: #95a5a6;'>Model Used: gpt-4.1-mini</p>")
578 |
579 |                   with gr.Row():
580 | +                     happy_talk = gr.Checkbox(label="😊 Happy Talk", value=True, info="More detailed description")
581 | +                     compress = gr.Checkbox(label="🗜️ Compress", value=True, info="Limit output length")
582 |
583 |                   compression_level = gr.Radio(
584 |                       ["soft", "medium", "hard"],
585 | +                     label="Compression Strength",
586 |                       value="hard",
587 |                       visible=True
588 |                   )
589 |
590 | +                 poster = gr.Checkbox(label="🎬 Movie Poster Style", value=False)
591 |
592 |                   custom_base_prompt = gr.Textbox(
593 | +                     label="🛠️ Custom Base Prompt",
594 |                       lines=5,
595 | +                     placeholder="Enter special instructions for OpenAI..."
596 |                   )
597 |
598 | +                 generate_text_button = gr.Button("✨ Enhance with AI", variant="primary", elem_classes="openai-button")
599 |
600 |                   text_output = gr.Textbox(
601 | +                     label="🎯 AI Enhanced Result",
602 |                       lines=10,
603 |                       elem_classes="output-container"
604 |                   )
605 |
606 | +         # Event handlers
607 |           def create_caption(image):
608 |               if image is not None:
609 |                   return florence_caption(image)

636 |               outputs=text_output
637 |           )
638 |
639 | +         # Show/hide compression strength based on compress checkbox
640 |           compress.change(
641 |               lambda x: gr.update(visible=x),
642 |               inputs=[compress],
643 |               outputs=[compression_level]
644 |           )
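The compress.change wiring above is the standard Gradio pattern for conditional visibility: the checkbox value feeds a lambda that returns gr.update(visible=...) targeting the radio group. Isolated to its own minimal app (a sketch assuming Gradio 4.x; gr.update also exists in 3.x):

    import gradio as gr

    with gr.Blocks() as toggle_demo:
        flag = gr.Checkbox(label="Compress", value=True)
        level = gr.Radio(["soft", "medium", "hard"], value="hard", visible=True)
        # Unchecking the box hides the radio group; checking it shows it again.
        flag.change(lambda x: gr.update(visible=x), inputs=[flag], outputs=[level])

    # toggle_demo.launch()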
645 |
646 | +         # Global option change function
647 |           def update_all_options(choice):
648 |               updates = []
649 |               dropdown_list = [

658 |               updates = [gr.update(value="random") for _ in dropdown_list]
659 |           else: # No Figure Random
660 |               updates = []
661 | +             # Character-related settings are disabled
662 |               character_dropdowns = [photo_type, body_types, default_tags, roles, hairstyles, clothing, pose, additional_details]
663 |               other_dropdowns = [artform, place, lighting, composition, background, photography_styles, device, photographer, artist, digital_artform]
664 |
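The diff ends inside the "No Figure Rand" branch, so the rest of update_all_options is not shown. A plausible completion (an assumption based on the comment and the two lists): disable the character-related dropdowns, randomize the rest, and emit the updates in dropdown_list order so they line up with the handler's outputs:

    # Hypothetical continuation of the branch above.
    updates = [
        gr.update(value="disabled") if dd in character_dropdowns
        else gr.update(value="random")
        for dd in dropdown_list
    ]
    return updates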