aliceblue11 committed
Commit 26548b4 · verified · 1 parent: 76e32d2

Update app.py

Files changed (1)
  1. app.py +56 -452
app.py CHANGED
@@ -1,473 +1,77 @@
1
- import gradio as gr
2
- import random
3
- import json
4
- import os
5
- import re
6
- from datetime import datetime
7
- from huggingface_hub import InferenceClient
8
- import subprocess
9
- import torch
10
- from PIL import Image
11
- from transformers import AutoProcessor, AutoModelForCausalLM
12
- import random
13
-
14
- subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
15
-
16
- huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
17
-
18
-
19
- # Initialize Florence model
20
- device = "cuda" if torch.cuda.is_available() else "cpu"
21
- florence_model = AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True).to(device).eval()
22
- florence_processor = AutoProcessor.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True)
23
-
24
- # Florence caption function
25
- def florence_caption(image):
26
- if not isinstance(image, Image.Image):
27
- image = Image.fromarray(image)
28
-
29
- inputs = florence_processor(text="<MORE_DETAILED_CAPTION>", images=image, return_tensors="pt").to(device)
30
- generated_ids = florence_model.generate(
31
- input_ids=inputs["input_ids"],
32
- pixel_values=inputs["pixel_values"],
33
- max_new_tokens=1024,
34
- early_stopping=False,
35
- do_sample=False,
36
- num_beams=3,
37
- )
38
- generated_text = florence_processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
39
- parsed_answer = florence_processor.post_process_generation(
40
- generated_text,
41
- task="<MORE_DETAILED_CAPTION>",
42
- image_size=(image.width, image.height)
43
- )
44
- return parsed_answer["<MORE_DETAILED_CAPTION>"]
45
-
46
- # Load JSON files
47
- def load_json_file(file_name):
48
- file_path = os.path.join("data", file_name)
49
- with open(file_path, "r") as file:
50
- return json.load(file)
51
-
52
- ARTFORM = load_json_file("artform.json")
53
- PHOTO_TYPE = load_json_file("photo_type.json")
54
- BODY_TYPES = load_json_file("body_types.json")
55
- DEFAULT_TAGS = load_json_file("default_tags.json")
56
- ROLES = load_json_file("roles.json")
57
- HAIRSTYLES = load_json_file("hairstyles.json")
58
- ADDITIONAL_DETAILS = load_json_file("additional_details.json")
59
- PHOTOGRAPHY_STYLES = load_json_file("photography_styles.json")
60
- DEVICE = load_json_file("device.json")
61
- PHOTOGRAPHER = load_json_file("photographer.json")
62
- ARTIST = load_json_file("artist.json")
63
- DIGITAL_ARTFORM = load_json_file("digital_artform.json")
64
- PLACE = load_json_file("place.json")
65
- LIGHTING = load_json_file("lighting.json")
66
- CLOTHING = load_json_file("clothing.json")
67
- COMPOSITION = load_json_file("composition.json")
68
- POSE = load_json_file("pose.json")
69
- BACKGROUND = load_json_file("background.json")
70
-
71
- class PromptGenerator:
72
- def __init__(self, seed=None):
73
- self.rng = random.Random(seed)
74
-
75
- def split_and_choose(self, input_str):
76
- choices = [choice.strip() for choice in input_str.split(",")]
77
- return self.rng.choices(choices, k=1)[0]
78
-
79
- def get_choice(self, input_str, default_choices):
80
- if input_str.lower() == "disabled":
81
- return ""
82
- elif "," in input_str:
83
- return self.split_and_choose(input_str)
84
- elif input_str.lower() == "random":
85
- return self.rng.choices(default_choices, k=1)[0]
86
- else:
87
- return input_str
88
-
89
- def clean_consecutive_commas(self, input_string):
90
- cleaned_string = re.sub(r',\s*,', ',', input_string)
91
- return cleaned_string
92
-
93
- def process_string(self, replaced, seed):
94
- replaced = re.sub(r'\s*,\s*', ',', replaced)
95
- replaced = re.sub(r',+', ',', replaced)
96
- original = replaced
97
-
98
- first_break_clipl_index = replaced.find("BREAK_CLIPL")
99
- second_break_clipl_index = replaced.find("BREAK_CLIPL", first_break_clipl_index + len("BREAK_CLIPL"))
100
-
101
- if first_break_clipl_index != -1 and second_break_clipl_index != -1:
102
- clip_content_l = replaced[first_break_clipl_index + len("BREAK_CLIPL"):second_break_clipl_index]
103
- replaced = replaced[:first_break_clipl_index].strip(", ") + replaced[second_break_clipl_index + len("BREAK_CLIPL"):].strip(", ")
104
- clip_l = clip_content_l
105
- else:
106
- clip_l = ""
107
-
108
- first_break_clipg_index = replaced.find("BREAK_CLIPG")
109
- second_break_clipg_index = replaced.find("BREAK_CLIPG", first_break_clipg_index + len("BREAK_CLIPG"))
110
-
111
- if first_break_clipg_index != -1 and second_break_clipg_index != -1:
112
- clip_content_g = replaced[first_break_clipg_index + len("BREAK_CLIPG"):second_break_clipg_index]
113
- replaced = replaced[:first_break_clipg_index].strip(", ") + replaced[second_break_clipg_index + len("BREAK_CLIPG"):].strip(", ")
114
- clip_g = clip_content_g
115
- else:
116
- clip_g = ""
117
-
118
- t5xxl = replaced
119
-
120
- original = original.replace("BREAK_CLIPL", "").replace("BREAK_CLIPG", "")
121
- original = re.sub(r'\s*,\s*', ',', original)
122
- original = re.sub(r',+', ',', original)
123
- clip_l = re.sub(r'\s*,\s*', ',', clip_l)
124
- clip_l = re.sub(r',+', ',', clip_l)
125
- clip_g = re.sub(r'\s*,\s*', ',', clip_g)
126
- clip_g = re.sub(r',+', ',', clip_g)
127
- if clip_l.startswith(","):
128
- clip_l = clip_l[1:]
129
- if clip_g.startswith(","):
130
- clip_g = clip_g[1:]
131
- if original.startswith(","):
132
- original = original[1:]
133
- if t5xxl.startswith(","):
134
- t5xxl = t5xxl[1:]
135
-
136
- return original, seed, t5xxl, clip_l, clip_g
137
-
138
- def generate_prompt(self, seed, custom, subject, artform, photo_type, body_types, default_tags, roles, hairstyles,
139
- additional_details, photography_styles, device, photographer, artist, digital_artform,
140
- place, lighting, clothing, composition, pose, background, input_image):
141
- kwargs = locals()
142
- del kwargs['self']
143
-
144
- seed = kwargs.get("seed", 0)
145
- if seed is not None:
146
- self.rng = random.Random(seed)
147
- components = []
148
- custom = kwargs.get("custom", "")
149
- if custom:
150
- components.append(custom)
151
- is_photographer = kwargs.get("artform", "").lower() == "photography" or (
152
- kwargs.get("artform", "").lower() == "random"
153
- and self.rng.choice([True, False])
154
- )
155
-
156
- subject = kwargs.get("subject", "")
157
-
158
- if is_photographer:
159
- selected_photo_style = self.get_choice(kwargs.get("photography_styles", ""), PHOTOGRAPHY_STYLES)
160
- if not selected_photo_style:
161
- selected_photo_style = "photography"
162
- components.append(selected_photo_style)
163
- if kwargs.get("photography_style", "") != "disabled" and kwargs.get("default_tags", "") != "disabled" or subject != "":
164
- components.append(" of")
165
-
166
- default_tags = kwargs.get("default_tags", "random")
167
- body_type = kwargs.get("body_types", "")
168
- if not subject:
169
- if default_tags == "random":
170
- if body_type != "disabled" and body_type != "random":
171
- selected_subject = self.get_choice(kwargs.get("default_tags", ""), DEFAULT_TAGS).replace("a ", "").replace("an ", "")
172
- components.append("a ")
173
- components.append(body_type)
174
- components.append(selected_subject)
175
- elif body_type == "disabled":
176
- selected_subject = self.get_choice(kwargs.get("default_tags", ""), DEFAULT_TAGS)
177
- components.append(selected_subject)
178
- else:
179
- body_type = self.get_choice(body_type, BODY_TYPES)
180
- components.append("a ")
181
- components.append(body_type)
182
- selected_subject = self.get_choice(kwargs.get("default_tags", ""), DEFAULT_TAGS).replace("a ", "").replace("an ", "")
183
- components.append(selected_subject)
184
- elif default_tags == "disabled":
185
- pass
186
- else:
187
- components.append(default_tags)
188
- else:
189
- if body_type != "disabled" and body_type != "random":
190
- components.append("a ")
191
- components.append(body_type)
192
- elif body_type == "disabled":
193
- pass
194
- else:
195
- body_type = self.get_choice(body_type, BODY_TYPES)
196
- components.append("a ")
197
- components.append(body_type)
198
- components.append(subject)
199
-
200
- params = [
201
- ("roles", ROLES),
202
- ("hairstyles", HAIRSTYLES),
203
- ("additional_details", ADDITIONAL_DETAILS),
204
- ]
205
- for param in params:
206
- components.append(self.get_choice(kwargs.get(param[0], ""), param[1]))
207
- for i in reversed(range(len(components))):
208
- if components[i] in PLACE:
209
- components[i] += ","
210
- break
211
- if kwargs.get("clothing", "") != "disabled" and kwargs.get("clothing", "") != "random":
212
- components.append(", dressed in ")
213
- clothing = kwargs.get("clothing", "")
214
- components.append(clothing)
215
- elif kwargs.get("clothing", "") == "random":
216
- components.append(", dressed in ")
217
- clothing = self.get_choice(kwargs.get("clothing", ""), CLOTHING)
218
- components.append(clothing)
219
-
220
- if kwargs.get("composition", "") != "disabled" and kwargs.get("composition", "") != "random":
221
- components.append(",")
222
- composition = kwargs.get("composition", "")
223
- components.append(composition)
224
- elif kwargs.get("composition", "") == "random":
225
- components.append(",")
226
- composition = self.get_choice(kwargs.get("composition", ""), COMPOSITION)
227
- components.append(composition)
228
-
229
- if kwargs.get("pose", "") != "disabled" and kwargs.get("pose", "") != "random":
230
- components.append(",")
231
- pose = kwargs.get("pose", "")
232
- components.append(pose)
233
- elif kwargs.get("pose", "") == "random":
234
- components.append(",")
235
- pose = self.get_choice(kwargs.get("pose", ""), POSE)
236
- components.append(pose)
237
- components.append("BREAK_CLIPG")
238
- if kwargs.get("background", "") != "disabled" and kwargs.get("background", "") != "random":
239
- components.append(",")
240
- background = kwargs.get("background", "")
241
- components.append(background)
242
- elif kwargs.get("background", "") == "random":
243
- components.append(",")
244
- background = self.get_choice(kwargs.get("background", ""), BACKGROUND)
245
- components.append(background)
246
-
247
- if kwargs.get("place", "") != "disabled" and kwargs.get("place", "") != "random":
248
- components.append(",")
249
- place = kwargs.get("place", "")
250
- components.append(place)
251
- elif kwargs.get("place", "") == "random":
252
- components.append(",")
253
- place = self.get_choice(kwargs.get("place", ""), PLACE)
254
- components.append(place + ",")
255
-
256
- lighting = kwargs.get("lighting", "").lower()
257
- if lighting == "random":
258
- selected_lighting = ", ".join(self.rng.sample(LIGHTING, self.rng.randint(2, 5)))
259
- components.append(",")
260
- components.append(selected_lighting)
261
- elif lighting == "disabled":
262
- pass
263
- else:
264
- components.append(", ")
265
- components.append(lighting)
266
- components.append("BREAK_CLIPG")
267
- components.append("BREAK_CLIPL")
268
- if is_photographer:
269
- if kwargs.get("photo_type", "") != "disabled":
270
- photo_type_choice = self.get_choice(kwargs.get("photo_type", ""), PHOTO_TYPE)
271
- if photo_type_choice and photo_type_choice != "random" and photo_type_choice != "disabled":
272
- random_value = round(self.rng.uniform(1.1, 1.5), 1)
273
- components.append(f", ({photo_type_choice}:{random_value}), ")
274
-
275
- params = [
276
- ("device", DEVICE),
277
- ("photographer", PHOTOGRAPHER),
278
- ]
279
- components.extend([self.get_choice(kwargs.get(param[0], ""), param[1]) for param in params])
280
- if kwargs.get("device", "") != "disabled":
281
- components[-2] = f", shot on {components[-2]}"
282
- if kwargs.get("photographer", "") != "disabled":
283
- components[-1] = f", photo by {components[-1]}"
284
- else:
285
- digital_artform_choice = self.get_choice(kwargs.get("digital_artform", ""), DIGITAL_ARTFORM)
286
- if digital_artform_choice:
287
- components.append(f"{digital_artform_choice}")
288
- if kwargs.get("artist", "") != "disabled":
289
- components.append(f"by {self.get_choice(kwargs.get('artist', ''), ARTIST)}")
290
- components.append("BREAK_CLIPL")
291
-
292
- prompt = " ".join(components)
293
- prompt = re.sub(" +", " ", prompt)
294
- replaced = prompt.replace("of as", "of")
295
- replaced = self.clean_consecutive_commas(replaced)
296
-
297
- return self.process_string(replaced, seed)
298
-
299
- def add_caption_to_prompt(self, prompt, caption):
300
- if caption:
301
- return f"{prompt}, {caption}"
302
- return prompt
303
-
304
- class HuggingFaceInferenceNode:
305
- def __init__(self):
306
- self.clients = {
307
- "Mixtral": InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"),
308
- "Mistral": InferenceClient("mistralai/Mistral-7B-Instruct-v0.3"),
309
- "Llama 3": InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct"),
310
- "Mistral-Nemo": InferenceClient("mistralai/Mistral-Nemo-Instruct-2407")
311
- }
312
- self.prompts_dir = "./prompts"
313
- os.makedirs(self.prompts_dir, exist_ok=True)
314
-
315
- def save_prompt(self, prompt):
316
- filename_text = "hf_" + prompt.split(',')[0].strip()
317
- filename_text = re.sub(r'[^\w\-_\. ]', '_', filename_text)
318
- filename_text = filename_text[:30]
319
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
320
- base_filename = f"{filename_text}_{timestamp}.txt"
321
- filename = os.path.join(self.prompts_dir, base_filename)
322
-
323
- with open(filename, "w") as file:
324
- file.write(prompt)
325
-
326
- print(f"Prompt saved to {filename}")
327
-
328
- def generate(self, model, input_text, happy_talk, compress, compression_level, poster, custom_base_prompt=""):
329
- try:
330
- client = self.clients[model]
331
-
332
- default_happy_prompt = """Create a detailed visually descriptive caption of this description, which will be used as a prompt for a text to image AI system (caption only, no instructions like "create an image").Remove any mention of digital artwork or artwork style. Give detailed visual descriptions of the character(s), including ethnicity, skin tone, expression etc. Imagine using keywords for a still for someone who has aphantasia. Describe the image style, e.g. any photographic or art styles / techniques utilized. Make sure to fully describe all aspects of the cinematography, with abundant technical details and visual descriptions. If there is more than one image, combine the elements and characters from all of the images creatively into a single cohesive composition with a single background, inventing an interaction between the characters. Be creative in combining the characters into a single cohesive scene. Focus on two primary characters (or one) and describe an interesting interaction between them, such as a hug, a kiss, a fight, giving an object, an emotional reaction / interaction. If there is more than one background in the images, pick the most appropriate one. Your output is only the caption itself, no comments or extra formatting. The caption is in a single long paragraph. If you feel the images are inappropriate, invent a new scene / characters inspired by these. Additionally, incorporate a specific movie director's visual style and describe the lighting setup in detail, including the type, color, and placement of light sources to create the desired mood and atmosphere. Always frame the scene, including details about the film grain, color grading, and any artifacts or characteristics specific."""
333
-
334
- default_simple_prompt = """Create a brief, straightforward caption for this description, suitable for a text-to-image AI system. Focus on the main elements, key characters, and overall scene without elaborate details. Provide a clear and concise description in one or two sentences."""
335
-
336
- poster_prompt = """Analyze the provided description and extract key information to create a movie poster style description. Format the output as follows:
337
- Title: A catchy, intriguing title that captures the essence of the scene, place the title in "".
338
- Main character: Give a description of the main character.
339
- Background: Describe the background in detail.
340
- Supporting characters: Describe the supporting characters
341
- Branding type: Describe the branding type
342
- Tagline: Include a tagline that captures the essence of the movie.
343
- Visual style: Ensure that the visual style fits the branding type and tagline.
344
- You are allowed to make up film and branding names, and do them like 80's, 90's or modern movie posters."""
345
-
346
- if poster:
347
- base_prompt = poster_prompt
348
- elif custom_base_prompt.strip():
349
- base_prompt = custom_base_prompt
350
- else:
351
- base_prompt = default_happy_prompt if happy_talk else default_simple_prompt
352
-
353
- if compress and not poster:
354
- compression_chars = {
355
- "soft": 600 if happy_talk else 300,
356
- "medium": 400 if happy_talk else 200,
357
- "hard": 200 if happy_talk else 100
358
- }
359
- char_limit = compression_chars[compression_level]
360
- base_prompt += f" Compress the output to be concise while retaining key visual details. MAX OUTPUT SIZE no more than {char_limit} characters."
361
-
362
- messages = f"<|im_start|>system\nYou are a helpful assistant. Try your best to give best response possible to user.<|im_end|>"
363
- messages += f"\n<|im_start|>user\n{base_prompt}\nDescription: {input_text}<|im_end|>\n<|im_start|>assistant\n"
364
-
365
- stream = client.text_generation(messages, max_new_tokens=4000, do_sample=True, stream=True, details=True, return_full_text=False)
366
- output = ""
367
- for response in stream:
368
- if not response.token.text == "<|im_end|>":
369
- output += response.token.text
370
-
371
- # Remove specific tokens based on the model
372
- if model == "Llama 3":
373
- output = output.rstrip("<|eot_id|>")
374
- elif model == "Mistral":
375
- output = output.rstrip("</s>")
376
- elif model == "Mistral-Nemo":
377
- output = output.rstrip("<|im_end|></s>")
378
-
379
- # Clean up the output
380
- if ": " in output:
381
- output = output.split(": ", 1)[1].strip()
382
- elif output.lower().startswith("here"):
383
- sentences = output.split(". ")
384
- if len(sentences) > 1:
385
- output = ". ".join(sentences[1:]).strip()
386
-
387
- self.save_prompt(output)
388
- return output
389
-
390
- except Exception as e:
391
- print(f"An error occurred: {e}")
392
- return f"Error occurred while processing the request: {str(e)}"
393
-
394
- title = """<h1 align="center">FLUX Prompt Generator</h1>
395
- <p><center>
396
- <a href="https://github.com/dagthomas/comfyui_dagthomas" target="_blank">[comfyui_dagthomas]</a>
397
- <a href="https://github.com/dagthomas" target="_blank">[dagthomas Github]</a>
398
- <p align="center">Create long prompts from images or simple words. Enhance your short prompts with prompt enhancer.</p>
399
- </center></p>
400
- """
401
-
402
  def create_interface():
403
  prompt_generator = PromptGenerator()
404
  huggingface_node = HuggingFaceInferenceNode()
405
 
406
  with gr.Blocks(theme='Nymbo/Nymbo_Theme') as demo:
407
 
408
- gr.HTML(title)
409
 
410
  with gr.Row():
411
  with gr.Column(scale=2):
412
- with gr.Accordion("Basic Settings"):
413
- seed = gr.Number(label="Seed", value=random.randint(0, 1000000))
414
- custom = gr.Textbox(label="Custom Input Prompt (optional)")
415
- subject = gr.Textbox(label="Subject (optional)")
416
 
417
- # Add the radio button for global option selection
418
- global_option = gr.Radio(["disabled", "random"], label="Set all options to:", value="disabled")
419
 
420
- with gr.Accordion("Artform and Photo Type", open=False):
421
- artform = gr.Dropdown(["disabled", "random"] + ARTFORM, label="Artform", value="disabled")
422
- photo_type = gr.Dropdown(["disabled", "random"] + PHOTO_TYPE, label="Photo Type", value="disabled")
423
 
424
- with gr.Accordion("Character Details", open=False):
425
- body_types = gr.Dropdown(["disabled", "random"] + BODY_TYPES, label="Body Types", value="disabled")
426
- default_tags = gr.Dropdown(["disabled", "random"] + DEFAULT_TAGS, label="Default Tags", value="disabled")
427
- roles = gr.Dropdown(["disabled", "random"] + ROLES, label="Roles", value="disabled")
428
- hairstyles = gr.Dropdown(["disabled", "random"] + HAIRSTYLES, label="Hairstyles", value="disabled")
429
- clothing = gr.Dropdown(["disabled", "random"] + CLOTHING, label="Clothing", value="disabled")
430
 
431
- with gr.Accordion("Scene Details", open=False):
432
- place = gr.Dropdown(["disabled", "random"] + PLACE, label="Place", value="disabled")
433
- lighting = gr.Dropdown(["disabled", "random"] + LIGHTING, label="Lighting", value="disabled")
434
- composition = gr.Dropdown(["disabled", "random"] + COMPOSITION, label="Composition", value="disabled")
435
- pose = gr.Dropdown(["disabled", "random"] + POSE, label="Pose", value="disabled")
436
- background = gr.Dropdown(["disabled", "random"] + BACKGROUND, label="Background", value="disabled")
437
 
438
- with gr.Accordion("Style and Artist", open=False):
439
- additional_details = gr.Dropdown(["disabled", "random"] + ADDITIONAL_DETAILS, label="Additional Details", value="disabled")
440
- photography_styles = gr.Dropdown(["disabled", "random"] + PHOTOGRAPHY_STYLES, label="Photography Styles", value="disabled")
441
- device = gr.Dropdown(["disabled", "random"] + DEVICE, label="Device", value="disabled")
442
- photographer = gr.Dropdown(["disabled", "random"] + PHOTOGRAPHER, label="Photographer", value="disabled")
443
- artist = gr.Dropdown(["disabled", "random"] + ARTIST, label="Artist", value="disabled")
444
- digital_artform = gr.Dropdown(["disabled", "random"] + DIGITAL_ARTFORM, label="Digital Artform", value="disabled")
445
 
446
- generate_button = gr.Button("Generate Prompt")
447
 
448
  with gr.Column(scale=2):
449
- with gr.Accordion("Image and Caption", open=False):
450
- input_image = gr.Image(label="Input Image (optional)")
451
- caption_output = gr.Textbox(label="Generated Caption", lines=3)
452
- create_caption_button = gr.Button("Create Caption")
453
- add_caption_button = gr.Button("Add Caption to Prompt")
454
-
455
- with gr.Accordion("Prompt Generation", open=True):
456
- output = gr.Textbox(label="Generated Prompt / Input Text", lines=4)
457
- t5xxl_output = gr.Textbox(label="T5XXL Output", visible=True)
458
- clip_l_output = gr.Textbox(label="CLIP L Output", visible=True)
459
- clip_g_output = gr.Textbox(label="CLIP G Output", visible=True)
460
 
461
  with gr.Column(scale=2):
462
- with gr.Accordion("Prompt Generation with LLM", open=False):
463
- model = gr.Dropdown(["Mixtral", "Mistral", "Llama 3", "Mistral-Nemo"], label="Model", value="Llama 3")
464
- happy_talk = gr.Checkbox(label="Happy Talk", value=True)
465
- compress = gr.Checkbox(label="Compress", value=True)
466
- compression_level = gr.Radio(["soft", "medium", "hard"], label="Compression Level", value="hard")
467
- poster = gr.Checkbox(label="Poster", value=False)
468
- custom_base_prompt = gr.Textbox(label="Custom Base Prompt", lines=5)
469
- generate_text_button = gr.Button("Generate Prompt with LLM")
470
- text_output = gr.Textbox(label="Generated Text", lines=10)
471
 
472
  def create_caption(image):
473
  if image is not None:
@@ -521,4 +125,4 @@ def create_interface():
521
 
522
  if __name__ == "__main__":
523
  demo = create_interface()
524
- demo.launch()
1
  def create_interface():
2
  prompt_generator = PromptGenerator()
3
  huggingface_node = HuggingFaceInferenceNode()
4
 
5
  with gr.Blocks(theme='Nymbo/Nymbo_Theme') as demo:
6
 
7
+ gr.HTML("""<h1 align="center">FLUX 프롬프트 생성기</h1>
8
+ <p><center>
9
+ <a href="https://github.com/dagthomas/comfyui_dagthomas" target="_blank">[comfyui_dagthomas]</a>
10
+ <a href="https://github.com/dagthomas" target="_blank">[dagthomas Github]</a>
11
+ <p align="center">이미지 또는 간단한 텍스트에서 긴 프롬프트를 생성합니다. 짧은 프롬프트를 개선합니다.</p>
12
+ </center></p>""")
13
 
14
  with gr.Row():
15
  with gr.Column(scale=2):
16
+ with gr.Accordion("기본 설정"):
17
+ seed = gr.Number(label="시드", value=random.randint(0, 1000000))
18
+ custom = gr.Textbox(label="사용자 정의 입력 프롬프트 (선택사항)")
19
+ subject = gr.Textbox(label="주제 (선택사항)")
20
 
21
+ # 글로벌 옵션 선택을 위한 라디오 버튼 추가
22
+ global_option = gr.Radio(["비활성화", "랜덤"], label="모든 옵션 설정:", value="비활성화")
23
 
24
+ with gr.Accordion("예술 형식 사진 유형", open=False):
25
+ artform = gr.Dropdown(["비활성화", "랜덤"] + ARTFORM, label="예술 형식", value="비활성화")
26
+ photo_type = gr.Dropdown(["비활성화", "랜덤"] + PHOTO_TYPE, label="사진 유형", value="비활성화")
27
 
28
+ with gr.Accordion("캐릭터 세부사항", open=False):
29
+ body_types = gr.Dropdown(["비활성화", "랜덤"] + BODY_TYPES, label="체형", value="비활성화")
30
+ default_tags = gr.Dropdown(["비활성화", "랜덤"] + DEFAULT_TAGS, label="기본 태그", value="비활성화")
31
+ roles = gr.Dropdown(["비활성화", "랜덤"] + ROLES, label="역할", value="비활성화")
32
+ hairstyles = gr.Dropdown(["비활성화", "랜덤"] + HAIRSTYLES, label="헤어스타일", value="비활성화")
33
+ clothing = gr.Dropdown(["비활성화", "랜덤"] + CLOTHING, label="의상", value="비활성화")
34
 
35
+ with gr.Accordion("장면 세부사항", open=False):
36
+ place = gr.Dropdown(["비활성화", "랜덤"] + PLACE, label="장소", value="비활성화")
37
+ lighting = gr.Dropdown(["비활성화", "랜덤"] + LIGHTING, label="조명", value="비활성화")
38
+ composition = gr.Dropdown(["비활성화", "랜덤"] + COMPOSITION, label="구성", value="비활성화")
39
+ pose = gr.Dropdown(["비활성화", "랜덤"] + POSE, label="포즈", value="비활성화")
40
+ background = gr.Dropdown(["비활성화", "랜덤"] + BACKGROUND, label="배경", value="비활성화")
41
 
42
+ with gr.Accordion("스타일 아티스트", open=False):
43
+ additional_details = gr.Dropdown(["비활성화", "랜덤"] + ADDITIONAL_DETAILS, label="추가 세부 사항", value="비활성화")
44
+ photography_styles = gr.Dropdown(["비활성화", "랜덤"] + PHOTOGRAPHY_STYLES, label="사진 스타일", value="비활성화")
45
+ device = gr.Dropdown(["비활성화", "랜덤"] + DEVICE, label="장비", value="비활성화")
46
+ photographer = gr.Dropdown(["비활성화", "랜덤"] + PHOTOGRAPHER, label="사진작가", value="비활성화")
47
+ artist = gr.Dropdown(["비활성화", "랜덤"] + ARTIST, label="아티스트", value="비활성화")
48
+ digital_artform = gr.Dropdown(["비활성화", "랜덤"] + DIGITAL_ARTFORM, label="디지털 예술 형식", value="비활성화")
49
 
50
+ generate_button = gr.Button("프롬프트 생성")
51
 
52
  with gr.Column(scale=2):
53
+ with gr.Accordion("이미지 설명", open=False):
54
+ input_image = gr.Image(label="입력 이미지 (선택사항)")
55
+ caption_output = gr.Textbox(label="생성된 설명", lines=3)
56
+ create_caption_button = gr.Button("설명 생성")
57
+ add_caption_button = gr.Button("프롬프트에 설명 추가")
58
+
59
+ with gr.Accordion("프롬프트 생성", open=True):
60
+ output = gr.Textbox(label="생성된 프롬프트 / 입력 텍스트", lines=4)
61
+ t5xxl_output = gr.Textbox(label="T5XXL 출력", visible=True)
62
+ clip_l_output = gr.Textbox(label="CLIP L 출력", visible=True)
63
+ clip_g_output = gr.Textbox(label="CLIP G 출력", visible=True)
64
 
65
  with gr.Column(scale=2):
66
+ with gr.Accordion("LLM을 사용한 프롬프트 생성", open=False):
67
+ model = gr.Dropdown(["Mixtral", "Mistral", "Llama 3", "Mistral-Nemo"], label="모델", value="Llama 3")
68
+ happy_talk = gr.Checkbox(label="행복한 대화", value=True)
69
+ compress = gr.Checkbox(label="압축", value=True)
70
+ compression_level = gr.Radio(["부드럽게", "중간", "강하게"], label="압축 레벨", value="강하게")
71
+ poster = gr.Checkbox(label="포스터 형식", value=False)
72
+ custom_base_prompt = gr.Textbox(label="사용자 정의 기본 프롬프트", lines=5)
73
+ generate_text_button = gr.Button("LLM으로 프롬프트 생성")
74
+ text_output = gr.Textbox(label="생성된 텍스트", lines=10)
75
 
76
  def create_caption(image):
77
  if image is not None:
 
125
 
126
  if __name__ == "__main__":
127
  demo = create_interface()
128
+ demo.launch()