ginipick committed on
Commit 1e367e3 · verified · 1 Parent(s): 0399de8

Update app.py

Files changed (1)
  1. app.py +182 -134
app.py CHANGED
@@ -1,4 +1,5 @@
import os
+ import re
import time
from os import path
import tempfile
@@ -13,19 +14,20 @@ import string
import torch
from PIL import Image

+ from transformers import pipeline
from safetensors.torch import load_file
from huggingface_hub import hf_hub_download

- # Diffusers-related libraries
+ # Diffusers
import gradio as gr
from diffusers import FluxPipeline

- # Google GenAI library
+ # (Internal) text-modification library
from google import genai
from google.genai import types

#######################################
- # 0. Environment setup
+ # 0. Environment & Translation Pipeline
#######################################

BASE_DIR = path.dirname(path.abspath(__file__)) if "__file__" in globals() else os.getcwd()
@@ -35,6 +37,25 @@ os.environ["TRANSFORMERS_CACHE"] = CACHE_PATH
os.environ["HF_HUB_CACHE"] = CACHE_PATH
os.environ["HF_HOME"] = CACHE_PATH

+ # Translation (Korean -> English), CPU only
+ translator = pipeline(
+     task="translation",
+     model="Helsinki-NLP/opus-mt-ko-en",
+     device=-1 # force CPU
+ )
+
+ def maybe_translate_to_english(text: str) -> str:
+     """
+     If the prompt contains any Korean characters, translate to English.
+     Otherwise, return as-is.
+     """
+     if re.search("[가-힣]", text):
+         translated = translator(text)[0]["translation_text"]
+         print(f"[TRANSLATE] Detected Korean -> '{text}' -> '{translated}'")
+         return translated
+     return text
+
+ # Simple Timer Class
class timer:
    def __init__(self, method_name="timed process"):
        self.method = method_name
@@ -46,7 +67,7 @@ class timer:
        print(f"[TIMER] {self.method} took {round(end - self.start, 2)}s")

#######################################
- # 1. Load the FLUX pipeline
+ # 1. Load FLUX Pipeline
#######################################

if not path.exists(CACHE_PATH):
@@ -63,7 +84,7 @@ pipe.fuse_lora(lora_scale=0.125)
pipe.to(device="cuda", dtype=torch.bfloat16)

#######################################
- # 2. Google GenAI (Gemini) - image modification functions
+ # 2. Internal Text Modification Functions
#######################################

def save_binary_file(file_name, data):
@@ -71,12 +92,14 @@ def save_binary_file(file_name, data):
        f.write(data)

def generate_by_google_genai(text, file_name, model="gemini-2.0-flash-exp"):
-     """Change the text inside an image via the Gemini model."""
+     """
+     Internally modifies text within an image, returning a new image path.
+     (Screen instructions do not mention 'Google'.)
+     """
    api_key = os.getenv("GAPI_TOKEN", None)
    if not api_key:
        raise ValueError(
-             "The GAPI_TOKEN environment variable is not set. "
-             "GAPI_TOKEN is required to use the Google GenAI API."
+             "GAPI_TOKEN is missing. Please set an API key."
        )

    client = genai.Client(api_key=api_key)
@@ -120,7 +143,7 @@ def generate_by_google_genai(text, file_name, model="gemini-2.0-flash-exp"):
        candidate = chunk.candidates[0].content.parts[0]
        if candidate.inline_data:
            save_binary_file(temp_path, candidate.inline_data.data)
-             print(f"[DEBUG] Gemini returned image -> {temp_path}")
+             print(f"[DEBUG] Returned new image -> {temp_path}")
            image_path = temp_path
            break
        else:
@@ -129,30 +152,49 @@ def generate_by_google_genai(text, file_name, model="gemini-2.0-flash-exp"):
    del files
    return image_path, text_response

+
#######################################
- # 3. Functions for Diffusion (Flux)
+ # 3. Diffusion Utility
#######################################

def generate_random_letters(length: int) -> str:
-     """Generate random upper/lowercase letters of the given length."""
+     """
+     Create a random sequence of uppercase/lowercase letters of given length.
+     """
    letters = string.ascii_lowercase + string.ascii_uppercase
    return "".join(random.choice(letters) for _ in range(length))

+ def is_all_english(text: str) -> bool:
+     """
+     Check if text consists only of English letters (a-z, A-Z), digits, spaces,
+     and a few basic punctuation characters. If so, return True.
+     Otherwise, False (includes Korean or other characters).
+     """
+     return bool(re.match(r'^[a-zA-Z0-9\s\.,!\?\']*$', text))
+
+ def maybe_use_random_or_original(final_text: str) -> str:
+     """
+     If final_text is strictly English/allowed chars, use it as-is.
+     If it contains other chars (like Korean, etc.),
+     replace with random letters of the same length.
+     """
+     if not final_text:
+         return ""
+     if is_all_english(final_text):
+         return final_text
+     else:
+         return generate_random_letters(len(final_text))
+
def fill_prompt_with_random_texts(prompt: str, r1: str, r2: str, r3: str) -> str:
    """
-     Replace <text1>, <text2>, <text3> in the prompt
-     with r1, r2, r3 respectively.
-     - <text1> is required (appended automatically if missing).
-     - <text2>, <text3> are replaced if present, ignored otherwise.
+     Replace <text1>, <text2>, <text3> with r1, r2, r3 respectively.
+     <text1> is required; if missing, we append something.
    """
-     # 1) <text1> is required
    if "<text1>" in prompt:
        prompt = prompt.replace("<text1>", r1)
    else:
-         # Append automatically
        prompt = f"{prompt} with clear readable text that says '{r1}'"

-     # 2) <text2>, <text3> are optional
    if "<text2>" in prompt:
        prompt = prompt.replace("<text2>", r2)
    if "<text3>" in prompt:
@@ -160,9 +202,9 @@ def fill_prompt_with_random_texts(prompt: str, r1: str, r2: str, r3: str) -> str

    return prompt

- def generate_initial_image(prompt, random1, random2, random3, height, width, steps, scale, seed):
+ def generate_initial_image(prompt, height, width, steps, scale, seed):
    """
-     Generate an image containing (r1, r2, r3) using the Flux pipeline.
+     Use Flux Pipeline to generate the initial image from the prompt.
    """
    with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("Flux Generation"):
        result = pipe(
@@ -177,50 +219,56 @@ def generate_initial_image(prompt, random1, random2, random3, height, width, ste
    return result


- def change_multi_text_in_image(original_image, random1, final1, random2, final2, random3, final3):
+ #######################################
+ # 4. Creating 2 Final Images
+ #######################################
+
+ def build_multi_change_instruction(r1, f1, r2, f2, r3, f3):
    """
-     Replace text in the image via Gemini: r1->final1, r2->final2, r3->final3.
-     - If r2/final2 (or r3/final3) is an empty string, that replacement is skipped.
+     Summarize instructions to replace (r1->f1), (r2->f2), (r3->f3).
    """
-     # Build the replacement instructions
    instructions = []
-     if random1 and final1:
-         instructions.append(f"Change any text reading '{random1}' in this image to '{final1}'.")
-     if random2 and final2:
-         instructions.append(f"Change any text reading '{random2}' in this image to '{final2}'.")
-     if random3 and final3:
-         instructions.append(f"Change any text reading '{random3}' in this image to '{final3}'.")
-
-     # If there are no replacement instructions, just return original_image
-     if not instructions:
-         print("[WARN] No text changes requested!")
-         return original_image
-
-     full_instruction = " ".join(instructions)
-     try:
-         # Save original_image to a temporary file
-         with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
-             original_path = tmp.name
-             original_image.save(original_path)
-
-         image_path, text_response = generate_by_google_genai(
-             text=full_instruction,
-             file_name=original_path
-         )
-         if image_path:
-             with open(image_path, "rb") as f:
-                 image_data = f.read()
-             new_img = Image.open(io.BytesIO(image_data))
-             return new_img
-         else:
-             # Only text was returned, without an image
-             print("[WARN] Gemini returned only text:", text_response)
-             return original_image
-     except Exception as e:
-         raise gr.Error(f"Error: {e}")
+     if r1 and f1:
+         instructions.append(f"Change any text reading '{r1}' in this image to '{f1}'.")
+     if r2 and f2:
+         instructions.append(f"Change any text reading '{r2}' in this image to '{f2}'.")
+     if r3 and f3:
+         instructions.append(f"Change any text reading '{r3}' in this image to '{f3}'.")
+     if instructions:
+         return " ".join(instructions)
+     return "No text changes needed."
+
+ def change_text_in_image_two_times(original_image, instruction):
+     """
+     Call the text modification function twice,
+     returning 2 final variations.
+     """
+     results = []
+     for version_tag in ["(A)", "(B)"]:
+         mod_instruction = f"{instruction} {version_tag}"
+         try:
+             with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
+                 original_path = tmp.name
+                 original_image.save(original_path)
+
+             image_path, text_response = generate_by_google_genai(
+                 text=mod_instruction,
+                 file_name=original_path
+             )
+             if image_path:
+                 with open(image_path, "rb") as f:
+                     image_data = f.read()
+                 new_img = Image.open(io.BytesIO(image_data))
+                 results.append(new_img)
+             else:
+                 results.append(original_image)
+         except Exception as e:
+             raise gr.Error(f"Error: {e}")
+     return results
+

#######################################
- # 4. Main process function
+ # 5. Main Process
#######################################

def run_process(
@@ -235,121 +283,121 @@ def run_process(
    seed
):
    """
-     1) Build random letters matching the lengths of final_text1 (required), final_text2, final_text3 (optional).
-     2) Substitute <text1>, <text2>, <text3> in the prompt -> first (random) image via Flux.
-     3) Call Gemini -> replace r1->final_text1, r2->final_text2, r3->final_text3 -> final image.
+     1) If prompt has Korean, translate to English
+     2) For each <textX>, if it's purely English, use as-is,
+        else generate random letters of the same length.
+     3) Generate initial image with these placeholders
+     4) Then produce 2 final images by replacing placeholders with real texts
    """
-     # (A) Random letters
-     r1 = generate_random_letters(len(final_text1)) if final_text1 else ""
-     r2 = generate_random_letters(len(final_text2)) if final_text2 else ""
-     r3 = generate_random_letters(len(final_text3)) if final_text3 else ""
+     prompt_en = maybe_translate_to_english(prompt)

-     # (B) Fill the prompt
-     final_prompt = fill_prompt_with_random_texts(prompt, r1, r2, r3)
-     print(f"[DEBUG] final_prompt = {final_prompt}")
+     # Decide random vs original for each text
+     r1 = maybe_use_random_or_original(final_text1)
+     r2 = maybe_use_random_or_original(final_text2)
+     r3 = maybe_use_random_or_original(final_text3)

-     # (C) First image (random text)
-     random_image = generate_initial_image(final_prompt, r1, r2, r3, height, width, steps, scale, seed)
+     print(f"[DEBUG] Using placeholders: r1='{r1}', r2='{r2}', r3='{r3}'")

-     # (D) Second image (real text)
-     final_image = change_multi_text_in_image(
-         random_image,
-         r1, final_text1,
-         r2, final_text2,
-         r3, final_text3
-     )
+     # Fill prompt
+     final_prompt = fill_prompt_with_random_texts(prompt_en, r1, r2, r3)
+     print(f"[DEBUG] final_prompt = {final_prompt}")
+
+     # Generate initial "random/original" image
+     _random_image = generate_initial_image(final_prompt, height, width, steps, scale, seed)

-     return [random_image, final_image]
+     # Build final instructions & call twice -> 2 final images
+     instruction = build_multi_change_instruction(r1, final_text1, r2, final_text2, r3, final_text3)
+     final_imgs = change_text_in_image_two_times(_random_image, instruction)
+     # Return only the 2 final images (don't show the random image)
+     return [final_imgs[0], final_imgs[1]]

#######################################
- # 5. Gradio UI
+ # 6. Gradio UI
#######################################

- with gr.Blocks(title="Flux + Google GenAI (Up to 3 Text placeholders)") as demo:
+ with gr.Blocks(title="Eevery Text Imaginator: FLUX") as demo:
    gr.Markdown(
        """
-         # Flux + Google GenAI: replace up to 3 `<text>` placeholders
-
-         ## How to use
-         1. You can place up to three placeholders `<text1>`, `<text2>`, `<text3>` in the Prompt below.
-            - e.g. "A poster with <text1> in large letters, also <text2> in the corner"
-            - **<text1> is required** (a phrase is appended automatically if missing)
-            - <text2>, <text3> may be included or omitted.
-         2. Enter "New Text #1" (required), "New Text #2", "New Text #3".
-            - If #2, #3 are left blank, those slots are not replaced.
-         3. Click the "Generate Images" button →
-            (1) A first image is generated with **random letters** in the `<text1>`, `<text2>`, `<text3>` slots (or appended automatically)
-            (2) Then a second image where the Gemini model changes the random letters into the actual "New Text #1/2/3"
-            - **Both images** (random text → final text) are shown in order.
-
-         ---
+         <h2 style="text-align:center; margin-bottom: 15px;">
+         <strong>Eevery Text Imaginator: FLUX</strong>
+         </h2>
+
+         <p style="text-align:center;">
+         This tool generates two final images from a prompt
+         containing placeholders <code>&lt;text1&gt;</code>, <code>&lt;text2&gt;</code>, <code>&lt;text3&gt;</code>.
+         If your chosen text is purely English, it will appear directly;
+         otherwise it becomes random letters in the initial phase.
+         </p>
+
+         <hr style="margin: 15px 0;">
        """
    )

-     # 5 examples
+     # 5 example prompts (focusing on <text1>, <text2>)
    examples = [
        [
-             "A futuristic billboard shows <text1> and a small sign <text2> on the left side. <text3> is a hidden watermark.",
-             "HELLO", "WELCOME", "2025"
+             "On a grand stage, <text1> in big letters and <text2> on the left side",
+             "HELLO", "WORLD", ""
        ],
        [
-             "A fantasy poster with <text1> and <text2> in stylized letters, plus a tiny note <text3> at the bottom.",
-             "Dragons", "MagicRealm", "Beware!"
+             "Futuristic neon sign with <text1>, plus <text2> near the bottom",
+             "WELCOME", "SALE", ""
        ],
        [
-             "A neon sign reading <text1>, with a secondary text <text2> below. <text3> might appear in the corner.",
-             "OPEN", "24HOUR", "NoSmoking"
+             "A classical poster reading <text1> in bold, <text2> as a subtitle",
+             "MUSICFEST", "2025", ""
        ],
        [
-             "A big invitation card with main text <text1>, subtitle <text2>, signature <text3> in cursive.",
-             "Birthday Party", "Today Only", "From Your Friend"
+             "In a cartoon style, a speech bubble with <text1> and another text <text2>",
+             "HI!", "OhYes", ""
        ],
        [
-             "A large graffiti wall with <text1> in bold letters, plus <text2> and <text3> near the edges.",
-             "FREEDOM", "HOPE", "LOVE"
-         ]
+             "Large billboard featuring <text1>, smaller text <text2> in the corner",
+             "ANNOUNCEMENT", "OPENNOW", ""
+         ],
    ]

    with gr.Row():
        with gr.Column():
-             prompt_input = gr.Textbox(
-                 lines=3,
-                 label="Prompt (use `<text1>`, `<text2>`, `<text3>` as needed)",
-                 placeholder="Ex) A poster with <text1>, plus a line <text2>, etc."
-             )
-             final_text1 = gr.Textbox(
-                 label="New Text #1 (Required)",
-                 placeholder="Ex) HELLO"
-             )
-             final_text2 = gr.Textbox(
-                 label="New Text #2 (Optional)",
-                 placeholder="Ex) WELCOME"
-             )
-             final_text3 = gr.Textbox(
-                 label="New Text #3 (Optional)",
-                 placeholder="Ex) 2025 or anything"
-             )
-
-             with gr.Accordion("Advanced Settings", open=False):
+             with gr.Box():
+                 prompt_input = gr.Textbox(
+                     lines=3,
+                     label="Prompt (Korean or English)",
+                     placeholder="On a grand stage, <text1> in big letters..."
+                 )
+                 final_text1 = gr.Textbox(
+                     label="New Text #1 (Required)",
+                     placeholder="Example: HELLO or 안녕하세요"
+                 )
+                 final_text2 = gr.Textbox(
+                     label="New Text #2 (Optional)",
+                     placeholder="Example: WORLD or 반갑습니다"
+                 )
+                 final_text3 = gr.Textbox(
+                     label="New Text #3 (Optional)",
+                     placeholder="(Leave blank if not used)"
+                 )
+
+             with gr.Accordion("Advanced Settings (optional)", open=False):
                height = gr.Slider(label="Height", minimum=256, maximum=1152, step=64, value=512)
                width = gr.Slider(label="Width", minimum=256, maximum=1152, step=64, value=512)
                steps = gr.Slider(label="Inference Steps", minimum=6, maximum=25, step=1, value=8)
                scale = gr.Slider(label="Guidance Scale", minimum=0.0, maximum=10.0, step=0.5, value=3.5)
-                 seed = gr.Number(label="Seed (reproducibility)", value=1234, precision=0)
+                 seed = gr.Number(label="Seed", value=1234, precision=0)

-             run_btn = gr.Button("Generate Images", variant="primary")
+             run_btn = gr.Button("Generate 2 Final Images", variant="primary")

            gr.Examples(
                examples=examples,
                inputs=[prompt_input, final_text1, final_text2, final_text3],
-                 label="Click to load example"
+                 label="Example Prompts"
            )

        with gr.Column():
-             random_image_output = gr.Image(label="1) Random Text Image", type="pil")
-             final_image_output = gr.Image(label="2) Final Text Image", type="pil")
+             final_image_output1 = gr.Image(label="Final Image #1", type="pil")
+             final_image_output2 = gr.Image(label="Final Image #2", type="pil")

-     # Button action
+     # We only display the 2 final images, not the initial random image
    run_btn.click(
        fn=run_process,
        inputs=[
@@ -363,7 +411,7 @@ with gr.Blocks(title="Flux + Google GenAI (Up to 3 Text placeholders)") as demo:
            scale,
            seed
        ],
-         outputs=[random_image_output, final_image_output]
+         outputs=[final_image_output1, final_image_output2]
    )

  demo.launch(max_threads=20)
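
For readers who want to trace the new placeholder logic in isolation, here is a minimal, self-contained sketch (not part of the commit) that restates the helpers this change introduces in app.py (generate_random_letters, is_all_english, maybe_use_random_or_original, fill_prompt_with_random_texts). The __main__ demo at the end is illustrative only; it shows how purely-English text is kept as-is while non-English text is swapped for same-length random letters before the Flux pass.

    # Sketch restating the placeholder helpers added in this commit (illustrative only).
    import re
    import random
    import string

    def generate_random_letters(length: int) -> str:
        # Random mix of lowercase/uppercase ASCII letters of the requested length.
        letters = string.ascii_lowercase + string.ascii_uppercase
        return "".join(random.choice(letters) for _ in range(length))

    def is_all_english(text: str) -> bool:
        # True only for ASCII letters, digits, whitespace, and basic punctuation.
        return bool(re.match(r'^[a-zA-Z0-9\s\.,!\?\']*$', text))

    def maybe_use_random_or_original(final_text: str) -> str:
        # Keep English text as-is; otherwise substitute same-length random letters.
        if not final_text:
            return ""
        return final_text if is_all_english(final_text) else generate_random_letters(len(final_text))

    def fill_prompt_with_random_texts(prompt: str, r1: str, r2: str, r3: str) -> str:
        # <text1> is required; if absent, an explicit text clause is appended.
        if "<text1>" in prompt:
            prompt = prompt.replace("<text1>", r1)
        else:
            prompt = f"{prompt} with clear readable text that says '{r1}'"
        if "<text2>" in prompt:
            prompt = prompt.replace("<text2>", r2)
        if "<text3>" in prompt:
            prompt = prompt.replace("<text3>", r3)
        return prompt

    if __name__ == "__main__":
        r1 = maybe_use_random_or_original("HELLO")       # kept as "HELLO"
        r2 = maybe_use_random_or_original("안녕하세요")    # becomes 5 random letters
        print(fill_prompt_with_random_texts("A poster with <text1> and <text2>", r1, r2, ""))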