seawolf2357 committed
Commit 6f263dc · verified · 1 Parent(s): 4d01430

Update app.py

Files changed (1):
  1. app.py +146 -202
app.py CHANGED

@@ -1,19 +1,37 @@
  import random
  import os
  import uuid
  from datetime import datetime
  import gradio as gr
  import numpy as np
- import spaces
  import torch
  from diffusers import DiffusionPipeline
  from PIL import Image

- # Create permanent storage directory
  SAVE_DIR = "saved_images"  # Gradio will handle the persistence
  if not os.path.exists(SAVE_DIR):
      os.makedirs(SAVE_DIR, exist_ok=True)

  device = "cuda" if torch.cuda.is_available() else "cpu"
  repo_id = "black-forest-labs/FLUX.1-dev"
  adapter_id = "seawolf2357/kim-korea"  # LoRA model fine-tuned on a specific politician
@@ -25,171 +43,142 @@ pipeline = pipeline.to(device)
  MAX_SEED = np.iinfo(np.int32).max
  MAX_IMAGE_SIZE = 1024

- def save_generated_image(image, prompt):
-     # Generate unique filename with timestamp
      timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
      unique_id = str(uuid.uuid4())[:8]
      filename = f"{timestamp}_{unique_id}.png"
      filepath = os.path.join(SAVE_DIR, filename)
-
-     # Save the image
      image.save(filepath)
-
-     # Save metadata
      metadata_file = os.path.join(SAVE_DIR, "metadata.txt")
      with open(metadata_file, "a", encoding="utf-8") as f:
          f.write(f"{filename}|{prompt}|{timestamp}\n")
-
      return filepath

- @spaces.GPU(duration=60)
- def inference(
-     prompt,
-     seed=42,
-     randomize_seed=True,
-     width=1024,
-     height=768,
-     guidance_scale=3.5,
-     num_inference_steps=30,
-     lora_scale=1.0,
-     progress=None,
- ):
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
      generator = torch.Generator(device=device).manual_seed(int(seed))
-
-     image = pipeline(
          prompt=prompt,
          guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
          width=width,
          height=height,
          generator=generator,
          joint_attention_kwargs={"scale": lora_scale},
      ).images[0]
-
-     # Save the generated image
-     filepath = save_generated_image(image, prompt)
-
-     # Return just the image and seed
-     return image, seed

- # Example prompts: depict the politician Mr. KIM in a variety of situations

  examples = [
-     "Mr. KIM holding up a 'Fighting!' banner with both hands, showing patriotic pride and determination for national excellence.",
      "Mr. KIM raising both arms in celebration with a triumphant expression, showing victory and hope for the future.",
-     "Mr. KIM jogging in a park wearing athletic gear, demonstrating healthy lifestyle and energetic leadership qualities.",
-     "Mr. KIM warmly shaking hands with female citizens in a crowded street, showing genuine care and connection with women voters.",
-     "Mr. KIM at a campaign rally, pointing toward the horizon with an inspiring gesture while female and kids audience members applaud.",
-     "Mr. KIM participating in a community event, surrounded by enthusiastic female supporters cheering",
-     "Mr. KIM visiting a local market, engaging in friendly conversation with female vendors and shopkeepers.",
-     "Mr. KIM walking through a university campus, discussing education policies with female students and professors.",
-     "Mr. KIM delivering a powerful speech in front of a large crowd with confident gestures and determined expression.",
-     "Mr. KIM in a dynamic interview setting, passionately outlining his visions for the future.",
-     "Mr. KIM preparing for an important debate, surrounded by paperwork, looking focused and resolute.",
  ]

- # Style the UI with a red-toned gradient
  custom_css = """
  :root {
-     --color-primary: #8F1A3A;      /* main color, deep red */
-     --color-secondary: #FF4B4B;    /* accent color (bright red) */
      --background-fill-primary: linear-gradient(to right, #FFF5F5, #FED7D7, #FEB2B2);
  }
-
- footer {
-     visibility: hidden;
- }
-
- .gradio-container {
-     background: var(--background-fill-primary);
- }
-
- .title {
-     color: var(--color-primary) !important;
-     font-size: 3rem !important;
-     font-weight: 700 !important;
-     text-align: center;
-     margin: 1rem 0;
-     text-shadow: 2px 2px 4px rgba(0,0,0,0.05);
-     font-family: 'Playfair Display', serif;
- }
-
- .subtitle {
-     color: #4A5568 !important;
-     font-size: 1.2rem !important;
-     text-align: center;
-     margin-bottom: 1.5rem;
-     font-style: italic;
- }
-
- .collection-link {
-     text-align: center;
-     margin-bottom: 2rem;
-     font-size: 1.1rem;
- }
-
- .collection-link a {
-     color: var(--color-primary);
-     text-decoration: underline;
-     transition: color 0.3s ease;
- }
-
- .collection-link a:hover {
-     color: var(--color-secondary);
- }
-
- .model-description {
-     background-color: rgba(255, 255, 255, 0.8);
-     border-radius: 12px;
-     padding: 24px;
-     margin: 20px 0;
-     box-shadow: 0 4px 12px rgba(0, 0, 0, 0.05);
-     border-left: 5px solid var(--color-primary);
- }
-
- button.primary {
-     background-color: var(--color-primary) !important;
-     transition: all 0.3s ease;
-     color: #fff !important;
- }
-
- button:hover {
-     transform: translateY(-2px);
-     box-shadow: 0 5px 15px rgba(0,0,0,0.1);
- }
-
- .input-container {
-     border-radius: 10px;
-     box-shadow: 0 2px 8px rgba(0,0,0,0.05);
-     background-color: rgba(255, 255, 255, 0.6);
-     padding: 20px;
-     margin-bottom: 1rem;
- }
-
- .advanced-settings {
-     margin-top: 1rem;
-     padding: 1rem;
-     border-radius: 10px;
-     background-color: rgba(255, 255, 255, 0.6);
- }
-
- .example-region {
-     background-color: rgba(255, 255, 255, 0.5);
-     border-radius: 10px;
-     padding: 1rem;
-     margin-top: 1rem;
- }
  """

  with gr.Blocks(css=custom_css, analytics_enabled=False) as demo:
      gr.HTML('<div class="title">Mr. KIM in KOREA</div>')
-
-     # Edit or remove the collection link / notice as needed
      gr.HTML('<div class="collection-link"><a href="https://huggingface.co/collections/openfree/painting-art-ai-681453484ec15ef5978bbeb1" target="_blank">Visit the LoRA Model Collection</a></div>')
-
-     # Model description: note that this is a LoRA model of a specific politician
      with gr.Group(elem_classes="model-description"):
          gr.HTML("""
          <p>

@@ -199,85 +188,39 @@ with gr.Blocks(css=custom_css, analytics_enabled=False) as demo:
          </p>
          """)

-     # Main UI
-     with gr.Column(elem_id="col-container"):
          with gr.Row(elem_classes="input-container"):
-             prompt = gr.Text(
-                 label="Prompt",
-                 max_lines=1,
-                 placeholder="Enter your prompt (add [trigger] at the end)",
-                 value=examples[0]  # default example
-             )
-             run_button = gr.Button("Generate", variant="primary", scale=0)

-         result = gr.Image(label="Generated Image")
-         seed_output = gr.Number(label="Seed", visible=True)

          with gr.Accordion("Advanced Settings", open=False, elem_classes="advanced-settings"):
-             seed = gr.Slider(
-                 label="Seed",
-                 minimum=0,
-                 maximum=MAX_SEED,
-                 step=1,
-                 value=42,
-             )
              randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
              with gr.Row():
-                 width = gr.Slider(
-                     label="Width",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024,
-                 )
-                 height = gr.Slider(
-                     label="Height",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=768,
-                 )
-
              with gr.Row():
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.0,
-                     maximum=10.0,
-                     step=0.1,
-                     value=3.5,
-                 )
-                 num_inference_steps = gr.Slider(
-                     label="Number of inference steps",
-                     minimum=1,
-                     maximum=50,
-                     step=1,
-                     value=30,
-                 )
-                 lora_scale = gr.Slider(
-                     label="LoRA scale",
-                     minimum=0.0,
-                     maximum=1.0,
-                     step=0.1,
-                     value=1.0,
-                 )

          with gr.Group(elem_classes="example-region"):
              gr.Markdown("### Examples")
-             gr.Examples(
-                 examples=examples,
-                 inputs=prompt,
-                 outputs=None,   # don't auto-run examples
-                 fn=None,        # no function to run; examples just fill the prompt
-                 cache_examples=False,
-             )

-     # Event handlers
-     gr.on(
-         triggers=[run_button.click, prompt.submit],
-         fn=inference,
          inputs=[
-             prompt,
              seed,
              randomize_seed,
              width,

@@ -286,8 +229,9 @@ with gr.Blocks(css=custom_css, analytics_enabled=False) as demo:
              num_inference_steps,
              lora_scale,
          ],
-         outputs=[result, seed_output],
      )

  demo.queue()
  demo.launch()
 
app.py (new version)

@@ -1,19 +1,37 @@
  import random
  import os
  import uuid
+ import re
+ import time
  from datetime import datetime
+
  import gradio as gr
  import numpy as np
+ import requests
+ import spaces  # required by the @spaces.GPU decorator used below
  import torch
  from diffusers import DiffusionPipeline
  from PIL import Image

+ # ===== OpenAI setup =====
+ from openai import OpenAI
+
+ client = OpenAI(api_key=os.getenv("LLM_API"))  # the LLM_API environment variable must hold the API key
+
+ # ===== Style presets for prompt augmentation =====
+ STYLE_PRESETS = {
+     "None": "",
+     "Realistic Photo": "photorealistic, 8k, ultra-detailed, cinematic lighting, realistic skin texture",
+     "Oil Painting": "oil painting, rich brush strokes, canvas texture, baroque lighting",
+     "Comic Book": "comic book style, bold ink outlines, cel shading, vibrant colors",
+     "Watercolor": "watercolor illustration, soft gradients, splatter effect, pastel palette",
+ }
+
+ # ===== Save folder =====
  SAVE_DIR = "saved_images"  # Gradio will handle the persistence
  if not os.path.exists(SAVE_DIR):
      os.makedirs(SAVE_DIR, exist_ok=True)

+ # ===== Device & model loading =====
  device = "cuda" if torch.cuda.is_available() else "cpu"
  repo_id = "black-forest-labs/FLUX.1-dev"
  adapter_id = "seawolf2357/kim-korea"  # LoRA model fine-tuned on a specific politician

@@ -25,171 +43,142 @@ pipeline = pipeline.to(device)
  MAX_SEED = np.iinfo(np.int32).max
  MAX_IMAGE_SIZE = 1024

+ # ===== Korean-text detection =====
+ HANGUL_RE = re.compile(r"[\u3131-\u318E\uAC00-\uD7A3]+")
+
+ def is_korean(text: str) -> bool:
+     return bool(HANGUL_RE.search(text))
+
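# Illustration (not part of the diff): is_korean("김 후보가 연설하는 장면") returns True because
# Hangul syllables fall in the U+AC00-U+D7A3 range, while is_korean("Mr. KIM at a rally") returns
# False, so English prompts skip the translation step entirely.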
+ # ===== Translation & prompt-augmentation helpers =====
+
+ def openai_translate(text: str, retries: int = 3) -> str:
+     """Translate Korean to English via OpenAI gpt-4.1-mini; English input is returned unchanged."""
+     if not is_korean(text):
+         return text
+
+     for attempt in range(retries):
+         try:
+             res = client.chat.completions.create(
+                 model="gpt-4.1-mini",
+                 messages=[
+                     {
+                         "role": "system",
+                         "content": "Translate the following Korean prompt into concise, descriptive English suitable for an image generation model. Keep the meaning, do not add new concepts."
+                     },
+                     {"role": "user", "content": text}
+                 ],
+                 temperature=0.3,
+                 max_tokens=256,
+             )
+             return res.choices[0].message.content.strip()
+         except (requests.exceptions.RequestException, Exception) as e:
+             print(f"[translate] attempt {attempt + 1} failed: {e}")
+             time.sleep(2)
+     return text  # fall back to the original prompt if translation keeps failing
+
+ def prepare_prompt(user_prompt: str, style_key: str) -> str:
+     """Translate the prompt if it is Korean, then append the selected style preset."""
+     prompt_en = openai_translate(user_prompt)
+     style_suffix = STYLE_PRESETS.get(style_key, "")
+     if style_suffix:
+         final_prompt = f"{prompt_en}, {style_suffix}"
+     else:
+         final_prompt = prompt_en
+     return final_prompt
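# Illustration (not part of the diff): for an English prompt, openai_translate() returns its input
# unchanged, so prepare_prompt() only appends the chosen preset, e.g.
#   prepare_prompt("Mr. KIM waving to supporters", "Comic Book")
#   -> "Mr. KIM waving to supporters, comic book style, bold ink outlines, cel shading, vibrant colors"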
+
+ # ===== Image saving =====
+
+ def save_generated_image(image: Image.Image, prompt: str) -> str:
      timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
      unique_id = str(uuid.uuid4())[:8]
      filename = f"{timestamp}_{unique_id}.png"
      filepath = os.path.join(SAVE_DIR, filename)
      image.save(filepath)
+
+     # Save metadata
      metadata_file = os.path.join(SAVE_DIR, "metadata.txt")
      with open(metadata_file, "a", encoding="utf-8") as f:
          f.write(f"{filename}|{prompt}|{timestamp}\n")
      return filepath

+ # ===== Diffusion call =====
+
+ def run_pipeline(prompt: str, seed: int, width: int, height: int, guidance_scale: float, num_steps: int, lora_scale: float):
      generator = torch.Generator(device=device).manual_seed(int(seed))
+     result = pipeline(
          prompt=prompt,
          guidance_scale=guidance_scale,
+         num_inference_steps=num_steps,
          width=width,
          height=height,
          generator=generator,
          joint_attention_kwargs={"scale": lora_scale},
      ).images[0]
+     return result

+ # ===== Gradio inference wrapper =====
+ @spaces.GPU(duration=60)
+ def generate_image(
+     user_prompt: str,
+     style_key: str,
+     seed: int = 42,
+     randomize_seed: bool = True,
+     width: int = 1024,
+     height: int = 768,
+     guidance_scale: float = 3.5,
+     num_inference_steps: int = 30,
+     lora_scale: float = 1.0,
+     progress=None,
+ ):
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+
+     # 1) Translate and augment the prompt
+     final_prompt = prepare_prompt(user_prompt, style_key)
+
+     # 2) Run the diffusion pipeline
+     image = run_pipeline(final_prompt, seed, width, height, guidance_scale, num_inference_steps, lora_scale)
+
+     # 3) Save the result
+     save_generated_image(image, final_prompt)
+
+     return image, seed
+
+ # ===== Example prompts (Korean and English both allowed) =====
  examples = [
+     "김 후보가 태극기를 들고 힘찬 미소를 짓는 모습을 8K로",  # Korean example (translated automatically)
      "Mr. KIM raising both arms in celebration with a triumphant expression, showing victory and hope for the future.",
+     "김 후보가 공원에서 조깅 중 건강한 리더십을 보여주는 장면",  # Korean example
  ]

+ # ===== Custom CSS (keeps the red tone) =====
  custom_css = """
  :root {
+     --color-primary: #8F1A3A;
+     --color-secondary: #FF4B4B;
      --background-fill-primary: linear-gradient(to right, #FFF5F5, #FED7D7, #FEB2B2);
  }
+ footer {visibility: hidden;}
+ .gradio-container {background: var(--background-fill-primary);}
+ .title {color: var(--color-primary)!important; font-size:3rem!important; font-weight:700!important; text-align:center; margin:1rem 0; font-family:'Playfair Display',serif;}
+ .subtitle {color:#4A5568!important; font-size:1.2rem!important; text-align:center; margin-bottom:1.5rem; font-style:italic;}
+ .collection-link {text-align:center; margin-bottom:2rem; font-size:1.1rem;}
+ .collection-link a {color:var(--color-primary); text-decoration:underline; transition:color .3s ease;}
+ .collection-link a:hover {color:var(--color-secondary);}
+ .model-description{background:rgba(255,255,255,.8); border-radius:12px; padding:24px; margin:20px 0; box-shadow:0 4px 12px rgba(0,0,0,.05); border-left:5px solid var(--color-primary);}
+ button.primary{background:var(--color-primary)!important; color:#fff!important; transition:all .3s ease;}
+ button:hover{transform:translateY(-2px); box-shadow:0 5px 15px rgba(0,0,0,.1);}
+ .input-container{border-radius:10px; box-shadow:0 2px 8px rgba(0,0,0,.05); background:rgba(255,255,255,.6); padding:20px; margin-bottom:1rem;}
+ .advanced-settings{margin-top:1rem; padding:1rem; border-radius:10px; background:rgba(255,255,255,.6);}
+ .example-region{background:rgba(255,255,255,.5); border-radius:10px; padding:1rem; margin-top:1rem;}
  """

+ # ===== Gradio UI =====
  with gr.Blocks(css=custom_css, analytics_enabled=False) as demo:
      gr.HTML('<div class="title">Mr. KIM in KOREA</div>')
      gr.HTML('<div class="collection-link"><a href="https://huggingface.co/collections/openfree/painting-art-ai-681453484ec15ef5978bbeb1" target="_blank">Visit the LoRA Model Collection</a></div>')
+
      with gr.Group(elem_classes="model-description"):
          gr.HTML("""
          <p>

@@ -199,85 +188,39 @@ with gr.Blocks(css=custom_css, analytics_enabled=False) as demo:
          </p>
          """)

+     # ===== Main inputs =====
+     with gr.Column():
          with gr.Row(elem_classes="input-container"):
+             user_prompt = gr.Text(label="Prompt", max_lines=1, value=examples[0])
+             style_select = gr.Radio(label="Style Preset", choices=list(STYLE_PRESETS.keys()), value="None", interactive=True)
+             run_button = gr.Button("Generate", variant="primary")

+         result_image = gr.Image(label="Generated Image")
+         seed_output = gr.Number(label="Seed")

+         # ===== Advanced settings =====
          with gr.Accordion("Advanced Settings", open=False, elem_classes="advanced-settings"):
+             seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
              randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
              with gr.Row():
+                 width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
+                 height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=768)
              with gr.Row():
+                 guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=10.0, step=0.1, value=3.5)
+                 num_inference_steps = gr.Slider(label="Inference steps", minimum=1, maximum=50, step=1, value=30)
+                 lora_scale = gr.Slider(label="LoRA scale", minimum=0.0, maximum=1.0, step=0.1, value=1.0)

+         # ===== Examples area =====
          with gr.Group(elem_classes="example-region"):
              gr.Markdown("### Examples")
+             gr.Examples(examples=examples, inputs=user_prompt, cache_examples=False)

+     # ===== Events =====
+     run_button.click(
+         fn=generate_image,
          inputs=[
+             user_prompt,
+             style_select,
              seed,
              randomize_seed,
              width,

@@ -286,8 +229,9 @@ with gr.Blocks(css=custom_css, analytics_enabled=False) as demo:
              num_inference_steps,
              lora_scale,
          ],
+         outputs=[result_image, seed_output],
      )

+
  demo.queue()
  demo.launch()
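
As a quick offline check of the prompt-preparation path added in this version, here is a minimal sketch (not part of the commit): it copies HANGUL_RE, a trimmed STYLE_PRESETS, and the prepare_prompt logic from the new app.py, and swaps openai_translate for a hypothetical fake_translate stub, so it runs without the LLM_API key, the OpenAI client, or the FLUX pipeline.

import re

HANGUL_RE = re.compile(r"[\u3131-\u318E\uAC00-\uD7A3]+")
STYLE_PRESETS = {
    "None": "",
    "Comic Book": "comic book style, bold ink outlines, cel shading, vibrant colors",
}

def fake_translate(text: str) -> str:
    # Stand-in for openai_translate(): return a fixed English sentence for Korean input.
    if HANGUL_RE.search(text):
        return "Candidate Kim smiling while holding the national flag"
    return text

def prepare_prompt(user_prompt: str, style_key: str) -> str:
    prompt_en = fake_translate(user_prompt)
    style_suffix = STYLE_PRESETS.get(style_key, "")
    return f"{prompt_en}, {style_suffix}" if style_suffix else prompt_en

print(prepare_prompt("Mr. KIM at a rally", "Comic Book"))
print(prepare_prompt("김 후보가 태극기를 들고 있는 모습", "None"))

Running it prints the English prompt with the preset suffix appended, and the stand-in translation for the Korean prompt; this is the same joining behaviour generate_image() feeds into the FLUX pipeline.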