Update app.py
app.py
CHANGED
@@ -4,11 +4,15 @@ import random
 import torch
 from diffusers import DiffusionPipeline
 import spaces
+from transformers import pipeline
 
 # Basic settings
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
+# Load the Korean-to-English translation model (runs on the CPU)
+translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", device="cpu")
+
 # Load the model
 pipe = DiffusionPipeline.from_pretrained(
     "black-forest-labs/FLUX.1-schnell",
@@ -78,11 +82,32 @@ GRADIO_EXAMPLES = [
     for example in EXAMPLES
 ]
 
+# Korean detection helper
+def contains_korean(text):
+    for char in text:
+        if ord('가') <= ord(char) <= ord('힣'):
+            return True
+    return False
+
+# Inference function, translating the prompt first when needed
 @spaces.GPU()
 def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
+    # Detect Korean and translate
+    original_prompt = prompt
+    translated = False
+
+    if contains_korean(prompt):
+        translated = True
+        translation = translator(prompt)
+        prompt = translation[0]['translation_text']
+
+    # Set the random seed
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
+
     generator = torch.Generator().manual_seed(seed)
+
+    # Run the model
     image = pipe(
         prompt=prompt,
         width=width,
@@ -91,7 +116,12 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_in
         generator=generator,
         guidance_scale=0.0
     ).images[0]
-    return image, seed
+
+    # Return translation info alongside the image
+    if translated:
+        return image, seed, original_prompt, prompt
+    else:
+        return image, seed, None, None
 
 # CSS styles (existing structure kept)
 css = """
@@ -164,6 +194,14 @@ css = """
 .examples-area {
     flex: 1 !important;
 }
+.translation-info {
+    background-color: #f8f9fa;
+    border-left: 4px solid #17a2b8;
+    padding: 10px 15px;
+    margin-top: 10px;
+    border-radius: 4px;
+    font-size: 14px;
+}
 """
 
 with gr.Blocks(css=css) as demo:
@@ -178,13 +216,25 @@ with gr.Blocks(css=css) as demo:
         with gr.Column(elem_id="input-column", scale=2):
             with gr.Group(elem_classes="input-box"):
                 prompt = gr.Text(
-                    label="Design Prompt",
-                    placeholder="Enter your product design concept details...",
+                    label="Design Prompt (enter in Korean or English)",
+                    placeholder="Enter your product design concept details in Korean or English...",
                     lines=10,
                     elem_classes="prompt-input"
                 )
                 run_button = gr.Button("Generate Design", variant="primary")
                 result = gr.Image(label="Generated Design")
+
+                # Translation info display area
+                original_prompt = gr.Textbox(visible=False)
+                translated_prompt = gr.Textbox(visible=False)
+                translation_info = gr.Markdown(visible=False, elem_classes="translation-info")
+
+                # Translation info update helper
+                def update_translation_info(original, translated):
+                    if original and translated:
+                        return gr.update(visible=True, value=f"🌐 Korean prompt was translated to English:\n\n**Original:** {original}\n\n**Translated:** {translated}")
+                    else:
+                        return gr.update(visible=False)
 
             with gr.Accordion("Advanced Settings", open=False):
                 seed = gr.Slider(
@@ -247,7 +297,15 @@ with gr.Blocks(css=css) as demo:
         triggers=[run_button.click, prompt.submit],
         fn=infer,
         inputs=[prompt, seed, randomize_seed, width, height, num_inference_steps],
-        outputs=[result, seed]
+        outputs=[result, seed, original_prompt, translated_prompt]
+    )
+
+    # Translation info update event
+    gr.on(
+        triggers=[original_prompt.change, translated_prompt.change],
+        fn=update_translation_info,
+        inputs=[original_prompt, translated_prompt],
+        outputs=[translation_info]
     )
 
 if __name__ == "__main__":
@@ -258,4 +316,4 @@ if __name__ == "__main__":
         share=False,
         show_error=True,
         debug=True
-    )
\ No newline at end of file
+    )