Update app.py
app.py
CHANGED
@@ -73,6 +73,12 @@ if torch.cuda.is_available():
 
 @spaces.GPU(duration=300)
 def infer_t2i(prompt, seed=42, randomize_seed=False, width=1024, height=576, guidance_scale=5.0, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
+    # Detect Korean input and translate it to English
+    if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
+        translated = translator(prompt, max_length=512)[0]['translation_text']
+        prompt = translated
+        print(f"Translated prompt: {prompt}")
+
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device=device).manual_seed(seed)
@@ -88,7 +94,7 @@ def infer_t2i(prompt, seed=42, randomize_seed=False, width=1024, height=576, gui
     ).images[0]
 
     torch.cuda.empty_cache()
-    return image, seed
+    return image, seed, prompt  # also return the translated prompt
 
 
 @spaces.GPU(duration=300)
@@ -244,10 +250,15 @@ with gr.Blocks(analytics_enabled=False, css=css) as dynamicrafter_iface:
     t2i_output_image = gr.Image(label="Generated Image", elem_id="t2i_output_img")
     t2i_output_seed = gr.Number(label="Used Seed", elem_id="t2i_output_seed")
 
-    t2i_generate_btn.
-
-
-
-    )
+    t2i_generate_btn = gr.Button("Generate")
+    t2i_output_image = gr.Image(label="Generated Image", elem_id="t2i_output_img")
+    t2i_output_seed = gr.Number(label="Used Seed", elem_id="t2i_output_seed")
+    t2i_translated_prompt = gr.Text(label="Translated Prompt (if applicable)", elem_id="t2i_translated_prompt")
 
+    t2i_generate_btn.click(
+        fn=infer_t2i,
+        inputs=[t2i_input_text, t2i_seed, t2i_randomize_seed, t2i_width, t2i_height, t2i_guidance_scale, t2i_num_inference_steps],
+        outputs=[t2i_output_image, t2i_output_seed, t2i_translated_prompt]
+    )
+
 dynamicrafter_iface.queue(max_size=12).launch(show_api=True)
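
The hunks above call translator(prompt, max_length=512)[0]['translation_text'], but the translator itself is not defined in this diff. Below is a minimal sketch of how such a translator could be set up, assuming it is a Hugging Face transformers translation pipeline; the model name Helsinki-NLP/opus-mt-ko-en is an assumption and may differ from what app.py actually loads.

# Sketch only: app.py defines `translator` outside these hunks; the model name
# here is an assumption, not taken from the diff.
from transformers import pipeline

# A Korean-to-English translation pipeline. Calling it returns a list of dicts
# with a 'translation_text' key, which matches the usage in infer_t2i above.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

english = translator("노을 지는 바닷가", max_length=512)[0]["translation_text"]
print(english)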
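
For reference, the Hangul check used in infer_t2i, pulled out into a standalone helper so it can be tested on its own; the function name contains_korean is illustrative and does not appear in app.py.

def contains_korean(text: str) -> bool:
    # True if the text contains Hangul compatibility jamo (U+3131-U+318E)
    # or precomposed Hangul syllables (U+AC00-U+D7A3), the same ranges
    # infer_t2i checks before translating the prompt.
    return any('\u3131' <= ch <= '\u318E' or '\uAC00' <= ch <= '\uD7A3' for ch in text)

print(contains_korean("노을 지는 바닷가"))       # True: prompt would be translated
print(contains_korean("a sunset over the sea"))  # False: prompt used as-is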