Update app.py
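Adds a Korean-to-English translation step: a Helsinki-NLP/opus-mt-ko-en pipeline from transformers is initialized at import time and used to translate the instruction prompt in btn_image_edit_fn and the video prompt in btn_infer_fn before they are passed on to perform_edit and perform_anyv2v.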
app.py CHANGED

@@ -26,6 +26,10 @@ from run_group_pnp_edit import init_pnp
 from diffusers import DDIMInverseScheduler, DDIMScheduler
 from diffusers.utils import load_image
 import imageio
+from transformers import pipeline
+
+# Initialize the translation pipeline
+translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
 
 DEBUG_MODE = False
 
@@ -261,14 +265,15 @@ def btn_image_edit_fn(video_path, instruct_prompt, ie_force_512, ie_seed, ie_neg
     """
     Generate an image based on the video and text input.
     """
-    #
+    # Translate the instruction prompt from Korean to English
+    translated_prompt = translator(instruct_prompt, src_lang="ko", tgt_lang="en")[0]['translation_text']
 
     if ie_seed < 0:
         ie_seed = int.from_bytes(os.urandom(2), "big")
     print(f"Using seed: {ie_seed}")
 
     edited_image_path = perform_edit(video_path=video_path,
-                                     prompt=
+                                     prompt=translated_prompt,
                                      force_512=ie_force_512,
                                      seed=ie_seed,
                                      negative_prompt=ie_neg_prompt)
@@ -288,12 +293,15 @@ def btn_infer_fn(video_path,
                  ddim_inversion_steps,
                  seed,
                  ):
+    # Translate the video prompt from Korean to English
+    translated_video_prompt = translator(video_prompt, src_lang="ko", tgt_lang="en")[0]['translation_text']
+
     if seed < 0:
         seed = int.from_bytes(os.urandom(2), "big")
     print(f"Using seed: {seed}")
 
     result_video_path = perform_anyv2v(video_path=video_path,
-                                       video_prompt=
+                                       video_prompt=translated_video_prompt,
                                        video_negative_prompt=video_negative_prompt,
                                        edited_first_frame_path=edited_first_frame_path,
                                        conv_inj=conv_inj,
@@ -416,4 +424,4 @@ with gr.Blocks() as demo:
 torch.set_grad_enabled(False)
 
 
-demo.launch()
+demo.launch()
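For reference, a minimal standalone sketch of the translation step introduced above: it builds the same Helsinki-NLP/opus-mt-ko-en pipeline and reads the English text from the 'translation_text' field of the result. The sample Korean prompt is illustrative, not taken from the app; because this Marian checkpoint handles a single fixed language pair, the src_lang/tgt_lang arguments used in the diff are optional here.

from transformers import pipeline

# Same Korean-to-English translation pipeline that app.py initializes at import time.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

def translate_prompt(korean_prompt: str) -> str:
    # The pipeline returns a list with one dict per input string;
    # the English text is stored under the 'translation_text' key.
    return translator(korean_prompt)[0]["translation_text"]

if __name__ == "__main__":
    # Illustrative prompt (not from the app), roughly "please change the cat into a puppy".
    print(translate_prompt("고양이를 강아지로 바꿔 주세요"))

Initializing the pipeline once at module load, as the diff does, avoids reloading the model on every button callback; the per-request work is only the translator(...) call inside each handler.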