Spaces:
Running
Running
daquanzhou
committed on
Commit
•
507f91c
1
Parent(s):
8b6c408
add three buttons
Browse files- app.py +319 -7
- models/embeddings/emotion-angry.pt +0 -3
- models/embeddings/emotion-defiance.pt +0 -3
- models/embeddings/emotion-grin.pt +0 -3
- models/embeddings/emotion-happy.pt +0 -3
- models/embeddings/emotion-laugh.pt +0 -3
- models/embeddings/emotion-sad.pt +0 -3
- models/embeddings/emotion-shock.pt +0 -3
- models/embeddings/emotion-smile.pt +0 -3
- models/embeddings/lisa.pt +0 -3
app.py
CHANGED
@@ -232,7 +232,9 @@ class MagicMeController:
|
|
232 |
id_embed_list = glob(os.path.join(self.id_embed_dir, "*.pt"))
|
233 |
self.id_embed_list = [Path(p).stem for p in id_embed_list]
|
234 |
|
235 |
-
|
|
|
|
|
236 |
category = "woman" if id_embed_dropdown in self.woman_id_embed_list else "man"
|
237 |
prompt = f"a photo of embedding:{id_embed_dropdown} {category} " + prompt_text_box
|
238 |
print("prompt:", prompt)
|
@@ -463,6 +465,310 @@ class MagicMeController:
|
|
463 |
|
464 |
|
465 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
466 |
import_custom_nodes()
|
467 |
c = MagicMeController()
|
468 |
|
@@ -537,7 +843,7 @@ def ui():
|
|
537 |
### Quick Start
|
538 |
1. Select desired `ID embedding`.
|
539 |
2. Provide `Prompt` and `Negative Prompt`. Please use the proper pronoun for the character's gender.
|
540 |
-
|
541 |
"""
|
542 |
)
|
543 |
with gr.Row():
|
@@ -555,12 +861,14 @@ def ui():
|
|
555 |
with gr.Row():
|
556 |
gaussian_slider = gr.Slider( label="3D Gaussian Noise Covariance", value=0.2, minimum=0, maximum=1, step=0.05 )
|
557 |
with gr.Row():
|
558 |
-
seed_textbox = gr.Textbox( label="Seed", value
|
559 |
seed_button = gr.Button(value="\U0001F3B2", elem_classes="toolbutton")
|
560 |
seed_button.click(fn=lambda: gr.Textbox.update(value=random.randint(1, 1e16)), inputs=[], outputs=[seed_textbox])
|
561 |
json_config = gr.Json(label="Config", value=None )
|
562 |
-
|
563 |
-
|
|
|
|
|
564 |
|
565 |
with gr.Row():
|
566 |
orig_video = gr.Video( label="Video after T2I VCD", interactive=False )
|
@@ -568,9 +876,13 @@ def ui():
|
|
568 |
sr_video = gr.Video( label="Video after Tiled VCD", interactive=False )
|
569 |
|
570 |
inputs = [prompt_textbox, negative_prompt_textbox, id_embed_dropdown, gaussian_slider, seed_textbox]
|
571 |
-
|
|
|
|
|
572 |
|
573 |
-
|
|
|
|
|
574 |
|
575 |
# gr.Examples( fn=c.run_once, examples=examples, inputs=inputs, outputs=outputs, cache_examples=True )
|
576 |
|
|
|
232 |
id_embed_list = glob(os.path.join(self.id_embed_dir, "*.pt"))
|
233 |
self.id_embed_list = [Path(p).stem for p in id_embed_list]
|
234 |
|
235 |
+
|
236 |
+
|
237 |
+
def run_t2v_face_tiled(self, prompt_text_box, negative_prompt_text_box, id_embed_dropdown, gaussian_slider, seed_text_box):
|
238 |
category = "woman" if id_embed_dropdown in self.woman_id_embed_list else "man"
|
239 |
prompt = f"a photo of embedding:{id_embed_dropdown} {category} " + prompt_text_box
|
240 |
print("prompt:", prompt)
|
|
|
465 |
|
466 |
|
467 |
|
468 |
+
def run_t2v_face(self, prompt_text_box, negative_prompt_text_box, id_embed_dropdown, gaussian_slider, seed_text_box):
|
469 |
+
category = "woman" if id_embed_dropdown in self.woman_id_embed_list else "man"
|
470 |
+
prompt = f"a photo of embedding:{id_embed_dropdown} {category} " + prompt_text_box
|
471 |
+
print("prompt:", prompt)
|
472 |
+
print("negative_prompt_text_box:", negative_prompt_text_box)
|
473 |
+
print("id_embed_dropdown:", id_embed_dropdown)
|
474 |
+
print("gaussian_slider:", gaussian_slider)
|
475 |
+
print("seed_text_box:", seed_text_box)
|
476 |
+
seed_text_box = int(seed_text_box)
|
477 |
+
with torch.inference_mode():
|
478 |
+
cliptextencode = CLIPTextEncode()
|
479 |
+
cliptextencode_6 = cliptextencode.encode(
|
480 |
+
text=negative_prompt_text_box,
|
481 |
+
clip=get_value_at_index(self.checkpointloadersimple_32, 1),
|
482 |
+
)
|
483 |
+
cliptextencode_274 = cliptextencode.encode(
|
484 |
+
text=prompt,
|
485 |
+
clip=get_value_at_index(self.checkpointloadersimple_32, 1),
|
486 |
+
)
|
487 |
+
ade_animatediffloaderwithcontext_261 = (
|
488 |
+
self.ade_animatediffloaderwithcontext.load_mm_and_inject_params(
|
489 |
+
model_name="mm_sd_v15_v2.ckpt",
|
490 |
+
beta_schedule="autoselect",
|
491 |
+
motion_scale=1,
|
492 |
+
apply_v2_models_properly=True,
|
493 |
+
model=get_value_at_index(self.checkpointloadersimple_32, 0),
|
494 |
+
context_options=get_value_at_index(
|
495 |
+
self.ade_animatediffuniformcontextoptions_102, 0
|
496 |
+
),
|
497 |
+
motion_lora=get_value_at_index(self.ade_animatediffloraloader_196, 0),
|
498 |
+
)
|
499 |
+
)
|
500 |
+
|
501 |
+
freeu_v2_151 = self.freeu_v2.patch(
|
502 |
+
b1=1.1,
|
503 |
+
b2=1.2,
|
504 |
+
s1=0.9,
|
505 |
+
s2=0.4,
|
506 |
+
model=get_value_at_index(ade_animatediffloaderwithcontext_261, 0),
|
507 |
+
)
|
508 |
+
|
509 |
+
tobasicpipe_42 = self.tobasicpipe.doit(
|
510 |
+
model=get_value_at_index(freeu_v2_151, 0),
|
511 |
+
clip=get_value_at_index(self.checkpointloadersimple_32, 1),
|
512 |
+
vae=get_value_at_index(self.vaeloader_2, 0),
|
513 |
+
positive=get_value_at_index(cliptextencode_274, 0),
|
514 |
+
negative=get_value_at_index(cliptextencode_6, 0),
|
515 |
+
)
|
516 |
+
|
517 |
+
frombasicpipe_52 = self.frombasicpipe.doit(
|
518 |
+
basic_pipe=get_value_at_index(tobasicpipe_42, 0)
|
519 |
+
)
|
520 |
+
|
521 |
+
bnk_getsigma_254 = self.bnk_getsigma.calc_sigma(
|
522 |
+
sampler_name="dpmpp_2m",
|
523 |
+
scheduler="karras",
|
524 |
+
steps=20,
|
525 |
+
start_at_step=0,
|
526 |
+
end_at_step=20,
|
527 |
+
model=get_value_at_index(frombasicpipe_52, 0),
|
528 |
+
)
|
529 |
+
|
530 |
+
emptylatentimage_223 = self.emptylatentimage.generate(
|
531 |
+
width=512, height=512, batch_size=get_value_at_index(self.impactint_204, 0)
|
532 |
+
)
|
533 |
+
|
534 |
+
magicalbum3dgaussiannoise_262 = self.magicalbum3dgaussiannoise.generate(
|
535 |
+
width=512,
|
536 |
+
height=512,
|
537 |
+
batch_size=get_value_at_index(self.impactint_204, 0),
|
538 |
+
seed=seed_text_box,
|
539 |
+
cov_factor=gaussian_slider,
|
540 |
+
)
|
541 |
+
|
542 |
+
bnk_injectnoise_253 = self.bnk_injectnoise.inject_noise(
|
543 |
+
strength=get_value_at_index(bnk_getsigma_254, 0),
|
544 |
+
latents=get_value_at_index(emptylatentimage_223, 0),
|
545 |
+
noise=get_value_at_index(magicalbum3dgaussiannoise_262, 0),
|
546 |
+
)
|
547 |
+
|
548 |
+
ksampleradvanced_248 = self.ksampleradvanced.sample(
|
549 |
+
add_noise="disable",
|
550 |
+
noise_seed=seed_text_box,
|
551 |
+
steps=20,
|
552 |
+
cfg=8,
|
553 |
+
sampler_name="dpmpp_2m",
|
554 |
+
scheduler="karras",
|
555 |
+
start_at_step=0,
|
556 |
+
end_at_step=20,
|
557 |
+
return_with_leftover_noise="disable",
|
558 |
+
model=get_value_at_index(frombasicpipe_52, 0),
|
559 |
+
positive=get_value_at_index(frombasicpipe_52, 3),
|
560 |
+
negative=get_value_at_index(frombasicpipe_52, 4),
|
561 |
+
latent_image=get_value_at_index(bnk_injectnoise_253, 0),
|
562 |
+
)
|
563 |
+
|
564 |
+
vaedecode_10 = self.vaedecode.decode(
|
565 |
+
samples=get_value_at_index(ksampleradvanced_248, 0),
|
566 |
+
vae=get_value_at_index(frombasicpipe_52, 2),
|
567 |
+
)
|
568 |
+
|
569 |
+
vhs_videocombine_35 = self.vhs_videocombine.combine_video(
|
570 |
+
frame_rate=8,
|
571 |
+
loop_count=0,
|
572 |
+
filename_prefix="orig",
|
573 |
+
format="video/h264-mp4",
|
574 |
+
pingpong=False,
|
575 |
+
save_output=True,
|
576 |
+
images=get_value_at_index(vaedecode_10, 0),
|
577 |
+
unique_id=2001771405939721385,
|
578 |
+
)
|
579 |
+
|
580 |
+
impactsimpledetectorsegs_for_ad_156 = self.impactsimpledetectorsegs_for_ad.doit(
|
581 |
+
bbox_threshold=0.5,
|
582 |
+
bbox_dilation=0,
|
583 |
+
crop_factor=3,
|
584 |
+
drop_size=10,
|
585 |
+
sub_threshold=0.5,
|
586 |
+
sub_dilation=0,
|
587 |
+
sub_bbox_expansion=0,
|
588 |
+
sam_mask_hint_threshold=0.7,
|
589 |
+
masking_mode="Pivot SEGS",
|
590 |
+
segs_pivot="Combined mask",
|
591 |
+
bbox_detector=get_value_at_index(self.ultralyticsdetectorprovider_75, 0),
|
592 |
+
image_frames=get_value_at_index(vaedecode_10, 0),
|
593 |
+
sam_model_opt=get_value_at_index(self.samloader_78, 0),
|
594 |
+
)
|
595 |
+
|
596 |
+
segsdetailerforanimatediff_41 = self.segsdetailerforanimatediff.doit(
|
597 |
+
guide_size=512,
|
598 |
+
guide_size_for=False,
|
599 |
+
max_size=512,
|
600 |
+
seed=seed_text_box,
|
601 |
+
steps=20,
|
602 |
+
cfg=8,
|
603 |
+
sampler_name="euler",
|
604 |
+
scheduler="normal",
|
605 |
+
denoise=0.8,
|
606 |
+
refiner_ratio=0.2,
|
607 |
+
image_frames=get_value_at_index(vaedecode_10, 0),
|
608 |
+
segs=get_value_at_index(impactsimpledetectorsegs_for_ad_156, 0),
|
609 |
+
basic_pipe=get_value_at_index(tobasicpipe_42, 0),
|
610 |
+
)
|
611 |
+
|
612 |
+
segspaste_49 = self.segspaste.doit(
|
613 |
+
feather=5,
|
614 |
+
alpha=255,
|
615 |
+
image=get_value_at_index(vaedecode_10, 0),
|
616 |
+
segs=get_value_at_index(segsdetailerforanimatediff_41, 0),
|
617 |
+
)
|
618 |
+
|
619 |
+
vhs_videocombine_51 = self.vhs_videocombine.combine_video(
|
620 |
+
frame_rate=8,
|
621 |
+
loop_count=0,
|
622 |
+
filename_prefix="face_detailer",
|
623 |
+
format="video/h264-mp4",
|
624 |
+
pingpong=False,
|
625 |
+
save_output=True,
|
626 |
+
images=get_value_at_index(segspaste_49, 0),
|
627 |
+
unique_id=7104489750160636615,
|
628 |
+
)
|
629 |
+
|
630 |
+
|
631 |
+
|
632 |
+
orig_video_path = sorted(glob(os.path.join(self.save_dir, 'orig*.mp4')))[-1]
|
633 |
+
face_detailer_video_path = sorted(glob(os.path.join(self.save_dir, 'face_detailer*.mp4')))[-1]
|
634 |
+
|
635 |
+
json_config = {
|
636 |
+
"prompt": prompt,
|
637 |
+
"n_prompt": negative_prompt_text_box,
|
638 |
+
"id_embed_dropdown": id_embed_dropdown,
|
639 |
+
"gaussian_slider": gaussian_slider,
|
640 |
+
"seed_text_box": seed_text_box
|
641 |
+
}
|
642 |
+
return gr.Video.update(value=orig_video_path), gr.Video.update(value=face_detailer_video_path), gr.Json.update(value=json_config)
|
643 |
+
|
644 |
+
|
645 |
+
|
646 |
+
|
647 |
+
def run_t2v(self, prompt_text_box, negative_prompt_text_box, id_embed_dropdown, gaussian_slider, seed_text_box):
|
648 |
+
category = "woman" if id_embed_dropdown in self.woman_id_embed_list else "man"
|
649 |
+
prompt = f"a photo of embedding:{id_embed_dropdown} {category} " + prompt_text_box
|
650 |
+
print("prompt:", prompt)
|
651 |
+
print("negative_prompt_text_box:", negative_prompt_text_box)
|
652 |
+
print("id_embed_dropdown:", id_embed_dropdown)
|
653 |
+
print("gaussian_slider:", gaussian_slider)
|
654 |
+
print("seed_text_box:", seed_text_box)
|
655 |
+
seed_text_box = int(seed_text_box)
|
656 |
+
with torch.inference_mode():
|
657 |
+
cliptextencode = CLIPTextEncode()
|
658 |
+
cliptextencode_6 = cliptextencode.encode(
|
659 |
+
text=negative_prompt_text_box,
|
660 |
+
clip=get_value_at_index(self.checkpointloadersimple_32, 1),
|
661 |
+
)
|
662 |
+
cliptextencode_274 = cliptextencode.encode(
|
663 |
+
text=prompt,
|
664 |
+
clip=get_value_at_index(self.checkpointloadersimple_32, 1),
|
665 |
+
)
|
666 |
+
ade_animatediffloaderwithcontext_261 = (
|
667 |
+
self.ade_animatediffloaderwithcontext.load_mm_and_inject_params(
|
668 |
+
model_name="mm_sd_v15_v2.ckpt",
|
669 |
+
beta_schedule="autoselect",
|
670 |
+
motion_scale=1,
|
671 |
+
apply_v2_models_properly=True,
|
672 |
+
model=get_value_at_index(self.checkpointloadersimple_32, 0),
|
673 |
+
context_options=get_value_at_index(
|
674 |
+
self.ade_animatediffuniformcontextoptions_102, 0
|
675 |
+
),
|
676 |
+
motion_lora=get_value_at_index(self.ade_animatediffloraloader_196, 0),
|
677 |
+
)
|
678 |
+
)
|
679 |
+
|
680 |
+
freeu_v2_151 = self.freeu_v2.patch(
|
681 |
+
b1=1.1,
|
682 |
+
b2=1.2,
|
683 |
+
s1=0.9,
|
684 |
+
s2=0.4,
|
685 |
+
model=get_value_at_index(ade_animatediffloaderwithcontext_261, 0),
|
686 |
+
)
|
687 |
+
|
688 |
+
tobasicpipe_42 = self.tobasicpipe.doit(
|
689 |
+
model=get_value_at_index(freeu_v2_151, 0),
|
690 |
+
clip=get_value_at_index(self.checkpointloadersimple_32, 1),
|
691 |
+
vae=get_value_at_index(self.vaeloader_2, 0),
|
692 |
+
positive=get_value_at_index(cliptextencode_274, 0),
|
693 |
+
negative=get_value_at_index(cliptextencode_6, 0),
|
694 |
+
)
|
695 |
+
|
696 |
+
frombasicpipe_52 = self.frombasicpipe.doit(
|
697 |
+
basic_pipe=get_value_at_index(tobasicpipe_42, 0)
|
698 |
+
)
|
699 |
+
|
700 |
+
bnk_getsigma_254 = self.bnk_getsigma.calc_sigma(
|
701 |
+
sampler_name="dpmpp_2m",
|
702 |
+
scheduler="karras",
|
703 |
+
steps=20,
|
704 |
+
start_at_step=0,
|
705 |
+
end_at_step=20,
|
706 |
+
model=get_value_at_index(frombasicpipe_52, 0),
|
707 |
+
)
|
708 |
+
|
709 |
+
emptylatentimage_223 = self.emptylatentimage.generate(
|
710 |
+
width=512, height=512, batch_size=get_value_at_index(self.impactint_204, 0)
|
711 |
+
)
|
712 |
+
|
713 |
+
magicalbum3dgaussiannoise_262 = self.magicalbum3dgaussiannoise.generate(
|
714 |
+
width=512,
|
715 |
+
height=512,
|
716 |
+
batch_size=get_value_at_index(self.impactint_204, 0),
|
717 |
+
seed=seed_text_box,
|
718 |
+
cov_factor=gaussian_slider,
|
719 |
+
)
|
720 |
+
|
721 |
+
bnk_injectnoise_253 = self.bnk_injectnoise.inject_noise(
|
722 |
+
strength=get_value_at_index(bnk_getsigma_254, 0),
|
723 |
+
latents=get_value_at_index(emptylatentimage_223, 0),
|
724 |
+
noise=get_value_at_index(magicalbum3dgaussiannoise_262, 0),
|
725 |
+
)
|
726 |
+
|
727 |
+
ksampleradvanced_248 = self.ksampleradvanced.sample(
|
728 |
+
add_noise="disable",
|
729 |
+
noise_seed=seed_text_box,
|
730 |
+
steps=20,
|
731 |
+
cfg=8,
|
732 |
+
sampler_name="dpmpp_2m",
|
733 |
+
scheduler="karras",
|
734 |
+
start_at_step=0,
|
735 |
+
end_at_step=20,
|
736 |
+
return_with_leftover_noise="disable",
|
737 |
+
model=get_value_at_index(frombasicpipe_52, 0),
|
738 |
+
positive=get_value_at_index(frombasicpipe_52, 3),
|
739 |
+
negative=get_value_at_index(frombasicpipe_52, 4),
|
740 |
+
latent_image=get_value_at_index(bnk_injectnoise_253, 0),
|
741 |
+
)
|
742 |
+
|
743 |
+
vaedecode_10 = self.vaedecode.decode(
|
744 |
+
samples=get_value_at_index(ksampleradvanced_248, 0),
|
745 |
+
vae=get_value_at_index(frombasicpipe_52, 2),
|
746 |
+
)
|
747 |
+
|
748 |
+
vhs_videocombine_35 = self.vhs_videocombine.combine_video(
|
749 |
+
frame_rate=8,
|
750 |
+
loop_count=0,
|
751 |
+
filename_prefix="orig",
|
752 |
+
format="video/h264-mp4",
|
753 |
+
pingpong=False,
|
754 |
+
save_output=True,
|
755 |
+
images=get_value_at_index(vaedecode_10, 0),
|
756 |
+
unique_id=2001771405939721385,
|
757 |
+
)
|
758 |
+
|
759 |
+
orig_video_path = sorted(glob(os.path.join(self.save_dir, 'orig*.mp4')))[-1]
|
760 |
+
|
761 |
+
json_config = {
|
762 |
+
"prompt": prompt,
|
763 |
+
"n_prompt": negative_prompt_text_box,
|
764 |
+
"id_embed_dropdown": id_embed_dropdown,
|
765 |
+
"gaussian_slider": gaussian_slider,
|
766 |
+
"seed_text_box": seed_text_box
|
767 |
+
}
|
768 |
+
return gr.Video.update(value=orig_video_path), gr.Json.update(value=json_config)
|
769 |
+
|
770 |
+
|
771 |
+
|
772 |
import_custom_nodes()
|
773 |
c = MagicMeController()
|
774 |
|
|
|
843 |
### Quick Start
|
844 |
1. Select desired `ID embedding`.
|
845 |
2. Provide `Prompt` and `Negative Prompt`. Please use the proper pronoun for the character's gender.
|
846 |
+
3. Click one of the three `Go` buttons. The fewer modules that run, the less time you need to wait. Enjoy!
|
847 |
"""
|
848 |
)
|
849 |
with gr.Row():
|
|
|
861 |
with gr.Row():
|
862 |
gaussian_slider = gr.Slider( label="3D Gaussian Noise Covariance", value=0.2, minimum=0, maximum=1, step=0.05 )
|
863 |
with gr.Row():
|
864 |
+
seed_textbox = gr.Textbox( label="Seed", value=random.randint(1, 2 ** 32))
|
865 |
seed_button = gr.Button(value="\U0001F3B2", elem_classes="toolbutton")
|
866 |
seed_button.click(fn=lambda: gr.Textbox.update(value=random.randint(1, 1e16)), inputs=[], outputs=[seed_textbox])
|
867 |
json_config = gr.Json(label="Config", value=None )
|
868 |
+
with gr.Row():
|
869 |
+
generate_button_t2v = gr.Button( value="Go (T2V VCD)", variant='primary' )
|
870 |
+
generate_button_face = gr.Button( value="Go (T2V + Face VCD)", variant='primary' )
|
871 |
+
generate_button_tiled = gr.Button( value="Go (T2V + Face + Tiled VCD)", variant='primary' )
|
872 |
|
873 |
with gr.Row():
|
874 |
orig_video = gr.Video( label="Video after T2I VCD", interactive=False )
|
|
|
876 |
sr_video = gr.Video( label="Video after Tiled VCD", interactive=False )
|
877 |
|
878 |
inputs = [prompt_textbox, negative_prompt_textbox, id_embed_dropdown, gaussian_slider, seed_textbox]
|
879 |
+
outputs_t2v = [orig_video, json_config]
|
880 |
+
outputs_t2v_face = [orig_video, face_detailer_video, json_config]
|
881 |
+
outputs_t2v_face_tiled = [orig_video, face_detailer_video, sr_video, json_config]
|
882 |
|
883 |
+
generate_button_t2v.click( fn=c.run_t2v, inputs=inputs, outputs=outputs_t2v )
|
884 |
+
generate_button_face.click( fn=c.run_t2v_face, inputs=inputs, outputs=outputs_t2v_face )
|
885 |
+
generate_button_tiled.click( fn=c.run_t2v_face_tiled, inputs=inputs, outputs=outputs_t2v_face_tiled )
|
886 |
|
887 |
# gr.Examples( fn=c.run_once, examples=examples, inputs=inputs, outputs=outputs, cache_examples=True )
|
888 |
|
models/embeddings/emotion-angry.pt
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:e4d26073ea75df52343f5d1c8120ee30a6988e5ef47d42e754fb363a4178098c
|
3 |
-
size 37739
|
|
|
|
|
|
|
|
models/embeddings/emotion-defiance.pt
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:84e0f50dc97bc42fadc6974f84ef14722156a8f27c0ee0cafd0496b46e5ed177
|
3 |
-
size 31595
|
|
|
|
|
|
|
|
models/embeddings/emotion-grin.pt
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:5aa6e7124a8c9d2ab01ea2ec827f2e0ab9074b2e7cef5627232e78aa1469dfd8
|
3 |
-
size 25451
|
|
|
|
|
|
|
|
models/embeddings/emotion-happy.pt
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:4fa643103a068b7605a3b10d0b8b6c2f724b4f200b6b518ddc0cc68c7c8a3d8e
|
3 |
-
size 31595
|
|
|
|
|
|
|
|
models/embeddings/emotion-laugh.pt
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:d970d25c2a99a22bd32620f2342c6f516a2cf29850f512050ec41a16e97bbe04
|
3 |
-
size 13163
|
|
|
|
|
|
|
|
models/embeddings/emotion-sad.pt
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:d27225db52e647ae0ce452697e49a75a12e3a2bb2a05d6ee3bfb88536f2bcf2e
|
3 |
-
size 59243
|
|
|
|
|
|
|
|
models/embeddings/emotion-shock.pt
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:156f0e7ecdaf98f07895f081b278cb9b679c3cc8908783a4dabb3735f92f3ece
|
3 |
-
size 46955
|
|
|
|
|
|
|
|
models/embeddings/emotion-smile.pt
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:2ad4e0cac93203a4d88c7cee1b36de9f2fdd8b546013fa02fad36293af81fa7f
|
3 |
-
size 7019
|
|
|
|
|
|
|
|
models/embeddings/lisa.pt
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:09ea6b2688b3e4903ba97bcf31f705fa140c52391ee3e342fd112f5bca0e43be
|
3 |
-
size 13099
|
|
|
|
|
|
|
|