meepmoo committed
Commit 2add35e · verified · 1 Parent(s): 07cb044

Create worker_runpod_gradio.py

Files changed (1)
  1. worker_runpod_gradio.py +139 -0
worker_runpod_gradio.py ADDED
@@ -0,0 +1,139 @@
+ import os, random, time
+ import torch
+ from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler
+ from tqdm import tqdm
+
+ from memo.models.audio_proj import AudioProjModel
+ from memo.models.image_proj import ImageProjModel
+ from memo.models.unet_2d_condition import UNet2DConditionModel
+ from memo.models.unet_3d import UNet3DConditionModel
+ from memo.pipelines.video_pipeline import VideoPipeline
+ from memo.utils.audio_utils import extract_audio_emotion_labels, preprocess_audio, resample_audio
+ from memo.utils.vision_utils import preprocess_image, tensor_to_video
+
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+ weight_dtype = torch.bfloat16
+
+ with torch.inference_mode():
+     vae = AutoencoderKL.from_pretrained("/content/memo/checkpoints/vae").to(device=device, dtype=weight_dtype)
+     reference_net = UNet2DConditionModel.from_pretrained("/content/memo/checkpoints", subfolder="reference_net", use_safetensors=True)
+     diffusion_net = UNet3DConditionModel.from_pretrained("/content/memo/checkpoints", subfolder="diffusion_net", use_safetensors=True)
+     image_proj = ImageProjModel.from_pretrained("/content/memo/checkpoints", subfolder="image_proj", use_safetensors=True)
+     audio_proj = AudioProjModel.from_pretrained("/content/memo/checkpoints", subfolder="audio_proj", use_safetensors=True)
+
+     vae.requires_grad_(False).eval()
+     reference_net.requires_grad_(False).eval()
+     diffusion_net.requires_grad_(False).eval()
+     image_proj.requires_grad_(False).eval()
+     audio_proj.requires_grad_(False).eval()
+     reference_net.enable_xformers_memory_efficient_attention()
+     diffusion_net.enable_xformers_memory_efficient_attention()
+
+     noise_scheduler = FlowMatchEulerDiscreteScheduler()
+     pipeline = VideoPipeline(vae=vae, reference_net=reference_net, diffusion_net=diffusion_net, scheduler=noise_scheduler, image_proj=image_proj)
+     pipeline.to(device=device, dtype=weight_dtype)
+
+ @torch.inference_mode()
+ def generate(input_video, input_audio, seed):
+     resolution = 512
+     num_generated_frames_per_clip = 16
+     fps = 30
+     num_init_past_frames = 2
+     num_past_frames = 16
+     inference_steps = 20
+     cfg_scale = 3.5
+
+     if seed == 0:
+         random.seed(int(time.time()))
+         seed = random.randint(0, 18446744073709551615)
+
+     generator = torch.manual_seed(seed)
+     img_size = (resolution, resolution)
+     pixel_values, face_emb = preprocess_image(face_analysis_model="/content/memo/checkpoints/misc/face_analysis", image_path=input_video, image_size=resolution)
+
+     output_dir = "/content/memo/outputs"
+     os.makedirs(output_dir, exist_ok=True)
+     cache_dir = os.path.join(output_dir, "audio_preprocess")
+     os.makedirs(cache_dir, exist_ok=True)
+     input_audio = resample_audio(input_audio, os.path.join(cache_dir, f"{os.path.basename(input_audio).split('.')[0]}-16k.wav"))
+
+     audio_emb, audio_length = preprocess_audio(
+         wav_path=input_audio,
+         num_generated_frames_per_clip=num_generated_frames_per_clip,
+         fps=fps,
+         wav2vec_model="/content/memo/checkpoints/wav2vec2",
+         vocal_separator_model="/content/memo/checkpoints/misc/vocal_separator/Kim_Vocal_2.onnx",
+         cache_dir=cache_dir,
+         device=device,
+     )
+     audio_emotion, num_emotion_classes = extract_audio_emotion_labels(
+         model="/content/memo/checkpoints",
+         wav_path=input_audio,
+         emotion2vec_model="/content/memo/checkpoints/emotion2vec_plus_large",
+         audio_length=audio_length,
+         device=device,
+     )
+
+     video_frames = []
+     num_clips = audio_emb.shape[0] // num_generated_frames_per_clip
+     for t in tqdm(range(num_clips), desc="Generating video clips"):
+         if len(video_frames) == 0:
+             past_frames = pixel_values.repeat(num_init_past_frames, 1, 1, 1)
+             past_frames = past_frames.to(dtype=pixel_values.dtype, device=pixel_values.device)
+             pixel_values_ref_img = torch.cat([pixel_values, past_frames], dim=0)
+         else:
+             past_frames = video_frames[-1][0]
+             past_frames = past_frames.permute(1, 0, 2, 3)
+             past_frames = past_frames[0 - num_past_frames :]
+             past_frames = past_frames * 2.0 - 1.0
+             past_frames = past_frames.to(dtype=pixel_values.dtype, device=pixel_values.device)
+             pixel_values_ref_img = torch.cat([pixel_values, past_frames], dim=0)
+
+         pixel_values_ref_img = pixel_values_ref_img.unsqueeze(0)
+         audio_tensor = (audio_emb[t * num_generated_frames_per_clip : min((t + 1) * num_generated_frames_per_clip, audio_emb.shape[0])].unsqueeze(0).to(device=audio_proj.device, dtype=audio_proj.dtype))
+         audio_tensor = audio_proj(audio_tensor)
+         audio_emotion_tensor = audio_emotion[t * num_generated_frames_per_clip : min((t + 1) * num_generated_frames_per_clip, audio_emb.shape[0])]
+
+         pipeline_output = pipeline(
+             ref_image=pixel_values_ref_img,
+             audio_tensor=audio_tensor,
+             audio_emotion=audio_emotion_tensor,
+             emotion_class_num=num_emotion_classes,
+             face_emb=face_emb,
+             width=img_size[0],
+             height=img_size[1],
+             video_length=num_generated_frames_per_clip,
+             num_inference_steps=inference_steps,
+             guidance_scale=cfg_scale,
+             generator=generator,
+         )
+         video_frames.append(pipeline_output.videos)
+
+     video_frames = torch.cat(video_frames, dim=2)
+     video_frames = video_frames.squeeze(0)
+     video_frames = video_frames[:, :audio_length]
+
+     video_path = f"/content/memo-{seed}-tost.mp4"
+     tensor_to_video(video_frames, video_path, input_audio, fps=fps)
+
+     return video_path
+
+ import gradio as gr
+
+ with gr.Blocks(css=".gradio-container {max-width: 1080px !important}", analytics_enabled=False) as demo:
+     with gr.Row():
+         with gr.Column():
+             input_video = gr.Image(label="Upload Input Image", type="filepath")
+             input_audio = gr.Audio(label="Upload Input Audio", type="filepath")
+             seed = gr.Number(label="Seed (0 for Random)", value=0, precision=0)
+         with gr.Column():
+             video_output = gr.Video(label="Generated Video")
+             generate_button = gr.Button("Generate")
+
+     generate_button.click(
+         fn=generate,
+         inputs=[input_video, input_audio, seed],
+         outputs=[video_output],
+     )
+
+ demo.queue().launch(inline=False, share=False, debug=True, server_name='0.0.0.0', server_port=7860, allowed_paths=["/content"])
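
Once the worker is running (python worker_runpod_gradio.py, serving on 0.0.0.0:7860), the click handler can also be driven programmatically instead of through the browser. Below is a minimal sketch using gradio_client (a recent version that provides handle_file); it is not part of this commit. The server URL, the local files face.jpg and speech.wav, and the endpoint name "/generate" are all assumptions; Gradio normally derives the default API name from the handler function, but client.view_api() will show the endpoint actually exposed by this app.

from gradio_client import Client, handle_file

# Point the client at the server started by demo.queue().launch(...) above.
client = Client("http://localhost:7860")
client.view_api()  # prints the available endpoints; confirm the name used below

result = client.predict(
    handle_file("face.jpg"),    # input_video: reference face image (hypothetical local file)
    handle_file("speech.wav"),  # input_audio: driving speech audio (hypothetical local file)
    0,                          # seed: 0 makes the worker pick a random seed
    api_name="/generate",       # assumed default name derived from generate(); verify with view_api()
)
print(result)  # local path of the MP4 returned by generate() and downloaded by the client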