fantaxy committed
Commit 9ed6c80 · verified · 1 Parent(s): 2c33603

Delete webgui.py

Files changed (1):
  webgui.py +0 -318
webgui.py DELETED
@@ -1,318 +0,0 @@
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
webui
'''

import os
import random
from datetime import datetime
from pathlib import Path

import cv2
import numpy as np
import torch
import torch.multiprocessing as mp
from diffusers import AutoencoderKL, DDIMScheduler
from omegaconf import OmegaConf
from PIL import Image
from src.models.unet_2d_condition import UNet2DConditionModel
from src.models.unet_3d_echo import EchoUNet3DConditionModel
from src.models.whisper.audio2feature import load_audio_model
from src.pipelines.pipeline_echo_mimic import Audio2VideoPipeline
from src.utils.util import save_videos_grid, crop_and_pad
from src.models.face_locator import FaceLocator
from moviepy.editor import VideoFileClip, AudioFileClip
from facenet_pytorch import MTCNN
import argparse

import gradio as gr
import huggingface_hub
import multiprocessing

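# Fetch the EchoMimic pretrained weights from the Hugging Face Hub into ./pretrained_weights.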
huggingface_hub.snapshot_download(
    repo_id='BadToBest/EchoMimic',
    local_dir='./pretrained_weights',
    local_dir_use_symlinks=False,
)

# Set directly in code instead of reading from an environment variable.
is_shared_ui = False  # or True, depending on the deployment

# available_property follows from is_shared_ui.
available_property = not is_shared_ui

# is_shared_ui and available_property are now managed directly in code.
advanced_settings_label = "Advanced Settings"

default_values = {
    "width": 512,
    "height": 512,
    "length": 1200,
    "seed": 420,
    "facemask_dilation_ratio": 0.1,
    "facecrop_dilation_ratio": 1.0,
    "context_frames": 12,
    "context_overlap": 3,
    "cfg": 2.5,
    "steps": 100,
    "sample_rate": 16000,
    "fps": 24,
    "device": "cuda"
}

ffmpeg_path = os.getenv('FFMPEG_PATH')
if ffmpeg_path is None:
    print("Please download ffmpeg-static and export FFMPEG_PATH.\nFor example: export FFMPEG_PATH=/musetalk/ffmpeg-4.4-amd64-static")
elif ffmpeg_path not in os.getenv('PATH', ''):
    print("add ffmpeg to path")
    os.environ["PATH"] = f"{ffmpeg_path}:{os.environ['PATH']}"

config_path = "./configs/prompts/animation.yaml"
config = OmegaConf.load(config_path)
if config.weight_dtype == "fp16":
    weight_dtype = torch.float16
else:
    weight_dtype = torch.float32

device = "cuda"
if not torch.cuda.is_available():
    device = "cpu"

inference_config_path = config.inference_config
infer_config = OmegaConf.load(inference_config_path)

############# model_init started #############
## vae init
vae = AutoencoderKL.from_pretrained(config.pretrained_vae_path).to(device, dtype=weight_dtype)

## reference net init
reference_unet = UNet2DConditionModel.from_pretrained(
    config.pretrained_base_model_path,
    subfolder="unet",
).to(device, dtype=weight_dtype)
reference_unet.load_state_dict(torch.load(config.reference_unet_path, map_location="cpu"))

## denoising net init
if os.path.exists(config.motion_module_path):
    ### stage1 + stage2
    denoising_unet = EchoUNet3DConditionModel.from_pretrained_2d(
        config.pretrained_base_model_path,
        config.motion_module_path,
        subfolder="unet",
        unet_additional_kwargs=infer_config.unet_additional_kwargs,
    ).to(device, dtype=weight_dtype)
else:
    ### only stage1
    denoising_unet = EchoUNet3DConditionModel.from_pretrained_2d(
        config.pretrained_base_model_path,
        "",
        subfolder="unet",
        unet_additional_kwargs={
            "use_motion_module": False,
            "unet_use_temporal_attention": False,
            "cross_attention_dim": infer_config.unet_additional_kwargs.cross_attention_dim
        }
    ).to(device, dtype=weight_dtype)

denoising_unet.load_state_dict(torch.load(config.denoising_unet_path, map_location="cpu"), strict=False)

## face locator init
face_locator = FaceLocator(320, conditioning_channels=1, block_out_channels=(16, 32, 96, 256)).to(device, dtype=weight_dtype)
face_locator.load_state_dict(torch.load(config.face_locator_path))

## load audio processor params
audio_processor = load_audio_model(model_path=config.audio_model_path, device=device)

## load face detector params
face_detector = MTCNN(image_size=320, margin=0, min_face_size=20, thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True, device=device)

############# model_init finished #############

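# Assemble the DDIM scheduler and the audio-to-video pipeline from the modules above.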
sched_kwargs = OmegaConf.to_container(infer_config.noise_scheduler_kwargs)
scheduler = DDIMScheduler(**sched_kwargs)

pipe = Audio2VideoPipeline(
    vae=vae,
    reference_unet=reference_unet,
    denoising_unet=denoising_unet,
    audio_guider=audio_processor,
    face_locator=face_locator,
    scheduler=scheduler,
).to(device, dtype=weight_dtype)

def select_face(det_bboxes, probs):
    ## return the largest face (xyxy box) among detections with probability above 0.8
    if det_bboxes is None or probs is None:
        return None
    filtered_bboxes = []
    for bbox_i in range(len(det_bboxes)):
        if probs[bbox_i] > 0.8:
            filtered_bboxes.append(det_bboxes[bbox_i])
    if len(filtered_bboxes) == 0:
        return None
    sorted_bboxes = sorted(filtered_bboxes, key=lambda x: (x[3] - x[1]) * (x[2] - x[0]), reverse=True)
    return sorted_bboxes[0]

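# End-to-end generation for a single request: build the face mask, crop the face,
# run the diffusion pipeline, then mux the input audio into the output video.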
def process_video(uploaded_img, uploaded_audio, width, height, length, seed, facemask_dilation_ratio, facecrop_dilation_ratio, context_frames, context_overlap, cfg, steps, sample_rate, fps, device):

    if seed is not None and seed > -1:
        generator = torch.manual_seed(seed)
    else:
        generator = torch.manual_seed(random.randint(100, 1000000))

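    # Build a binary face mask from the reference image; if no confident face is
    # detected, fall back to a full-frame mask.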
    #### face mask prepare
    face_img = cv2.imread(uploaded_img)
    face_mask = np.zeros((face_img.shape[0], face_img.shape[1])).astype('uint8')
    det_bboxes, probs = face_detector.detect(face_img)
    select_bbox = select_face(det_bboxes, probs)
    if select_bbox is None:
        face_mask[:, :] = 255
    else:
        xyxy = select_bbox[:4]
        xyxy = np.round(xyxy).astype('int')
        rb, re, cb, ce = xyxy[1], xyxy[3], xyxy[0], xyxy[2]
        r_pad = int((re - rb) * facemask_dilation_ratio)
        c_pad = int((ce - cb) * facemask_dilation_ratio)
        face_mask[rb - r_pad : re + r_pad, cb - c_pad : ce + c_pad] = 255

        #### face crop (only when a face was detected; rb/re/cb/ce are set above)
        r_pad_crop = int((re - rb) * facecrop_dilation_ratio)
        c_pad_crop = int((ce - cb) * facecrop_dilation_ratio)
        crop_rect = [max(0, cb - c_pad_crop), max(0, rb - r_pad_crop), min(ce + c_pad_crop, face_img.shape[1]), min(re + r_pad_crop, face_img.shape[0])]
        face_img = crop_and_pad(face_img, crop_rect)
        face_mask = crop_and_pad(face_mask, crop_rect)
    face_img = cv2.resize(face_img, (width, height))
    face_mask = cv2.resize(face_mask, (width, height))

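    # OpenCV loads BGR; reorder channels to RGB for PIL. The mask becomes a
    # (1, 1, 1, H, W) tensor scaled to [0, 1] for the pipeline.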
    ref_image_pil = Image.fromarray(face_img[:, :, [2, 1, 0]])
    face_mask_tensor = torch.Tensor(face_mask).to(dtype=weight_dtype, device=device).unsqueeze(0).unsqueeze(0).unsqueeze(0) / 255.0

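    # Run the diffusion pipeline; .videos holds the rendered frames.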
    video = pipe(
        ref_image_pil,
        uploaded_audio,
        face_mask_tensor,
        width,
        height,
        length,
        steps,
        cfg,
        generator=generator,
        audio_sample_rate=sample_rate,
        context_frames=context_frames,
        fps=fps,
        context_overlap=context_overlap
    ).videos

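    # Write the silent video first, then mux the input audio in with moviepy.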
    save_dir = Path("output/tmp")
    save_dir.mkdir(exist_ok=True, parents=True)
    output_video_path = save_dir / "output_video.mp4"
    save_videos_grid(video, str(output_video_path), n_rows=1, fps=fps)

    video_clip = VideoFileClip(str(output_video_path))
    audio_clip = AudioFileClip(uploaded_audio)
    final_output_path = save_dir / "output_video_with_audio.mp4"
    video_clip = video_clip.set_audio(audio_clip)
    video_clip.write_videofile(str(final_output_path), codec="libx264", audio_codec="aac")

    return final_output_path

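# Worker entry point: selects a GPU via CUDA_VISIBLE_DEVICES before delegating.
# Caveat: the variable must be set before CUDA is initialized in a process, and
# this module already touches CUDA at import time, so spawned workers may ignore it.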
def set_cuda_device_and_process(device_id, *args):
    os.environ["CUDA_VISIBLE_DEVICES"] = str(device_id)
    return process_video(*args)

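# Note: all three workers run the identical job; only the first result is used.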
def generate_video(uploaded_img, uploaded_audio, width, height, length, seed, facemask_dilation_ratio, facecrop_dilation_ratio, context_frames, context_overlap, cfg, steps, sample_rate, fps, device):
    # Use GPUs 1, 2, 3 for processing
    devices = [1, 2, 3]
    pool = mp.Pool(len(devices))
    results = [pool.apply_async(set_cuda_device_and_process, args=(device, uploaded_img, uploaded_audio, width, height, length, seed, facemask_dilation_ratio, facecrop_dilation_ratio, context_frames, context_overlap, cfg, steps, sample_rate, fps, "cuda")) for device in devices]
    pool.close()
    pool.join()

    output_paths = [result.get() for result in results]
    return output_paths[0]  # Return the first result or handle as needed

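# Gradio interface: a reference image and an audio clip in, a generated video out.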
with gr.Blocks() as demo:
    gr.Markdown('# Mimic FACE')

    with gr.Row():
        with gr.Column():
            uploaded_img = gr.Image(type="filepath", label="Reference Image")
            uploaded_audio = gr.Audio(type="filepath", label="Input Audio")
            with gr.Accordion(label=advanced_settings_label, open=False):
                with gr.Row():
                    width = gr.Slider(label="Width", minimum=128, maximum=1024, value=default_values["width"], interactive=available_property)
                    height = gr.Slider(label="Height", minimum=128, maximum=1024, value=default_values["height"], interactive=available_property)
                with gr.Row():
                    length = gr.Slider(label="Length", minimum=100, maximum=5000, value=default_values["length"], interactive=available_property)
                    seed = gr.Slider(label="Seed", minimum=0, maximum=10000, value=default_values["seed"], interactive=available_property)
                with gr.Row():
                    facemask_dilation_ratio = gr.Slider(label="Facemask Dilation Ratio", minimum=0.0, maximum=1.0, step=0.01, value=default_values["facemask_dilation_ratio"], interactive=available_property)
                    facecrop_dilation_ratio = gr.Slider(label="Facecrop Dilation Ratio", minimum=0.0, maximum=1.0, step=0.01, value=default_values["facecrop_dilation_ratio"], interactive=available_property)
                with gr.Row():
                    context_frames = gr.Slider(label="Context Frames", minimum=0, maximum=50, step=1, value=default_values["context_frames"], interactive=available_property)
                    context_overlap = gr.Slider(label="Context Overlap", minimum=0, maximum=10, step=1, value=default_values["context_overlap"], interactive=available_property)
                with gr.Row():
                    cfg = gr.Slider(label="CFG", minimum=0.0, maximum=10.0, step=0.1, value=default_values["cfg"], interactive=available_property)
                    steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=default_values["steps"], interactive=available_property)
                with gr.Row():
                    sample_rate = gr.Slider(label="Sample Rate", minimum=8000, maximum=48000, step=1000, value=default_values["sample_rate"], interactive=available_property)
                    fps = gr.Slider(label="FPS", minimum=1, maximum=60, step=1, value=default_values["fps"], interactive=available_property)
                    device = gr.Radio(label="Device", choices=["cuda", "cpu"], value=default_values["device"], interactive=available_property)
            generate_button = gr.Button("Generate Video")
        with gr.Column():
            output_video = gr.Video()
            gr.Examples(
                label="Portrait examples",
                examples=[
                    ['assets/test_imgs/a.png'],
                ],
                inputs=[uploaded_img]
            )
            gr.Examples(
                label="Audio examples",
                examples=[
                    ['assets/test_audios/chunnuanhuakai.wav'],
                ],
                inputs=[uploaded_audio]
            )

    generate_button.click(
        generate_video,
        inputs=[
            uploaded_img,
            uploaded_audio,
            width,
            height,
            length,
            seed,
            facemask_dilation_ratio,
            facecrop_dilation_ratio,
            context_frames,
            context_overlap,
            cfg,
            steps,
            sample_rate,
            fps,
            device
        ],
        outputs=output_video,
        api_name="generate_video_api"  # Expose an API endpoint
    )

parser = argparse.ArgumentParser(description='Mimic FACE')
parser.add_argument('--server_name', type=str, default='0.0.0.0', help='Server name')
parser.add_argument('--server_port', type=int, default=7860, help='Server port')
args = parser.parse_args()

# Make sure the 'spawn' start method is set before any worker processes are created.
if __name__ == '__main__':
    if multiprocessing.get_start_method(allow_none=True) != 'spawn':
        multiprocessing.set_start_method('spawn')

    demo.queue(max_size=4).launch(
        server_name=args.server_name,
        server_port=args.server_port,
        show_api=True  # Enable API documentation
    )