fantaxy committed on
Commit 9000a51 · verified · 1 Parent(s): e2d0fd3

Delete webgui.py

Files changed (1)
  1. webgui.py +0 -321
webgui.py DELETED
@@ -1,321 +0,0 @@
- #!/usr/bin/env python
- # -*- coding: UTF-8 -*-
- '''
- webui
- '''
-
- import os
- import random
- from datetime import datetime
- from pathlib import Path
-
- import cv2
- import numpy as np
- import torch
- from diffusers import AutoencoderKL, DDIMScheduler
- from omegaconf import OmegaConf
- from PIL import Image
- from src.models.unet_2d_condition import UNet2DConditionModel
- from src.models.unet_3d_echo import EchoUNet3DConditionModel
- from src.models.whisper.audio2feature import load_audio_model
- from src.pipelines.pipeline_echo_mimic import Audio2VideoPipeline
- from src.utils.util import save_videos_grid, crop_and_pad
- from src.models.face_locator import FaceLocator
- from moviepy.editor import VideoFileClip, AudioFileClip, ImageClip, CompositeVideoClip
- from facenet_pytorch import MTCNN
- import argparse
-
- import gradio as gr
-
- import huggingface_hub
-
- huggingface_hub.snapshot_download(
-     repo_id='BadToBest/EchoMimic',
-     local_dir='./pretrained_weights',
-     local_dir_use_symlinks=False,
- )
-
- # Set directly in code instead of reading an environment variable
- is_shared_ui = False  # or True, as needed
-
- # available_property follows the value of is_shared_ui
- available_property = not is_shared_ui
-
- # The is_shared_ui and available_property variables are now managed directly in code.
- advanced_settings_label = "Advanced Settings"
-
- default_values = {
-     "width": 512,
-     "height": 512,
-     "length": 1200,
-     "seed": 420,
-     "facemask_dilation_ratio": 0.1,
-     "facecrop_dilation_ratio": 1.0,
-     "context_frames": 12,
-     "context_overlap": 3,
-     "cfg": 2.5,
-     "steps": 30,
-     "sample_rate": 16000,
-     "fps": 24,
-     "device": "cuda"
- }
-
- ffmpeg_path = os.getenv('FFMPEG_PATH')
- if ffmpeg_path is None:
-     print("please download ffmpeg-static and export to FFMPEG_PATH. \nFor example: export FFMPEG_PATH=/musetalk/ffmpeg-4.4-amd64-static")
- elif ffmpeg_path not in os.getenv('PATH'):
-     print("add ffmpeg to path")
-     os.environ["PATH"] = f"{ffmpeg_path}:{os.environ['PATH']}"
-
- config_path = "./configs/prompts/animation.yaml"
- config = OmegaConf.load(config_path)
- if config.weight_dtype == "fp16":
-     weight_dtype = torch.float16
- else:
-     weight_dtype = torch.float32
-
- device = "cuda"
- if not torch.cuda.is_available():
-     device = "cpu"
-
- inference_config_path = config.inference_config
- infer_config = OmegaConf.load(inference_config_path)
-
- ############# model_init started #############
- ## vae init
- vae = AutoencoderKL.from_pretrained(config.pretrained_vae_path).to("cuda", dtype=weight_dtype)
-
- ## reference net init
- reference_unet = UNet2DConditionModel.from_pretrained(
-     config.pretrained_base_model_path,
-     subfolder="unet",
- ).to(dtype=weight_dtype, device=device)
- reference_unet.load_state_dict(torch.load(config.reference_unet_path, map_location="cpu"))
-
- ## denoising net init
- if os.path.exists(config.motion_module_path):
-     ### stage1 + stage2
-     denoising_unet = EchoUNet3DConditionModel.from_pretrained_2d(
-         config.pretrained_base_model_path,
-         config.motion_module_path,
-         subfolder="unet",
-         unet_additional_kwargs=infer_config.unet_additional_kwargs,
-     ).to(dtype=weight_dtype, device=device)
- else:
-     ### only stage1
-     denoising_unet = EchoUNet3DConditionModel.from_pretrained_2d(
-         config.pretrained_base_model_path,
-         "",
-         subfolder="unet",
-         unet_additional_kwargs={
-             "use_motion_module": False,
-             "unet_use_temporal_attention": False,
-             "cross_attention_dim": infer_config.unet_additional_kwargs.cross_attention_dim
-         }
-     ).to(dtype=weight_dtype, device=device)
-
- denoising_unet.load_state_dict(torch.load(config.denoising_unet_path, map_location="cpu"), strict=False)
-
- ## face locator init
- face_locator = FaceLocator(320, conditioning_channels=1, block_out_channels=(16, 32, 96, 256)).to(dtype=weight_dtype, device="cuda")
- face_locator.load_state_dict(torch.load(config.face_locator_path))
-
- ## load audio processor params
- audio_processor = load_audio_model(model_path=config.audio_model_path, device=device)
-
- ## load face detector params
- face_detector = MTCNN(image_size=320, margin=0, min_face_size=20, thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True, device=device)
-
- ############# model_init finished #############
-
- sched_kwargs = OmegaConf.to_container(infer_config.noise_scheduler_kwargs)
- scheduler = DDIMScheduler(**sched_kwargs)
-
- pipe = Audio2VideoPipeline(
-     vae=vae,
-     reference_unet=reference_unet,
-     denoising_unet=denoising_unet,
-     audio_guider=audio_processor,
-     face_locator=face_locator,
-     scheduler=scheduler,
- ).to("cuda", dtype=weight_dtype)
-
- def select_face(det_bboxes, probs):
-     ## pick the largest face among detections whose probability is above 0.8
-     ## box: xyxy
-     if det_bboxes is None or probs is None:
-         return None
-     filtered_bboxes = []
-     for bbox_i in range(len(det_bboxes)):
-         if probs[bbox_i] > 0.8:
-             filtered_bboxes.append(det_bboxes[bbox_i])
-     if len(filtered_bboxes) == 0:
-         return None
-     sorted_bboxes = sorted(filtered_bboxes, key=lambda x: (x[3] - x[1]) * (x[2] - x[0]), reverse=True)
-     return sorted_bboxes[0]
-
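- # process_video: mask and crop the reference face, run the Audio2Video pipeline,
- # then composite the watermark and mux the input audio into the final MP4.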
- def process_video(uploaded_img, uploaded_audio, width, height, length, seed, facemask_dilation_ratio, facecrop_dilation_ratio, context_frames, context_overlap, cfg, steps, sample_rate, fps, device):
-
-     if seed is not None and seed > -1:
-         generator = torch.manual_seed(seed)
-     else:
-         generator = torch.manual_seed(random.randint(100, 1000000))
-
-     #### face mask prepare
-     face_img = cv2.imread(uploaded_img)
-     face_mask = np.zeros((face_img.shape[0], face_img.shape[1])).astype('uint8')
-     det_bboxes, probs = face_detector.detect(face_img)
-     select_bbox = select_face(det_bboxes, probs)
-     if select_bbox is None:
-         face_mask[:, :] = 255
-     else:
-         xyxy = select_bbox[:4]
-         xyxy = np.round(xyxy).astype('int')
-         rb, re, cb, ce = xyxy[1], xyxy[3], xyxy[0], xyxy[2]
-         r_pad = int((re - rb) * facemask_dilation_ratio)
-         c_pad = int((ce - cb) * facemask_dilation_ratio)
-         face_mask[rb - r_pad : re + r_pad, cb - c_pad : ce + c_pad] = 255
-
-         #### face crop
-         r_pad_crop = int((re - rb) * facecrop_dilation_ratio)
-         c_pad_crop = int((ce - cb) * facecrop_dilation_ratio)
-         crop_rect = [max(0, cb - c_pad_crop), max(0, rb - r_pad_crop), min(ce + c_pad_crop, face_img.shape[1]), min(re + r_pad_crop, face_img.shape[0])]
-         face_img = crop_and_pad(face_img, crop_rect)
-         face_mask = crop_and_pad(face_mask, crop_rect)
-         face_img = cv2.resize(face_img, (width, height))
-         face_mask = cv2.resize(face_mask, (width, height))
-
-     ref_image_pil = Image.fromarray(face_img[:, :, [2, 1, 0]])
-     face_mask_tensor = torch.Tensor(face_mask).to(dtype=weight_dtype, device="cuda").unsqueeze(0).unsqueeze(0).unsqueeze(0) / 255.0
-
-     video = pipe(
-         ref_image_pil,
-         uploaded_audio,
-         face_mask_tensor,
-         width,
-         height,
-         length,
-         steps,
-         cfg,
-         generator=generator,
-         audio_sample_rate=sample_rate,
-         context_frames=context_frames,
-         fps=fps,
-         context_overlap=context_overlap
-     ).videos
-
-     save_dir = Path("output/tmp")
-     save_dir.mkdir(exist_ok=True, parents=True)
-     output_video_path = save_dir / "output_video.mp4"
-     save_videos_grid(video, str(output_video_path), n_rows=1, fps=fps)
211
-
212
- video_clip = VideoFileClip(str(output_video_path))
213
- audio_clip = AudioFileClip(uploaded_audio)
214
-
215
- # 워터마크 이미지 로드 및 크기 조정
216
- watermark = (ImageClip("watermark.png") # 워터마크 이미지 경로
217
- .set_duration(video_clip.duration)
218
- .resize(height=50) # 워터마크 크기 조정
219
- .margin(right=8, bottom=8, opacity=0) # 마진 및 투명도 설정
220
- .set_pos(("right", "bottom"))) # 위치 설정
221
-
222
- final_clip = video_clip.set_audio(audio_clip).fx(vfx.composite, watermark)
223
-
224
- # APP.PY와 동일한 경로에 위치시키기
225
- final_output_path = Path(__file__).parent / "output_video_with_audio.mp4"
226
- final_clip.write_videofile(str(final_output_path), codec="libx264", audio_codec="aac")
227
-
228
- return final_output_path
229
-
230
-
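- # Gradio UI: reference image, audio, and advanced settings on the left; generated video and examples on the right.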
- with gr.Blocks() as demo:
-     gr.Markdown('# Mimic FACE')
-
-     with gr.Row():
-         with gr.Column():
-             uploaded_img = gr.Image(type="filepath", label="Reference Image")
-             uploaded_audio = gr.Audio(type="filepath", label="Input Audio")
-             with gr.Accordion(label=advanced_settings_label, open=False):
-                 with gr.Row():
-                     width = gr.Slider(label="Width", minimum=128, maximum=1024, value=default_values["width"], interactive=available_property)
-                     height = gr.Slider(label="Height", minimum=128, maximum=1024, value=default_values["height"], interactive=available_property)
-                 with gr.Row():
-                     length = gr.Slider(label="Length", minimum=100, maximum=5000, value=default_values["length"], interactive=available_property)
-                     seed = gr.Slider(label="Seed", minimum=0, maximum=10000, value=default_values["seed"], interactive=available_property)
-                 with gr.Row():
-                     facemask_dilation_ratio = gr.Slider(label="Facemask Dilation Ratio", minimum=0.0, maximum=1.0, step=0.01, value=default_values["facemask_dilation_ratio"], interactive=available_property)
-                     facecrop_dilation_ratio = gr.Slider(label="Facecrop Dilation Ratio", minimum=0.0, maximum=1.0, step=0.01, value=default_values["facecrop_dilation_ratio"], interactive=available_property)
-                 with gr.Row():
-                     context_frames = gr.Slider(label="Context Frames", minimum=0, maximum=50, step=1, value=default_values["context_frames"], interactive=available_property)
-                     context_overlap = gr.Slider(label="Context Overlap", minimum=0, maximum=10, step=1, value=default_values["context_overlap"], interactive=available_property)
-                 with gr.Row():
-                     cfg = gr.Slider(label="CFG", minimum=0.0, maximum=10.0, step=0.1, value=default_values["cfg"], interactive=available_property)
-                     steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=default_values["steps"], interactive=available_property)
-                 with gr.Row():
-                     sample_rate = gr.Slider(label="Sample Rate", minimum=8000, maximum=48000, step=1000, value=default_values["sample_rate"], interactive=available_property)
-                     fps = gr.Slider(label="FPS", minimum=1, maximum=60, step=1, value=default_values["fps"], interactive=available_property)
-                 device = gr.Radio(label="Device", choices=["cuda", "cpu"], value=default_values["device"], interactive=available_property)
-             generate_button = gr.Button("Generate Video")
-         with gr.Column():
-             output_video = gr.Video()
-             gr.Examples(
-                 label = "Portrait examples",
-                 examples = [
-                     ['assets/test_imgs/a.png'],
-                 ],
-                 inputs = [uploaded_img]
-             )
-             gr.Examples(
-                 label = "Audio examples",
-                 examples = [
-                     ['assets/test_audios/a.wav'],
-                 ],
-                 inputs = [uploaded_audio]
-             )
-
-     def generate_video(uploaded_img, uploaded_audio, width, height, length, seed, facemask_dilation_ratio, facecrop_dilation_ratio, context_frames, context_overlap, cfg, steps, sample_rate, fps, device):
-
-         final_output_path = process_video(
-             uploaded_img, uploaded_audio, width, height, length, seed, facemask_dilation_ratio, facecrop_dilation_ratio, context_frames, context_overlap, cfg, steps, sample_rate, fps, device
-         )
-         output_video = final_output_path
-         return final_output_path
-
-     generate_button.click(
-         generate_video,
-         inputs=[
-             uploaded_img,
-             uploaded_audio,
-             width,
-             height,
-             length,
-             seed,
-             facemask_dilation_ratio,
-             facecrop_dilation_ratio,
-             context_frames,
-             context_overlap,
-             cfg,
-             steps,
-             sample_rate,
-             fps,
-             device
-         ],
-         outputs=output_video,
-         api_name="generate_video_api"  # Expose API endpoint
-     )
-
-
- parser = argparse.ArgumentParser(description='Mimic FACE')
- parser.add_argument('--server_name', type=str, default='0.0.0.0', help='Server name')
- parser.add_argument('--server_port', type=int, default=7860, help='Server port')
- args = parser.parse_args()
-
-
-
- if __name__ == '__main__':
-     # demo.launch(
-     demo.queue(max_size=4).launch(
-         server_name=args.server_name,
-         server_port=args.server_port,
-         show_api=True  # Enable API documentation
-     )