Update app.py
app.py CHANGED
@@ -1,111 +1,217 @@
Removed (old app.py, 111 lines; largely unrecoverable from this page). The legible fragments show it imported os and gradio, set a module flag in_webui = False, built the interface inside nested with gr.Row(): blocks, wired submit.click(fn=sad_talker.test, inputs=[source_image, driven_audio, preprocess_type, is_still_mode, enhancer, batch_size, size_of_image, pose_style], outputs=[gen_video]) in both branches of an if/else, returned sadtalker_interface from a factory, and ended with:

if __name__ == "__main__":
    demo = sadtalker_demo()
    demo.queue()
    demo.launch()

Added (new app.py, 217 lines):
import os
import platform
import uuid
import shutil
from pydub import AudioSegment
import spaces
import torch
import gradio as gr
from huggingface_hub import snapshot_download

from examples.get_examples import get_examples
from src.facerender.pirender_animate import AnimateFromCoeff_PIRender
from src.utils.preprocess import CropAndExtract
from src.test_audio2coeff import Audio2Coeff
from src.facerender.animate import AnimateFromCoeff
from src.generate_batch import get_data
from src.generate_facerender_batch import get_facerender_data
from src.utils.init_path import init_path

checkpoint_path = 'checkpoints'
config_path = 'src/config'
device = ("cuda" if torch.cuda.is_available()
          else "mps" if platform.system() == 'Darwin' else "cpu")

# Cache torch-hub weights next to the SadTalker checkpoints and fetch the
# model files from the Hub once at startup.
os.environ['TORCH_HOME'] = checkpoint_path
snapshot_download(repo_id='vinthony/SadTalker-V002rc',
                  local_dir=checkpoint_path, local_dir_use_symlinks=True)

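The chained conditional above prefers CUDA, then MPS on Apple hardware, then CPU. The same precedence spelled out as a function, as a readability sketch only (not part of the commit):

def pick_device() -> str:
    # Mirrors the one-line conditional in app.py.
    if torch.cuda.is_available():
        return "cuda"
    if platform.system() == "Darwin":
        return "mps"
    return "cpu"
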
def mp3_to_wav(mp3_filename, wav_filename, frame_rate):
    # Re-encode the input file as WAV at the requested frame rate.
    AudioSegment.from_file(file=mp3_filename).set_frame_rate(
        frame_rate).export(wav_filename, format="wav")

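A one-line usage sketch (hypothetical file names; pydub shells out to ffmpeg, which must be installed and on the PATH):

mp3_to_wav('voice.mp3', 'voice.wav', 16000)  # writes voice.wav at 16 kHz
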
@spaces.GPU(duration=120)  # request ZeroGPU hardware for up to 120 s per call
def generate_video(source_image, driven_audio, preprocess='crop', still_mode=False, use_enhancer=False,
                   batch_size=1, size=256, pose_style=0, facerender='facevid2vid', exp_scale=1.0,
                   use_ref_video=False, ref_video=None, ref_info=None, use_idle_mode=False,
                   length_of_audio=0, use_blink=True, result_dir='./results/'):
    # Initialize models and paths
    sadtalker_paths = init_path(
        checkpoint_path, config_path, size, False, preprocess)
    audio_to_coeff = Audio2Coeff(sadtalker_paths, device)
    preprocess_model = CropAndExtract(sadtalker_paths, device)
    animate_from_coeff = AnimateFromCoeff(sadtalker_paths, device) if facerender == 'facevid2vid' and device != 'mps' \
        else AnimateFromCoeff_PIRender(sadtalker_paths, device)

    # Create directories for saving results
    time_tag = str(uuid.uuid4())
    save_dir = os.path.join(result_dir, time_tag)
    os.makedirs(save_dir, exist_ok=True)
    input_dir = os.path.join(save_dir, 'input')
    os.makedirs(input_dir, exist_ok=True)

    # Process source image
    pic_path = os.path.join(input_dir, os.path.basename(source_image))
    shutil.move(source_image, input_dir)

    # Process driven audio
    if driven_audio and os.path.isfile(driven_audio):
        audio_path = os.path.join(input_dir, os.path.basename(driven_audio))
        if audio_path.endswith('.mp3'):  # convert MP3 input to a 16 kHz WAV
            mp3_to_wav(driven_audio, audio_path.replace('.mp3', '.wav'), 16000)
            audio_path = audio_path.replace('.mp3', '.wav')
        else:
            shutil.move(driven_audio, input_dir)
    elif use_idle_mode:
        # A silent clip of the requested length stands in for real audio.
        audio_path = os.path.join(
            input_dir, 'idlemode_' + str(length_of_audio) + '.wav')
        AudioSegment.silent(
            duration=1000 * length_of_audio).export(audio_path, format="wav")
    else:
        assert use_ref_video and ref_info == 'all'

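    # At this point audio_path is set unless we are in pure video-driving
    # mode (use_ref_video with ref_info == 'all'); that branch extracts the
    # audio track from the reference video below.
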
    # Process reference video
    if use_ref_video and ref_info == 'all':  # full video-driving mode
        ref_video_videoname = os.path.splitext(os.path.split(ref_video)[-1])[0]
        audio_path = os.path.join(save_dir, ref_video_videoname + '.wav')
        os.system(
            f"ffmpeg -y -hide_banner -loglevel error -i {ref_video} {audio_path}")
        ref_video_frame_dir = os.path.join(save_dir, ref_video_videoname)
        os.makedirs(ref_video_frame_dir, exist_ok=True)
        ref_video_coeff_path, _, _ = preprocess_model.generate(
            ref_video, ref_video_frame_dir, preprocess, source_image_flag=False)
    else:
        ref_video_coeff_path = None

    # Preprocess source image
    first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
    os.makedirs(first_frame_dir, exist_ok=True)
    first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(
        pic_path, first_frame_dir, preprocess, True, size)
    if first_coeff_path is None:
        raise AttributeError("No face was detected in the source image")

    # Determine reference coefficients
    ref_pose_coeff_path, ref_eyeblink_coeff_path = None, None
    if use_ref_video:
        if ref_info == 'pose':
            ref_pose_coeff_path = ref_video_coeff_path
        elif ref_info == 'blink':
            ref_eyeblink_coeff_path = ref_video_coeff_path
        elif ref_info == 'pose+blink':
            ref_pose_coeff_path = ref_eyeblink_coeff_path = ref_video_coeff_path
        else:
            ref_pose_coeff_path = ref_eyeblink_coeff_path = None

    # Generate coefficients from audio or reference video
    if use_ref_video and ref_info == 'all':
        coeff_path = ref_video_coeff_path
    else:
        batch = get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path=ref_eyeblink_coeff_path,
                         still=still_mode, idlemode=use_idle_mode, length_of_audio=length_of_audio, use_blink=use_blink)
        coeff_path = audio_to_coeff.generate(
            batch, save_dir, pose_style, ref_pose_coeff_path)

    # Generate video from coefficients
    data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path, batch_size, still_mode=still_mode,
                               preprocess=preprocess, size=size, expression_scale=exp_scale, facemodel=facerender)
    return_path = animate_from_coeff.generate(data, save_dir, pic_path, crop_info, enhancer='gfpgan' if use_enhancer else None,
                                              preprocess=preprocess, img_size=size)
    video_name = data['video_name']
    print(f'The generated video {video_name} is saved in {save_dir}')

    return return_path

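For headless use the pipeline can be called directly. A minimal sketch; the image and audio paths are assumptions, and note that generate_video moves its input files into the per-run results folder, so pass copies you can spare:

result_path = generate_video('face.png', 'speech.wav',
                             preprocess='crop', size=256, use_enhancer=False)
print(result_path)  # path of the rendered talking-head video
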
# Gradio UI
with gr.Blocks(analytics_enabled=False) as demo:
    with gr.Row():
        with gr.Column(variant='panel'):
            with gr.Tabs(elem_id="sadtalker_source_image"):
                with gr.TabItem('Source image'):
                    with gr.Row():
                        source_image = gr.Image(
                            label="Source image", sources="upload", type="filepath", elem_id="img2img_image")

            with gr.Tabs(elem_id="sadtalker_driven_audio"):
                with gr.TabItem('Driving Methods'):
                    gr.Markdown(
                        "Possible driving combinations: <br> 1. Audio only 2. Audio/IDLE mode + reference video (pose, blink, pose+blink) 3. IDLE mode only 4. Reference video only (all)")

                    with gr.Row():
                        driven_audio = gr.Audio(
                            label="Input audio", sources="upload", type="filepath")
                        driven_audio_no = gr.Audio(
                            label="IDLE mode, no audio required", sources="upload", type="filepath", visible=False)

                        with gr.Column():
                            use_idle_mode = gr.Checkbox(
                                label="Use Idle Animation")
                            length_of_audio = gr.Number(
                                value=5, label="Length (in seconds) of the generated video")
                            # Show exactly one of the two audio widgets at a time.
                            use_idle_mode.change(lambda choice: (gr.update(visible=not choice), gr.update(visible=choice)),
                                                 inputs=use_idle_mode, outputs=[driven_audio, driven_audio_no])

                    with gr.Row():
                        ref_video = gr.Video(
                            label="Reference Video", sources="upload", elem_id="vidref")

                        with gr.Column():
                            use_ref_video = gr.Checkbox(
                                label="Use Reference Video")
                            ref_info = gr.Radio(['pose', 'blink', 'pose+blink', 'all'], value='pose', label='Reference Video',
                                                info="What to borrow from the reference video ('all' transfers everything, i.e. video-driving mode)")

                        # Uploading a reference video implies using it.
                        ref_video.change(lambda path: gr.update(
                            value=path is not None), inputs=ref_video, outputs=use_ref_video)

        with gr.Column(variant='panel'):
            with gr.Tabs(elem_id="sadtalker_checkbox"):
                with gr.TabItem('Settings'):
                    with gr.Column(variant='panel'):
                        with gr.Row():
                            pose_style = gr.Slider(
                                minimum=0, maximum=45, step=1, label="Pose style", value=0)
                            exp_weight = gr.Slider(
                                minimum=0, maximum=3, step=0.1, label="expression scale", value=1)
                            blink_every = gr.Checkbox(
                                label="use eye blink", value=True)

                        with gr.Row():
                            size_of_image = gr.Radio(
                                [256, 512], value=256, label='face model resolution', info="use the 256 or 512 model?")
                            preprocess_type = gr.Radio(
                                ['crop', 'resize', 'full', 'extcrop', 'extfull'], value='crop', label='preprocess', info="How to handle the input image?")

                        with gr.Row():
                            is_still_mode = gr.Checkbox(
                                label="Still Mode (less head motion; works with preprocess 'full')")
                            facerender = gr.Radio(
                                ['facevid2vid', 'pirender'], value='facevid2vid', label='facerender', info="which face renderer to use?")

                        with gr.Row():
                            batch_size = gr.Slider(
                                label="batch size in generation", step=1, maximum=10, value=1)
                            enhancer = gr.Checkbox(
                                label="GFPGAN as Face enhancer", value=True)

                    submit = gr.Button(
                        'Generate', elem_id="sadtalker_generate", variant='primary')

            with gr.Tabs(elem_id="sadtalker_generated"):
                gen_video = gr.Video(label="Generated video")

    submit.click(
        fn=generate_video,
        inputs=[source_image, driven_audio, preprocess_type, is_still_mode, enhancer, batch_size, size_of_image,
                pose_style, facerender, exp_weight, use_ref_video, ref_video, ref_info, use_idle_mode, length_of_audio, blink_every],
        outputs=[gen_video],
    )
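    # The positional order of `inputs` above must match generate_video's
    # signature: (source_image, driven_audio, preprocess, still_mode,
    #  use_enhancer, batch_size, size, pose_style, facerender, exp_scale,
    #  use_ref_video, ref_video, ref_info, use_idle_mode, length_of_audio,
    #  use_blink); result_dir is left at its default.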
    with gr.Row():
        gr.Examples(examples=get_examples(), inputs=[source_image, driven_audio, preprocess_type, is_still_mode, enhancer],
                    outputs=[gen_video], fn=generate_video)
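    # Assumption: example caching is not enabled here, so clicking an example
    # row only fills the inputs; the user still presses Generate to run it.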
demo.launch(debug=True)
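Two closing notes. The new script drops the old demo.queue() call, presumably because the @spaces.GPU decorator already gates each generate_video call on ZeroGPU hardware (an assumption; the commit itself does not say). And the show/hide wiring around use_idle_mode is a reusable pattern; a minimal standalone sketch with hypothetical labels, independent of SadTalker:

import gradio as gr

with gr.Blocks() as toggle_demo:
    use_idle = gr.Checkbox(label="Use Idle Animation")
    audio_on = gr.Audio(label="Input audio", visible=True)
    audio_off = gr.Audio(label="IDLE mode, no audio required", visible=False)
    # Whenever the checkbox flips, show one widget and hide the other,
    # mirroring use_idle_mode.change(...) in app.py.
    use_idle.change(
        lambda on: (gr.update(visible=not on), gr.update(visible=on)),
        inputs=use_idle,
        outputs=[audio_on, audio_off],
    )

toggle_demo.launch()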