add merge.py
- colab_flask.py +18 -72
- gfpgan/inference_gfpgan.py +23 -19
- merge.py +77 -0
colab_flask.py
CHANGED
@@ -79,13 +79,13 @@ def call_wav2lip(video_path, audio_path, output_path):
    cmd = [
        "python",
        "wav2lip/inference.py",
-       "--checkpoint_path", checkpoint_path,
+       "--checkpoint_path", checkpoint_path,  #
        # "--segmentation_path", "checkpoints/face_segmentation.pth",
        "--face", video_path,
        "--audio", audio_path,
        "--outfile", output_path,
    ]
-
+   print(f'Call subprocess: {cmd}')
    proc = subprocess.Popen(cmd, shell=False)
    proc.communicate()
    duration = datetime.now() - start
@@ -128,67 +128,30 @@ def call_gfpgan(wav2lip_mp4, audio_path, output_mp4):
        "gfpgan/inference_gfpgan.py",
        "-i", origin_frames_folder,
        "-o", folder_path,
-       "-v", str(1.4),
-       "-s", str(2),
+       # "-v", str(1.4),
+       # "-s", str(2),
        "--only_center_face",
        "--bg_upsampler", 'realesrgan'
    ]
    print(cmd)
-   proc = subprocess.Popen(cmd, shell=False)
+   proc = subprocess.Popen(cmd, shell=True)
    proc.communicate()
    duration = datetime.now() - start
    print(f'inference_gfpgan finished in {duration}')

[... 13 deleted lines not rendered in this view ...]
-           if img is None:
-               continue
-           height, width, layers = img.shape
-           size = (width, height)
-           img_array.append(img)
-
-       fourcc = cv2.VideoWriter_fourcc(*'X264')
-       filename = 'batch_' + str(batch).zfill(4) + '.mp4'
-       out = cv2.VideoWriter(os.path.join(folder_path, filename),
-                             fourcc, 30, size)
-       batch = batch + 1
-
-       for i in range(len(img_array)):
-           out.write(img_array[i])
-       out.release()
-
-   concatTextFilePath = os.path.join(folder_path, "concat.txt")
-
-   concatTextFile = open(concatTextFilePath, "w")
-   for ips in range(batch):
-       concatTextFile.write("file batch_" + str(ips).zfill(4) + ".mp4\n")
-   concatTextFile.close()
-
-   concatedVideoOutputPath = os.path.join(folder_path, "concated_output.mp4")
-   ff = FFmpeg(
-       inputs={concatTextFilePath: None},
-       outputs={concatedVideoOutputPath: '-y -c copy'}
-   )
-   # !ffmpeg -y -f concat -i {concatTextFilePath} -c copy {concatedVideoOutputPath}
-   ff.run()
-
-   ff = FFmpeg(
-       inputs={concatedVideoOutputPath: None, audio_path: None},
-       outputs={output_mp4: '-y -map 0 -map 1:a -c:v libx264 -c:a aac -shortest'}
-   )
-   # !ffmpeg -y -i {concatedVideoOutputPath} -i {inputAudioPath} -map 0 -map 1:a -c:v copy -shortest {finalProcessedOuputVideo}
-   ff.run()
+   start = datetime.now()
+   cmd = [
+       "python",
+       "merge.py",
+       "-j", folder_path,
+       "-a", audio_path,
+       "-o", output_mp4,
+   ]
+   proc = subprocess.Popen(cmd, shell=True)
+   proc.communicate()
+   duration = datetime.now() - start
+   print(f'Merge output in {duration}')
+   print(output_mp4)

    # from google.colab import files
    # files.download(finalProcessedOuputVideo)
@@ -197,20 +160,3 @@ def call_gfpgan(wav2lip_mp4, audio_path, output_mp4):
if __name__ == '__main__':
    run_with_ngrok(app, auth_token=auth_token)
    app.run()
-
-
-def test():
-    # request
-    import requests
-    ngrok_url = f"http://74c0-34-87-172-60.ngrok-free.app"
-    url = f"{ngrok_url}/wav2lip"
-    print(url)
-    video_path = '/Users/taoluo/Downloads/oIy5B4-vHVw.4.6588496370531551262.0.jpg'
-    audio_path = '/Users/taoluo/Downloads/test_audio.mp3'
-    files = {'video': ('video.jpg', open(video_path, 'rb')), 'audio': ('audio.mp3', open(audio_path, 'rb'))}
-    headers = {'ngrok-skip-browser-warning': 'true'}
-    response = requests.post(url, files=files, headers=headers)
-    # Print the response
-    print(response.json())
-    data = response.json()
-    print(ngrok_url + data['url'])
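Worth noting when reading the hunk above: on POSIX systems, passing a list to subprocess together with shell=True hands only the first element ("python") to the shell and drops the remaining arguments, whereas the wav2lip call in the same file keeps shell=False. Below is a minimal sketch, not the committed code, of invoking the new GFPGAN and merge.py steps without the shell; the function names and parameters mirror call_gfpgan above and are assumptions.

# Minimal sketch (assumed helper names), not the committed code.
import subprocess

def run_step(cmd):
    # subprocess.run waits for the child and raises on a non-zero exit code
    subprocess.run(cmd, shell=False, check=True)

def call_gfpgan_and_merge(origin_frames_folder, folder_path, audio_path, output_mp4):
    # enhance the extracted frames with GFPGAN
    run_step([
        "python", "gfpgan/inference_gfpgan.py",
        "-i", origin_frames_folder,
        "-o", folder_path,
        "--only_center_face",
        "--bg_upsampler", "realesrgan",
    ])
    # stitch the restored frames back into a video with the original audio
    run_step([
        "python", "merge.py",
        "-j", folder_path,
        "-a", audio_path,
        "-o", output_mp4,
    ])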
gfpgan/inference_gfpgan.py
CHANGED
@@ -9,6 +9,8 @@ from tqdm import tqdm

from gfpgan import GFPGANer

+root_dir = os.path.dirname(os.path.abspath(__file__))
+

def main():
    """Inference demo for GFPGAN (for users).
@@ -45,11 +47,13 @@ def main():
        help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs. Default: auto')
    args = parser.parse_args()

-   args = parser.parse_args()
-
    # ------------------------ input & output ------------------------
    if args.input.endswith('/'):
        args.input = args.input[:-1]
+   assert os.path.isdir(args.input), f'Input folder {args.input} not exist.'
+
+   os.makedirs(args.output, exist_ok=True)
+
    if os.path.isfile(args.input):
        img_list = [args.input]
    else:
@@ -96,9 +100,9 @@ def main():
        raise ValueError(f'Wrong model version {args.version}.')

    # determine model paths
-   model_path = os.path.join('experiments/pretrained_models', model_name + '.pth')
+   model_path = os.path.join(root_dir, 'experiments/pretrained_models', model_name + '.pth')
    if not os.path.isfile(model_path):
-       model_path = os.path.join('realesrgan/weights', model_name + '.pth')
+       model_path = os.path.join(root_dir, 'realesrgan/weights', model_name + '.pth')
    if not os.path.isfile(model_path):
        raise ValueError(f'Model {model_name} does not exist.')

@@ -122,21 +126,21 @@ def main():
            input_img, has_aligned=args.aligned, only_center_face=args.only_center_face, paste_back=True)

        # save faces
-       if(args.save_faces):
[... 14 deleted lines not rendered in this view ...]
+       if (args.save_faces):
+           for idx, (cropped_face, restored_face) in enumerate(zip(cropped_faces, restored_faces)):
+               # save cropped face
+               save_crop_path = os.path.join(args.output, 'cropped_faces', f'{basename}_{idx:02d}.png')
+               imwrite(cropped_face, save_crop_path)
+               # save restored face
+               if args.suffix is not None:
+                   save_face_name = f'{basename}_{idx:02d}_{args.suffix}.png'
+               else:
+                   save_face_name = f'{basename}_{idx:02d}.png'
+               save_restore_path = os.path.join(args.output, 'restored_faces', save_face_name)
+               imwrite(restored_face, save_restore_path)
+               # save comparison image
+               cmp_img = np.concatenate((cropped_face, restored_face), axis=1)
+               imwrite(cmp_img, os.path.join(args.output, 'cmp', f'{basename}_{idx:02d}.png'))

        # save restored img
        if restored_img is not None:
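The root_dir change above resolves the model weights relative to the script file instead of the caller's working directory, which matters because colab_flask.py launches the script from outside the gfpgan folder. A minimal standalone sketch of that path-resolution pattern follows; the helper name find_model_path is hypothetical, not part of the commit.

# Sketch of the path-resolution pattern introduced above (hypothetical helper).
import os

root_dir = os.path.dirname(os.path.abspath(__file__))

def find_model_path(model_name):
    # prefer the pretrained-models folder, fall back to the Real-ESRGAN weights folder
    candidates = [
        os.path.join(root_dir, 'experiments/pretrained_models', model_name + '.pth'),
        os.path.join(root_dir, 'realesrgan/weights', model_name + '.pth'),
    ]
    for path in candidates:
        if os.path.isfile(path):
            return path
    raise ValueError(f'Model {model_name} does not exist.')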
merge.py
ADDED
@@ -0,0 +1,77 @@
+import argparse
+import os
+import sys
+
+from tqdm import tqdm
+from ffmpy import FFmpeg
+import cv2
+
+
+def cli(args):
+    folder_path = args.job_folder
+
+    restoredFramesPath = os.path.join(folder_path, 'restored_imgs')
+    os.makedirs(restoredFramesPath, exist_ok=True)
+    dir_list = os.listdir(restoredFramesPath)
+    dir_list.sort()
+    batch = 0
+    batchSize = 300
+    for i in tqdm(range(0, len(dir_list), batchSize)):
+        img_array = []
+        start, end = i, i + batchSize
+        print("processing ", start, end)
+
+        batch_video = os.path.join(folder_path, 'batch_' + str(batch).zfill(4) + '.avi')
+        if os.path.isfile(batch_video):
+            print(f'{batch_video} existed, skipped.')
+            batch = batch + 1
+            continue
+
+        for filename in tqdm(dir_list[start:end]):
+            filename = os.path.join(restoredFramesPath, filename)
+            img = cv2.imread(filename)
+            if img is None:
+                continue
+            height, width, layers = img.shape
+            size = (width, height)
+            img_array.append(img)
+
+        out = cv2.VideoWriter(batch_video, cv2.VideoWriter_fourcc(*'DIVX'), 30, size)
+        batch = batch + 1
+        print(f'batch video dumped: {batch_video}')
+
+        for j in range(len(img_array)):
+            out.write(img_array[j])
+        out.release()
+
+    concatTextFilePath = os.path.join(folder_path, "concat.txt")
+
+    concatTextFile = open(concatTextFilePath, "w")
+    for ips in range(batch):
+        filename = os.path.join(folder_path, "batch_" + str(ips).zfill(4) + ".avi")
+        concatTextFile.write(f"file '{filename}'\n")
+    concatTextFile.close()
+
+    ff = FFmpeg(
+        inputs={
+            concatTextFilePath: '-f concat -safe 0 ',
+            args.audio: None
+        },
+        outputs={args.output: '-y -c:v libx264 -c:a aac -shortest'}
+    )
+    # !ffmpeg -y -f concat -safe 0 -i {concatTextFilePath} -i {audio} -c:v libx264 -c:a aac -shortest {output}
+    ff.run()
+
+
+if __name__ == '__main__':
+    # merge images in restored_imgs folder as video, merge audio
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '-j',
+        '--job_folder',
+        help='job_folder, ./restored_imgs must under it')
+    parser.add_argument('-a', '--audio', type=str, help='audio file path')
+    parser.add_argument('-o', '--output', type=str, help='output file path')
+    args = parser.parse_args()
+
+    cli(args)
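merge.py above creates each batch's cv2.VideoWriter after the frame-reading loop, using the size of the last readable frame. As a point of reference only, a sketch of the same batch-writing step that opens the writer from the first readable frame is shown below; it is not part of the commit, and the function and variable names are hypothetical.

# Sketch (not committed): write one batch of frames, opening the writer lazily.
import cv2

def write_batch(frame_paths, batch_video, fps=30):
    out = None
    for path in frame_paths:
        img = cv2.imread(path)
        if img is None:
            continue
        if out is None:
            height, width = img.shape[:2]
            # DIVX-in-AVI, matching the batch files merge.py concatenates
            out = cv2.VideoWriter(batch_video, cv2.VideoWriter_fourcc(*'DIVX'), fps, (width, height))
        out.write(img)
    if out is not None:
        out.release()
    # True if at least one frame was written
    return out is not None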