add sound back into enhanced video (#61)
Files changed:
- README.md (+3 -4)
- basicsr/utils/video_util.py (+119 -0)
- facelib/utils/misc.py (+2 -2)
- inference_codeformer.py (+16 -11)
- requirements.txt (+1 -3)
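In short, the change probes the input clip with ffmpeg-python, keeps a handle to its audio stream while the frames are enhanced, and copies that stream back in (`acodec='copy'`) when the enhanced frames are re-encoded. Below is a simplified standalone sketch of that idea; it is not the commit's code (which pipes raw frames through ffmpeg, see `basicsr/utils/video_util.py` further down), and the file names and image-sequence pattern are hypothetical.

```python
import ffmpeg

src = 'inputs/clip.mp4'            # hypothetical source clip
out = 'results/clip_restored.mp4'  # hypothetical output path

# Does the source clip carry an audio stream at all?
probe = ffmpeg.probe(src)
has_audio = any(s['codec_type'] == 'audio' for s in probe['streams'])

# Enhanced frames assumed to be saved as an image sequence (hypothetical pattern).
video = ffmpeg.input('results/final_results/%04d.png', framerate=24)

if has_audio:
    # Mux the original audio back in without re-encoding it.
    audio = ffmpeg.input(src).audio
    stream = ffmpeg.output(video, audio, out, vcodec='libx264', pix_fmt='yuv420p', acodec='copy')
else:
    stream = ffmpeg.output(video, out, vcodec='libx264', pix_fmt='yuv420p')

stream.overwrite_output().run()
```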
README.md
CHANGED
@@ -81,12 +81,12 @@ python basicsr/setup.py develop
 ### Quick Inference
 
 #### Download Pre-trained Models:
-Download the facelib pretrained models from [[Google Drive](https://drive.google.com/drive/folders/1b_3qwrzY_kTQh0-SnBoGBgOrJ_PLZSKm?usp=sharing) | [OneDrive](https://entuedu-my.sharepoint.com/:f:/g/personal/s200094_e_ntu_edu_sg/EvDxR7FcAbZMp_MA9ouq7aQB8XTppMb3-T0uGZ_2anI2mg?e=DXsJFo)] to the `weights/facelib` folder. You can manually download the pretrained models OR download by
+Download the facelib pretrained models from [[Google Drive](https://drive.google.com/drive/folders/1b_3qwrzY_kTQh0-SnBoGBgOrJ_PLZSKm?usp=sharing) | [OneDrive](https://entuedu-my.sharepoint.com/:f:/g/personal/s200094_e_ntu_edu_sg/EvDxR7FcAbZMp_MA9ouq7aQB8XTppMb3-T0uGZ_2anI2mg?e=DXsJFo)] to the `weights/facelib` folder. You can manually download the pretrained models OR download by running the following command.
 ```
 python scripts/download_pretrained_models.py facelib
 ```
 
-Download the CodeFormer pretrained models from [[Google Drive](https://drive.google.com/drive/folders/1CNNByjHDFt0b95q54yMVp6Ifo5iuU6QS?usp=sharing) | [OneDrive](https://entuedu-my.sharepoint.com/:f:/g/personal/s200094_e_ntu_edu_sg/EoKFj4wo8cdIn2-TY2IV6CYBhZ0pIG4kUOeHdPR_A5nlbg?e=AO8UN9)] to the `weights/CodeFormer` folder. You can manually download the pretrained models OR download by
+Download the CodeFormer pretrained models from [[Google Drive](https://drive.google.com/drive/folders/1CNNByjHDFt0b95q54yMVp6Ifo5iuU6QS?usp=sharing) | [OneDrive](https://entuedu-my.sharepoint.com/:f:/g/personal/s200094_e_ntu_edu_sg/EoKFj4wo8cdIn2-TY2IV6CYBhZ0pIG4kUOeHdPR_A5nlbg?e=AO8UN9)] to the `weights/CodeFormer` folder. You can manually download the pretrained models OR download by running the following command.
 ```
 python scripts/download_pretrained_models.py CodeFormer
 ```
@@ -115,8 +115,7 @@ python inference_codeformer.py -w 0.7 --input_path [image folder/image path]
 :clapper: Video Enhancement
 ```
 # For video clips
-
-python inference_codeformer.py --bg_upsampler realesrgan --face_upsample -w 1.0 --input_path [video path] --save_video_fps 24
+python inference_codeformer.py --bg_upsampler realesrgan --face_upsample -w 1.0 --input_path [video path]
 ```
 
basicsr/utils/video_util.py
ADDED
@@ -0,0 +1,119 @@
+'''
+The code is modified from the Real-ESRGAN:
+https://github.com/xinntao/Real-ESRGAN/blob/master/inference_realesrgan_video.py
+
+'''
+import cv2
+import sys
+import numpy as np
+
+try:
+    import ffmpeg
+except ImportError:
+    import pip
+    pip.main(['install', '--user', 'ffmpeg-python'])
+    import ffmpeg
+
+def get_video_meta_info(video_path):
+    ret = {}
+    probe = ffmpeg.probe(video_path)
+    video_streams = [stream for stream in probe['streams'] if stream['codec_type'] == 'video']
+    has_audio = any(stream['codec_type'] == 'audio' for stream in probe['streams'])
+    ret['width'] = video_streams[0]['width']
+    ret['height'] = video_streams[0]['height']
+    ret['fps'] = eval(video_streams[0]['avg_frame_rate'])
+    ret['audio'] = ffmpeg.input(video_path).audio if has_audio else None
+    ret['nb_frames'] = int(video_streams[0]['nb_frames'])
+    return ret
+
+class VideoReader:
+    def __init__(self, video_path):
+        self.paths = []  # for image&folder type
+        self.audio = None
+        self.stream_reader = (
+            ffmpeg.input(video_path).output('pipe:', format='rawvideo', pix_fmt='bgr24',
+                                            loglevel='error').run_async(
+                                                pipe_stdin=True, pipe_stdout=True, cmd='ffmpeg'))
+        meta = get_video_meta_info(video_path)
+        self.width = meta['width']
+        self.height = meta['height']
+        self.input_fps = meta['fps']
+        self.audio = meta['audio']
+        self.nb_frames = meta['nb_frames']
+
+        self.idx = 0
+
+    def get_resolution(self):
+        return self.height, self.width
+
+    def get_fps(self):
+        if self.input_fps is not None:
+            return self.input_fps
+        return 24
+
+    def get_audio(self):
+        return self.audio
+
+    def __len__(self):
+        return self.nb_frames
+
+    def get_frame_from_stream(self):
+        img_bytes = self.stream_reader.stdout.read(self.width * self.height * 3)  # 3 bytes for one pixel
+        if not img_bytes:
+            return None
+        img = np.frombuffer(img_bytes, np.uint8).reshape([self.height, self.width, 3])
+        return img
+
+    def get_frame_from_list(self):
+        if self.idx >= self.nb_frames:
+            return None
+        img = cv2.imread(self.paths[self.idx])
+        self.idx += 1
+        return img
+
+    def get_frame(self):
+        return self.get_frame_from_stream()
+
+
+    def close(self):
+        self.stream_reader.stdin.close()
+        self.stream_reader.wait()
+
+
+class VideoWriter:
+    def __init__(self, video_save_path, height, width, fps, audio):
+        if height > 2160:
+            print('You are generating video that is larger than 4K, which will be very slow due to IO speed.',
+                  'We highly recommend to decrease the outscale(aka, -s).')
+        if audio is not None:
+            self.stream_writer = (
+                ffmpeg.input('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{width}x{height}',
+                             framerate=fps).output(
+                                 audio,
+                                 video_save_path,
+                                 pix_fmt='yuv420p',
+                                 vcodec='libx264',
+                                 loglevel='error',
+                                 acodec='copy').overwrite_output().run_async(
+                                     pipe_stdin=True, pipe_stdout=True, cmd='ffmpeg'))
+        else:
+            self.stream_writer = (
+                ffmpeg.input('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{width}x{height}',
+                             framerate=fps).output(
+                                 video_save_path, pix_fmt='yuv420p', vcodec='libx264',
+                                 loglevel='error').overwrite_output().run_async(
+                                     pipe_stdin=True, pipe_stdout=True, cmd='ffmpeg'))
+
+    def write_frame(self, frame):
+        try:
+            frame = frame.astype(np.uint8).tobytes()
+            self.stream_writer.stdin.write(frame)
+        except BrokenPipeError:
+            print('Please re-install ffmpeg and libx264 by running\n',
+                  '\t$ conda install -c conda-forge ffmpeg\n',
+                  '\t$ conda install -c conda-forge x264')
+            sys.exit(0)
+
+    def close(self):
+        self.stream_writer.stdin.close()
+        self.stream_writer.wait()
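For orientation, here is a minimal usage sketch of the two new helpers, mirroring how `inference_codeformer.py` drives them later in this diff; the input and output paths are hypothetical.

```python
from basicsr.utils.video_util import VideoReader, VideoWriter

# Read every frame plus the audio handle from the source clip (hypothetical path).
reader = VideoReader('inputs/clip.mp4')
frames = []
frame = reader.get_frame()
while frame is not None:
    frames.append(frame)
    frame = reader.get_frame()
audio = reader.get_audio()  # None if the clip has no audio track
fps = reader.get_fps()
reader.close()

# ... enhance `frames` here ...

# Re-encode the frames and copy the original audio stream back in.
height, width = frames[0].shape[:2]
writer = VideoWriter('results/clip_restored.mp4', height, width, fps, audio)
for frame in frames:
    writer.write_frame(frame)
writer.close()
```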
facelib/utils/misc.py
CHANGED
@@ -7,13 +7,13 @@ import torch
 from torch.hub import download_url_to_file, get_dir
 from urllib.parse import urlparse
 # from basicsr.utils.download_util import download_file_from_google_drive
-import gdown
-
 
 ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 
 def download_pretrained_models(file_ids, save_path_root):
+    import gdown
+
     os.makedirs(save_path_root, exist_ok=True)
 
     for file_name, file_id in file_ids.items():
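For reference, a hedged sketch of calling this helper directly; the weight file name and file ID below are placeholders, and the real name-to-ID mapping presumably lives in `scripts/download_pretrained_models.py` (not shown in this diff).

```python
from facelib.utils.misc import download_pretrained_models

# Placeholder mapping of weight file names to Google Drive file IDs.
file_ids = {'example_model.pth': '<google-drive-file-id>'}
download_pretrained_models(file_ids, save_path_root='weights/facelib')
```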
inference_codeformer.py
CHANGED
@@ -72,7 +72,7 @@ if __name__ == '__main__':
     parser.add_argument('--face_upsample', action='store_true', help='Face upsampler after enhancement. Default: False')
     parser.add_argument('--bg_tile', type=int, default=400, help='Tile size for background sampler. Default: 400')
     parser.add_argument('--suffix', type=str, default=None, help='Suffix of the restored faces. Default: None')
-    parser.add_argument('--save_video_fps', type=
+    parser.add_argument('--save_video_fps', type=float, default=None, help='Frame rate for saving video. Default: None')
 
     args = parser.parse_args()
 
@@ -83,15 +83,19 @@ if __name__ == '__main__':
         input_img_list = [args.input_path]
         result_root = f'results/test_img_{w}'
     elif args.input_path.endswith(('mp4', 'mov', 'avi')): # input video path
+        from basicsr.utils.video_util import VideoReader, VideoWriter
         input_img_list = []
-
-
-        while
+        vidreader = VideoReader(args.input_path)
+        image = vidreader.get_frame()
+        while image is not None:
             input_img_list.append(image)
-
-
+            image = vidreader.get_frame()
+        audio = vidreader.get_audio()
+        fps = vidreader.get_fps() if args.save_video_fps is None else args.save_video_fps
         video_name = os.path.basename(args.input_path)[:-4]
         result_root = f'results/{video_name}_{w}'
+        input_video = True
+        vidreader.close()
     else: # input img folder
         if args.input_path.endswith('/'):  # solve when path ends with /
             args.input_path = args.input_path[:-1]
@@ -241,6 +245,7 @@ if __name__ == '__main__':
 
     # save enhanced video
     if input_video:
+        print('Video Saving...')
         # load images
         video_frames = []
         img_list = sorted(glob.glob(os.path.join(result_root, 'final_results', '*.[jp][pn]g')))
@@ -248,14 +253,14 @@ if __name__ == '__main__':
             img = cv2.imread(img_path)
             video_frames.append(img)
         # write images to video
-
+        height, width = video_frames[0].shape[:2]
         if args.suffix is not None:
             video_name = f'{video_name}_{args.suffix}.png'
         save_restore_path = os.path.join(result_root, f'{video_name}.mp4')
-
-
+        vidwriter = VideoWriter(save_restore_path, height, width, fps, audio)
+
         for f in video_frames:
-
-
+            vidwriter.write_frame(f)
+        vidwriter.close()
 
     print(f'\nAll results are saved in {result_root}')
requirements.txt
CHANGED
@@ -15,6 +15,4 @@ tqdm
 yapf
 lpips
 gdown # supports downloading the large file from Google Drive
-
-# dlib
-# conda install -c conda-forge dlib
+ffmpeg-python