commanderx committed
Commit • 908a1ab • Parent(s): 7260a4f
Upload 439 files
This view is limited to 50 files because the commit contains too many changes.
- .gitattributes +10 -0
- LICENSE +23 -0
- README.md +121 -0
- __pycache__/audio.cpython-36.pyc +0 -0
- __pycache__/audio.cpython-37.pyc +0 -0
- __pycache__/hparams.cpython-36.pyc +0 -0
- __pycache__/hparams.cpython-37.pyc +0 -0
- audio.py +136 -0
- basicsr/README.md +1 -0
- basicsr/__init__.py +12 -0
- basicsr/__pycache__/__init__.cpython-36.pyc +0 -0
- basicsr/__pycache__/__init__.cpython-37.pyc +0 -0
- basicsr/__pycache__/apply_sr.cpython-36.pyc +0 -0
- basicsr/__pycache__/apply_sr.cpython-37.pyc +0 -0
- basicsr/__pycache__/test.cpython-36.pyc +0 -0
- basicsr/__pycache__/test.cpython-37.pyc +0 -0
- basicsr/__pycache__/train.cpython-36.pyc +0 -0
- basicsr/__pycache__/train.cpython-37.pyc +0 -0
- basicsr/apply_sr.py +25 -0
- basicsr/archs/__init__.py +25 -0
- basicsr/archs/__pycache__/__init__.cpython-36.pyc +0 -0
- basicsr/archs/__pycache__/__init__.cpython-37.pyc +0 -0
- basicsr/archs/__pycache__/arch_util.cpython-36.pyc +0 -0
- basicsr/archs/__pycache__/arch_util.cpython-37.pyc +0 -0
- basicsr/archs/__pycache__/dfdnet_arch.cpython-36.pyc +0 -0
- basicsr/archs/__pycache__/dfdnet_arch.cpython-37.pyc +0 -0
- basicsr/archs/__pycache__/dfdnet_util.cpython-36.pyc +0 -0
- basicsr/archs/__pycache__/dfdnet_util.cpython-37.pyc +0 -0
- basicsr/archs/__pycache__/discriminator_arch.cpython-36.pyc +0 -0
- basicsr/archs/__pycache__/discriminator_arch.cpython-37.pyc +0 -0
- basicsr/archs/__pycache__/duf_arch.cpython-36.pyc +0 -0
- basicsr/archs/__pycache__/duf_arch.cpython-37.pyc +0 -0
- basicsr/archs/__pycache__/edsr_arch.cpython-36.pyc +0 -0
- basicsr/archs/__pycache__/edsr_arch.cpython-37.pyc +0 -0
- basicsr/archs/__pycache__/edvr_arch.cpython-36.pyc +0 -0
- basicsr/archs/__pycache__/edvr_arch.cpython-37.pyc +0 -0
- basicsr/archs/__pycache__/rcan_arch.cpython-36.pyc +0 -0
- basicsr/archs/__pycache__/rcan_arch.cpython-37.pyc +0 -0
- basicsr/archs/__pycache__/ridnet_arch.cpython-36.pyc +0 -0
- basicsr/archs/__pycache__/ridnet_arch.cpython-37.pyc +0 -0
- basicsr/archs/__pycache__/rrdbnet_arch.cpython-36.pyc +0 -0
- basicsr/archs/__pycache__/rrdbnet_arch.cpython-37.pyc +0 -0
- basicsr/archs/__pycache__/spynet_arch.cpython-36.pyc +0 -0
- basicsr/archs/__pycache__/spynet_arch.cpython-37.pyc +0 -0
- basicsr/archs/__pycache__/srresnet_arch.cpython-36.pyc +0 -0
- basicsr/archs/__pycache__/srresnet_arch.cpython-37.pyc +0 -0
- basicsr/archs/__pycache__/stylegan2_arch.cpython-36.pyc +0 -0
- basicsr/archs/__pycache__/stylegan2_arch.cpython-37.pyc +0 -0
- basicsr/archs/__pycache__/tof_arch.cpython-36.pyc +0 -0
- basicsr/archs/__pycache__/tof_arch.cpython-37.pyc +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,13 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+checkpoints/pretrained.state filter=lfs diff=lfs merge=lfs -text
+examples/kennedy_hd.mkv filter=lfs diff=lfs merge=lfs -text
+examples/mona_hd.jpg filter=lfs diff=lfs merge=lfs -text
+examples/mona_hd.mkv filter=lfs diff=lfs merge=lfs -text
+input_audios/part_000_RVC_1.wav filter=lfs diff=lfs merge=lfs -text
+input_videos/MC.mp4 filter=lfs diff=lfs merge=lfs -text
+output_videos_hd/kennedy.mkv filter=lfs diff=lfs merge=lfs -text
+output_videos_hd/mona.mkv filter=lfs diff=lfs merge=lfs -text
+temp/result.avi filter=lfs diff=lfs merge=lfs -text
+temp/temp.wav filter=lfs diff=lfs merge=lfs -text
LICENSE
ADDED
@@ -0,0 +1,23 @@
MIT License

Copyright (c) 2023 Saif Hassan

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

NOTE: PLEASE READ LICENSE REQUIREMENTS (COPYRIGHTS INFORMATION) FROM `WAV2LIP OFFICIAL REPO`, MENTIONED ON MAIN PAGE OF THIS REPOSITORY.
README.md
ADDED
@@ -0,0 +1,121 @@
# Wav2Lip-HD: Improving Wav2Lip to achieve High-Fidelity Videos

This repository contains code for achieving high-fidelity lip-syncing in videos, using the [Wav2Lip algorithm](https://github.com/Rudrabha/Wav2Lip) for lip-syncing and the [Real-ESRGAN algorithm](https://github.com/xinntao/Real-ESRGAN) for super-resolution. The combination of these two algorithms allows for the creation of lip-synced videos that are both highly accurate and visually stunning.

## Algorithm

The algorithm for achieving high-fidelity lip-syncing with Wav2Lip and Real-ESRGAN can be summarized as follows:

1. The input video and audio are given to the `Wav2Lip` algorithm.
2. A Python script extracts the frames from the video generated by Wav2Lip (see the sketch after this list).
3. The frames are passed to the Real-ESRGAN algorithm to improve their quality.
4. The high-quality frames are then converted back into a video, along with the original audio, using ffmpeg.
5. The result is a high-quality lip-synced video.
6. The specific steps for running this algorithm are described in the [Testing Model](https://github.com/saifhassan/Wav2Lip-HD#testing-model) section of this README.
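
A minimal sketch of the frame-extraction step (step 2) using OpenCV; the function name, frame-numbering pattern, and example paths are illustrative assumptions, not the repository's actual script:

```
import os
import cv2

def extract_frames(video_path, out_dir):
    # Dump every frame of video_path into out_dir as numbered JPEGs.
    os.makedirs(out_dir, exist_ok=True)
    cap = cv2.VideoCapture(video_path)
    idx = 0
    while True:
        ok, frame = cap.read()
        if not ok:  # end of stream
            break
        cv2.imwrite(os.path.join(out_dir, f"{idx:05d}.jpg"), frame)
        idx += 1
    cap.release()
    return idx

# e.g. extract_frames("output_videos_wav2lip/result.avi", "frames_wav2lip")
```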

## Testing Model

To test the Wav2Lip-HD model, follow these steps:

1. Clone this repository and install the requirements using the following commands (make sure Python and CUDA are already installed):

```
git clone https://github.com/saifhassan/Wav2Lip-HD.git
cd Wav2Lip-HD
pip install -r requirements.txt
```

2. Download the weights:

| Model | Directory | Download Link |
| :------------- | :------------- | :-----: |
| Wav2Lip | [checkpoints/](https://github.com/saifhassan/Wav2Lip-HD/tree/main/checkpoints) | [Link](https://drive.google.com/drive/folders/1tB_uz-TYMePRMZzrDMdShWUZZ0JK3SIZ?usp=sharing) |
| ESRGAN | [experiments/001_ESRGAN_x4_f64b23_custom16k_500k_B16G1_wandb/models/](https://github.com/saifhassan/Wav2Lip-HD/tree/main/experiments/001_ESRGAN_x4_f64b23_custom16k_500k_B16G1_wandb/models) | [Link](https://drive.google.com/file/d/1Al8lEpnx2K-kDX7zL2DBcAuDnSKXACPb/view?usp=sharing) |
| Face_Detection | [face_detection/detection/sfd/](https://github.com/saifhassan/Wav2Lip-HD/tree/main/face_detection/detection/sfd) | [Link](https://drive.google.com/file/d/1uNLYCPFFmO-og3WSHyFytJQLLYOwH5uY/view?usp=sharing) |
| Real-ESRGAN | Real-ESRGAN/gfpgan/weights/ | [Link](https://drive.google.com/drive/folders/1BLx6aMpHgFt41fJ27_cRmT8bt53kVAYG?usp=sharing) |
| Real-ESRGAN | Real-ESRGAN/weights/ | [Link](https://drive.google.com/file/d/1qNIf8cJl_dQo3ivelPJVWFkApyEAGnLi/view?usp=sharing) |
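
The weight files are hosted on Google Drive. One way to fetch them from a script is the `gdown` package (an assumption on our part — downloading through the browser works equally well); the output paths mirror the Directory column above:

```
import gdown  # assumes `pip install gdown`

# Single-file link (here: the ESRGAN checkpoint); fuzzy=True lets gdown
# accept a full drive.google.com/file/d/... share URL.
gdown.download(
    url="https://drive.google.com/file/d/1Al8lEpnx2K-kDX7zL2DBcAuDnSKXACPb/view?usp=sharing",
    output="experiments/001_ESRGAN_x4_f64b23_custom16k_500k_B16G1_wandb/models/",
    fuzzy=True,
)

# Folder link (here: the Wav2Lip checkpoints folder).
gdown.download_folder(
    url="https://drive.google.com/drive/folders/1tB_uz-TYMePRMZzrDMdShWUZZ0JK3SIZ?usp=sharing",
    output="checkpoints/",
)
```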

3. Put the input video in the `input_videos` directory and the input audio in the `input_audios` directory.
4. Open the `run_final.sh` file and modify the following parameters:

   `filename=kennedy` (just the video file name, without extension)

   `input_audio=input_audios/ai.wav` (the audio file name, with extension)

5. Execute `run_final.sh` using the following command:

```
bash run_final.sh
```

6. Outputs

- The `output_videos_wav2lip` directory contains the video output generated by the Wav2Lip algorithm.
- The `frames_wav2lip` directory contains the frames extracted from that video.
- The `frames_hd` directory contains the frames after super-resolution with the Real-ESRGAN algorithm.
- The `output_videos_hd` directory contains the final high-quality video output generated by Wav2Lip-HD (a sketch of the recombination step follows this list).
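
For reference, the final recombination (algorithm step 4) that `run_final.sh` drives can be reproduced along these lines; the fps value, frame-numbering pattern, and example paths are assumptions — check the script for the exact flags:

```
import subprocess

def frames_to_video(frames_dir, audio_path, out_path, fps=25):
    # Encode the upscaled frames and mux in the original audio.
    subprocess.run([
        "ffmpeg", "-y",
        "-framerate", str(fps),
        "-i", f"{frames_dir}/%05d.jpg",   # numbered frames from the SR step
        "-i", audio_path,
        "-c:v", "libx264", "-pix_fmt", "yuv420p",
        "-c:a", "aac", "-shortest",
        out_path,
    ], check=True)

# e.g. frames_to_video("frames_hd", "input_audios/ai.wav", "output_videos_hd/kennedy.mkv")
```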

## Results

Wav2Lip-HD produces results in two forms, frames and videos. Both are shared below.

### Example output frames

<table>
  <tr>
    <td>Frame by Wav2Lip</td>
    <td>Optimized Frame</td>
  </tr>
  <tr>
    <td><img src="examples/1_low.jpg" width=500></td>
    <td><img src="examples/1_hd.jpg" width=500></td>
  </tr>
  <tr>
    <td><img src="examples/kennedy_low.jpg" width=500></td>
    <td><img src="examples/kennedy_hd.jpg" width=500></td>
  </tr>
  <tr>
    <td><img src="examples/mona_low.jpg" width=500></td>
    <td><img src="examples/mona_hd.jpg" width=500></td>
  </tr>
</table>

### Example output videos

| Video by Wav2Lip | Optimized Video |
| ------------- | ------------- |
| <video src="https://user-images.githubusercontent.com/11873763/229389410-56d96244-8c67-4add-a43e-a4900aa9db88.mp4" width="500"> | <video src="https://user-images.githubusercontent.com/11873763/229389414-d5cb6d33-7772-47a7-b829-9e3d5c3945a1.mp4" width="500"> |
| <video src="https://user-images.githubusercontent.com/11873763/229389751-507669f1-7772-4863-ab23-8df7f206a065.mp4" width="500"> | <video src="https://user-images.githubusercontent.com/11873763/229389962-5373b765-ce3a-4af2-bd6a-8be8543ee933.mp4" width="500"> |

## Acknowledgements

We would like to thank the following repositories and libraries for their contributions to our work:

1. The [Wav2Lip](https://github.com/Rudrabha/Wav2Lip) repository, whose model is the core of our algorithm and performs the lip-sync.
2. The [face-parsing.PyTorch](https://github.com/zllrunning/face-parsing.PyTorch) repository, which provides us with a model for face segmentation.
3. The [Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN) repository, which provides the super-resolution component of our algorithm.
4. [ffmpeg](https://ffmpeg.org), which we use for converting frames to video.
__pycache__/audio.cpython-36.pyc
ADDED
Binary file (4.51 kB).
__pycache__/audio.cpython-37.pyc
ADDED
Binary file (4.55 kB).
__pycache__/hparams.cpython-36.pyc
ADDED
Binary file (2.37 kB).
__pycache__/hparams.cpython-37.pyc
ADDED
Binary file (2.39 kB).
audio.py
ADDED
@@ -0,0 +1,136 @@
import librosa
import librosa.filters
import numpy as np
# import tensorflow as tf
from scipy import signal
from scipy.io import wavfile
from hparams import hparams as hp

def load_wav(path, sr):
    return librosa.core.load(path, sr=sr)[0]

def save_wav(wav, path, sr):
    wav *= 32767 / max(0.01, np.max(np.abs(wav)))
    # proposed by @dsmiller
    wavfile.write(path, sr, wav.astype(np.int16))

def save_wavenet_wav(wav, path, sr):
    # note: librosa.output was removed in librosa >= 0.8; this call needs an older librosa
    librosa.output.write_wav(path, wav, sr=sr)

def preemphasis(wav, k, preemphasize=True):
    if preemphasize:
        return signal.lfilter([1, -k], [1], wav)
    return wav

def inv_preemphasis(wav, k, inv_preemphasize=True):
    if inv_preemphasize:
        return signal.lfilter([1], [1, -k], wav)
    return wav

def get_hop_size():
    hop_size = hp.hop_size
    if hop_size is None:
        assert hp.frame_shift_ms is not None
        hop_size = int(hp.frame_shift_ms / 1000 * hp.sample_rate)
    return hop_size

def linearspectrogram(wav):
    D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
    S = _amp_to_db(np.abs(D)) - hp.ref_level_db

    if hp.signal_normalization:
        return _normalize(S)
    return S

def melspectrogram(wav):
    D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
    S = _amp_to_db(_linear_to_mel(np.abs(D))) - hp.ref_level_db

    if hp.signal_normalization:
        return _normalize(S)
    return S

def _lws_processor():
    import lws
    return lws.lws(hp.n_fft, get_hop_size(), fftsize=hp.win_size, mode="speech")

def _stft(y):
    if hp.use_lws:
        # _lws_processor() takes no arguments; the original code passed `hp` here,
        # which would raise a TypeError whenever hp.use_lws is True
        return _lws_processor().stft(y).T
    else:
        return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=get_hop_size(), win_length=hp.win_size)

##########################################################
# Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)
def num_frames(length, fsize, fshift):
    """Compute number of time frames of spectrogram"""
    pad = (fsize - fshift)
    if length % fshift == 0:
        M = (length + pad * 2 - fsize) // fshift + 1
    else:
        M = (length + pad * 2 - fsize) // fshift + 2
    return M


def pad_lr(x, fsize, fshift):
    """Compute left and right padding"""
    M = num_frames(len(x), fsize, fshift)
    pad = (fsize - fshift)
    T = len(x) + 2 * pad
    r = (M - 1) * fshift + fsize - T
    return pad, pad + r
##########################################################
# Librosa correct padding
def librosa_pad_lr(x, fsize, fshift):
    return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0]

# Conversions
_mel_basis = None

def _linear_to_mel(spectrogram):
    global _mel_basis
    if _mel_basis is None:
        _mel_basis = _build_mel_basis()
    return np.dot(_mel_basis, spectrogram)

def _build_mel_basis():
    assert hp.fmax <= hp.sample_rate // 2
    # positional sr/n_fft arguments require librosa < 0.10
    return librosa.filters.mel(hp.sample_rate, hp.n_fft, n_mels=hp.num_mels,
                               fmin=hp.fmin, fmax=hp.fmax)

def _amp_to_db(x):
    min_level = np.exp(hp.min_level_db / 20 * np.log(10))
    return 20 * np.log10(np.maximum(min_level, x))

def _db_to_amp(x):
    return np.power(10.0, (x) * 0.05)

def _normalize(S):
    if hp.allow_clipping_in_normalization:
        if hp.symmetric_mels:
            return np.clip((2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value,
                           -hp.max_abs_value, hp.max_abs_value)
        else:
            return np.clip(hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db)), 0, hp.max_abs_value)

    assert S.max() <= 0 and S.min() - hp.min_level_db >= 0
    if hp.symmetric_mels:
        return (2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value
    else:
        return hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db))

def _denormalize(D):
    if hp.allow_clipping_in_normalization:
        if hp.symmetric_mels:
            return (((np.clip(D, -hp.max_abs_value,
                              hp.max_abs_value) + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value))
                    + hp.min_level_db)
        else:
            return ((np.clip(D, 0, hp.max_abs_value) * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)

    if hp.symmetric_mels:
        return (((D + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value)) + hp.min_level_db)
    else:
        return ((D * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)
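
A quick usage sketch for the module above; the wav path is hypothetical, and `hparams.py` must define the referenced fields (`sample_rate`, `num_mels`, `n_fft`, and so on):

```
import audio
from hparams import hparams as hp

wav = audio.load_wav("input_audios/ai.wav", hp.sample_rate)  # 1-D float waveform
mel = audio.melspectrogram(wav)                              # shape: (num_mels, frames)
print(wav.shape, mel.shape)
```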
basicsr/README.md
ADDED
@@ -0,0 +1 @@
This folder is almost unmodified code from the [BasicSR](https://github.com/xinntao/BasicSR.git) repository, which provides solutions for image and video super-resolution.
basicsr/__init__.py
ADDED
@@ -0,0 +1,12 @@
# https://github.com/xinntao/BasicSR
# flake8: noqa
from .archs import *
from .data import *
from .losses import *
from .metrics import *
from .models import *
from .ops import *
from .test import *
from .train import *
from .utils import *
# from .version import __gitsha__, __version__
basicsr/__pycache__/__init__.cpython-36.pyc
ADDED
Binary file (277 Bytes).
basicsr/__pycache__/__init__.cpython-37.pyc
ADDED
Binary file (311 Bytes).
basicsr/__pycache__/apply_sr.cpython-36.pyc
ADDED
Binary file (1.07 kB).
basicsr/__pycache__/apply_sr.cpython-37.pyc
ADDED
Binary file (1.1 kB).
basicsr/__pycache__/test.cpython-36.pyc
ADDED
Binary file (1.6 kB).
basicsr/__pycache__/test.cpython-37.pyc
ADDED
Binary file (1.64 kB).
basicsr/__pycache__/train.cpython-36.pyc
ADDED
Binary file (6.21 kB).
basicsr/__pycache__/train.cpython-37.pyc
ADDED
Binary file (6.24 kB).
basicsr/apply_sr.py
ADDED
@@ -0,0 +1,25 @@
import cv2
import numpy as np
import torch

from basicsr.archs.rrdbnet_arch import RRDBNet


def init_sr_model(model_path):
    model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32)
    model.load_state_dict(torch.load(model_path)['params'], strict=True)
    model.eval()
    model = model.cuda()
    return model


def enhance(model, image):
    img = image.astype(np.float32) / 255.
    img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()  # BGR -> RGB, HWC -> CHW
    img = img.unsqueeze(0).cuda()
    with torch.no_grad():
        output = model(img)
    output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
    output = np.transpose(output[[2, 1, 0], :, :], (1, 2, 0))  # RGB -> BGR, CHW -> HWC
    output = (output * 255.0).round().astype(np.uint8)
    return output
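
A hedged usage sketch for the two helpers above; the checkpoint filename and frame paths are assumptions (any x4 RRDBNet checkpoint saved with a `params` key should work), and `init_sr_model` requires a CUDA device:

```
import cv2
from basicsr.apply_sr import init_sr_model, enhance

model = init_sr_model("experiments/001_ESRGAN_x4_f64b23_custom16k_500k_B16G1_wandb/models/net_g.pth")
frame = cv2.imread("frames_wav2lip/00000.jpg")  # BGR uint8, as enhance() expects
sr = enhance(model, frame)                      # 4x-upscaled BGR uint8
cv2.imwrite("frames_hd/00000.jpg", sr)
```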
basicsr/archs/__init__.py
ADDED
@@ -0,0 +1,25 @@
import importlib
from copy import deepcopy
from os import path as osp

from basicsr.utils import get_root_logger, scandir
from basicsr.utils.registry import ARCH_REGISTRY

__all__ = ['build_network']

# automatically scan and import arch modules for registry
# scan all the files under the 'archs' folder and collect files ending with
# '_arch.py'
arch_folder = osp.dirname(osp.abspath(__file__))
arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')]
# import all the arch modules
_arch_modules = [importlib.import_module(f'basicsr.archs.{file_name}') for file_name in arch_filenames]


def build_network(opt):
    opt = deepcopy(opt)
    network_type = opt.pop('type')
    net = ARCH_REGISTRY.get(network_type)(**opt)
    logger = get_root_logger()
    logger.info(f'Network [{net.__class__.__name__}] is created.')
    return net
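
For illustration, a registry lookup through `build_network`; the option dict mirrors the RRDBNet constructor used in `apply_sr.py`, and the `type` key must match a class registered by one of the scanned `*_arch.py` modules:

```
from basicsr.archs import build_network

opt = {
    'type': 'RRDBNet',   # registry key populated by the automatic scan above
    'num_in_ch': 3, 'num_out_ch': 3,
    'num_feat': 64, 'num_block': 23, 'num_grow_ch': 32,
}
net = build_network(opt)  # 'type' is popped; remaining keys become constructor kwargs
```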
basicsr/archs/__pycache__/__init__.cpython-36.pyc
ADDED
Binary file (1.1 kB).
basicsr/archs/__pycache__/__init__.cpython-37.pyc
ADDED
Binary file (1.13 kB).
basicsr/archs/__pycache__/arch_util.cpython-36.pyc
ADDED
Binary file (8.22 kB).
basicsr/archs/__pycache__/arch_util.cpython-37.pyc
ADDED
Binary file (8.26 kB).
basicsr/archs/__pycache__/dfdnet_arch.cpython-36.pyc
ADDED
Binary file (4.98 kB).
basicsr/archs/__pycache__/dfdnet_arch.cpython-37.pyc
ADDED
Binary file (5.04 kB).
basicsr/archs/__pycache__/dfdnet_util.cpython-36.pyc
ADDED
Binary file (5.64 kB).
basicsr/archs/__pycache__/dfdnet_util.cpython-37.pyc
ADDED
Binary file (5.68 kB).
basicsr/archs/__pycache__/discriminator_arch.cpython-36.pyc
ADDED
Binary file (2.51 kB).
basicsr/archs/__pycache__/discriminator_arch.cpython-37.pyc
ADDED
Binary file (2.53 kB).
basicsr/archs/__pycache__/duf_arch.cpython-36.pyc
ADDED
Binary file (10.1 kB).
basicsr/archs/__pycache__/duf_arch.cpython-37.pyc
ADDED
Binary file (9.56 kB).
basicsr/archs/__pycache__/edsr_arch.cpython-36.pyc
ADDED
Binary file (2.28 kB).
basicsr/archs/__pycache__/edsr_arch.cpython-37.pyc
ADDED
Binary file (2.3 kB).
basicsr/archs/__pycache__/edvr_arch.cpython-36.pyc
ADDED
Binary file (11.5 kB).
basicsr/archs/__pycache__/edvr_arch.cpython-37.pyc
ADDED
Binary file (11.5 kB).
basicsr/archs/__pycache__/rcan_arch.cpython-36.pyc
ADDED
Binary file (5.05 kB).
basicsr/archs/__pycache__/rcan_arch.cpython-37.pyc
ADDED
Binary file (5.07 kB).
basicsr/archs/__pycache__/ridnet_arch.cpython-36.pyc
ADDED
Binary file (6.69 kB).
basicsr/archs/__pycache__/ridnet_arch.cpython-37.pyc
ADDED
Binary file (6.71 kB).
basicsr/archs/__pycache__/rrdbnet_arch.cpython-36.pyc
ADDED
Binary file (4.02 kB).
basicsr/archs/__pycache__/rrdbnet_arch.cpython-37.pyc
ADDED
Binary file (4.05 kB).
basicsr/archs/__pycache__/spynet_arch.cpython-36.pyc
ADDED
Binary file (3.8 kB).
basicsr/archs/__pycache__/spynet_arch.cpython-37.pyc
ADDED
Binary file (3.84 kB).
basicsr/archs/__pycache__/srresnet_arch.cpython-36.pyc
ADDED
Binary file (2.51 kB).
basicsr/archs/__pycache__/srresnet_arch.cpython-37.pyc
ADDED
Binary file (2.54 kB).
basicsr/archs/__pycache__/stylegan2_arch.cpython-36.pyc
ADDED
Binary file (25.9 kB).
basicsr/archs/__pycache__/stylegan2_arch.cpython-37.pyc
ADDED
Binary file (25.9 kB).
basicsr/archs/__pycache__/tof_arch.cpython-36.pyc
ADDED
Binary file (6.3 kB).
basicsr/archs/__pycache__/tof_arch.cpython-37.pyc
ADDED
Binary file (6.33 kB).