Katock committed on
Commit 040c3ba · 1 Parent(s): 794e885
Files changed (40)
  1. app.py +7 -7
  2. cluster/kmeans.py +201 -0
  3. cluster/train_cluster.py +31 -36
  4. diffusion/__init__.py +0 -0
  5. diffusion/data_loaders.py +284 -0
  6. diffusion/diffusion.py +317 -0
  7. diffusion/diffusion_onnx.py +612 -0
  8. diffusion/dpm_solver_pytorch.py +1201 -0
  9. diffusion/how to export onnx.md +4 -0
  10. diffusion/infer_gt_mel.py +74 -0
  11. diffusion/logger/__init__.py +0 -0
  12. diffusion/logger/saver.py +150 -0
  13. diffusion/logger/utils.py +126 -0
  14. diffusion/onnx_export.py +226 -0
  15. diffusion/solver.py +195 -0
  16. diffusion/unit2mel.py +147 -0
  17. diffusion/vocoder.py +94 -0
  18. diffusion/wavenet.py +108 -0
  19. inference/infer_tool.py +267 -58
  20. inference/infer_tool_grad.py +1 -1
  21. modules/F0Predictor/CrepeF0Predictor.py +31 -0
  22. modules/F0Predictor/DioF0Predictor.py +85 -0
  23. modules/F0Predictor/F0Predictor.py +16 -0
  24. modules/F0Predictor/HarvestF0Predictor.py +81 -0
  25. modules/F0Predictor/PMF0Predictor.py +83 -0
  26. modules/F0Predictor/__init__.py +0 -0
  27. modules/F0Predictor/crepe.py +340 -0
  28. modules/enhancer.py +105 -0
  29. vdecoder/hifiganwithsnake/alias/__init__.py +6 -0
  30. vdecoder/hifiganwithsnake/alias/act.py +129 -0
  31. vdecoder/hifiganwithsnake/alias/filter.py +95 -0
  32. vdecoder/hifiganwithsnake/alias/resample.py +49 -0
  33. vdecoder/hifiganwithsnake/env.py +15 -0
  34. vdecoder/hifiganwithsnake/models.py +518 -0
  35. vdecoder/hifiganwithsnake/nvSTFT.py +111 -0
  36. vdecoder/hifiganwithsnake/utils.py +68 -0
  37. vdecoder/nsf_hifigan/env.py +15 -0
  38. vdecoder/nsf_hifigan/models.py +439 -0
  39. vdecoder/nsf_hifigan/nvSTFT.py +134 -0
  40. vdecoder/nsf_hifigan/utils.py +68 -0
app.py CHANGED
@@ -82,16 +82,16 @@ if __name__ == '__main__':
         for (name, cover, vc_fn) in models:
             with gr.TabItem(name):
                 with gr.Row():
-                    gr.Markdown(
-                        '<div align="center">'
-                        f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else ""
-                        '</div>'
-                    )
-                with gr.Row():
+                    with gr.Column():
+                        gr.Markdown(
+                            '<div align="center">'
+                            f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else ""
+                            '</div>'
+                        )
                     with gr.Column():
                         vc_input = gr.Audio(label="输入干声" + ' (小于 20 秒)' if limitation else '')
                         vc_transform = gr.Number(label="音高调整(支持正负半音,12为一个八度)", value=0)
-                        auto_f0 = gr.Checkbox(label="自动音高预测(说话模式)", value=False)
+                        auto_f0 = gr.Checkbox(label="自动音高预测(非唱歌音频)", value=False)
                         vc_submit = gr.Button("生成", variant="primary")
                     with gr.Column():
                         vc_output1 = gr.Textbox(label="输出信息")
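The app.py hunk is a pure layout fix: the cover image, previously emitted directly inside the gr.Row (followed by a stray second gr.Row), now gets its own gr.Column, so the image, the input controls, and the outputs render as three side-by-side columns; the auto-F0 checkbox label is also reworded from "说话模式" (speech mode) to "非唱歌音频" (non-singing audio). A minimal sketch of the resulting structure, assuming `models`, `vc_fn`, and `limitation` are defined elsewhere in app.py (labels translated here for readability):

import gradio as gr

# Sketch of the new per-tab layout; not the verbatim app.py code.
with gr.Blocks() as app:
    with gr.Tabs():
        for (name, cover, vc_fn) in models:
            with gr.TabItem(name):
                with gr.Row():
                    with gr.Column():  # new column: cover image
                        gr.Markdown(f'<img src="file/{cover}">' if cover else "")
                    with gr.Column():  # input controls
                        vc_input = gr.Audio(label="dry vocal input")
                        vc_transform = gr.Number(label="pitch shift in semitones", value=0)
                        auto_f0 = gr.Checkbox(label="automatic pitch prediction (non-singing audio)", value=False)
                        vc_submit = gr.Button("Generate", variant="primary")
                    with gr.Column():  # outputs
                        vc_output1 = gr.Textbox(label="output message")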
cluster/kmeans.py ADDED
@@ -0,0 +1,201 @@
+import math,pdb
+import torch,pynvml
+from torch.nn.functional import normalize
+from time import time
+import numpy as np
+# device=torch.device("cuda:0")
+def _kpp(data: torch.Tensor, k: int, sample_size: int = -1):
+    """Picks k points in the data based on the kmeans++ method.
+
+    Parameters
+    ----------
+    data : torch.Tensor
+        Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
+        data, rank 2 multidimensional data, in which case one
+        row is one observation.
+    k : int
+        Number of samples to generate.
+    sample_size : int
+        sample data to avoid memory overflow during calculation
+
+    Returns
+    -------
+    init : ndarray
+        A 'k' by 'N' array containing the initial centroids.
+
+    References
+    ----------
+    .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
+       careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
+       on Discrete Algorithms, 2007.
+    .. [2] scipy/cluster/vq.py: _kpp
+    """
+    batch_size = data.shape[0]
+    if batch_size > sample_size:
+        data = data[torch.randint(0, batch_size, [sample_size], device=data.device)]
+    dims = data.shape[1] if len(data.shape) > 1 else 1
+    init = torch.zeros((k, dims)).to(data.device)
+    r = torch.distributions.uniform.Uniform(0, 1)
+    for i in range(k):
+        if i == 0:
+            init[i, :] = data[torch.randint(data.shape[0], [1])]
+        else:
+            D2 = torch.cdist(init[:i, :][None, :], data[None, :], p=2)[0].amin(dim=0)
+            probs = D2 / torch.sum(D2)
+            cumprobs = torch.cumsum(probs, dim=0)
+            init[i, :] = data[torch.searchsorted(cumprobs, r.sample([1]).to(data.device))]
+    return init
+
+class KMeansGPU:
+    '''
+    Kmeans clustering algorithm implemented with PyTorch
+
+    Parameters:
+      n_clusters: int,
+        Number of clusters
+
+      max_iter: int, default: 200
+        Maximum number of iterations
+
+      tol: float, default: 0.0001
+        Tolerance
+
+      verbose: int, default: 0
+        Verbosity
+
+      mode: {'euclidean', 'cosine'}, default: 'euclidean'
+        Type of distance measure
+
+      init_method: {'random', 'point', '++'}
+        Type of initialization
+
+      minibatch: {None, int}, default: None
+        Batch size of MinibatchKmeans algorithm
+        if None perform full KMeans algorithm
+
+    Attributes:
+      centroids: torch.Tensor, shape: [n_clusters, n_features]
+        cluster centroids
+    '''
+    def __init__(self, n_clusters, max_iter=200, tol=1e-4, verbose=0, mode="euclidean", device=torch.device("cuda:0")):
+        self.n_clusters = n_clusters
+        self.max_iter = max_iter
+        self.tol = tol
+        self.verbose = verbose
+        self.mode = mode
+        self.device = device
+        pynvml.nvmlInit()
+        gpu_handle = pynvml.nvmlDeviceGetHandleByIndex(device.index)
+        info = pynvml.nvmlDeviceGetMemoryInfo(gpu_handle)
+        self.minibatch = int(33e6 / self.n_clusters * info.free / 1024 / 1024 / 1024)
+        print("free_mem/GB:", info.free / 1024 / 1024 / 1024, "minibatch:", self.minibatch)
+
+    @staticmethod
+    def cos_sim(a, b):
+        """
+        Compute cosine similarity of 2 sets of vectors
+
+        Parameters:
+          a: torch.Tensor, shape: [m, n_features]
+
+          b: torch.Tensor, shape: [n, n_features]
+        """
+        return normalize(a, dim=-1) @ normalize(b, dim=-1).transpose(-2, -1)
+
+    @staticmethod
+    def euc_sim(a, b):
+        """
+        Compute euclidean similarity of 2 sets of vectors
+        Parameters:
+          a: torch.Tensor, shape: [m, n_features]
+          b: torch.Tensor, shape: [n, n_features]
+        """
+        return 2 * a @ b.transpose(-2, -1) - (a**2).sum(dim=1)[..., :, None] - (b**2).sum(dim=1)[..., None, :]
+
+    def max_sim(self, a, b):
+        """
+        Compute maximum similarity (or minimum distance) of each vector
+        in a with all of the vectors in b
+        Parameters:
+          a: torch.Tensor, shape: [m, n_features]
+          b: torch.Tensor, shape: [n, n_features]
+        """
+        if self.mode == 'cosine':
+            sim_func = self.cos_sim
+        elif self.mode == 'euclidean':
+            sim_func = self.euc_sim
+        sim = sim_func(a, b)
+        max_sim_v, max_sim_i = sim.max(dim=-1)
+        return max_sim_v, max_sim_i
+
+    def fit_predict(self, X):
+        """
+        Combination of fit() and predict() methods.
+        This is faster than calling fit() and predict() separately.
+        Parameters:
+          X: torch.Tensor, shape: [n_samples, n_features]
+          centroids: {torch.Tensor, None}, default: None
+            if given, centroids will be initialized with given tensor
+            if None, centroids will be randomly chosen from X
+        Return:
+          labels: torch.Tensor, shape: [n_samples]
+
+        mini_ = 33e6/k * remain
+        mini = min(mini_, fea_shape)
+        offset = log2(k/1000) * 1.5
+        kpp_all = min(mini_*10/offset, fea_shape)
+        kpp_sample = min(mini_/12/offset, fea_shape)
+        """
+        assert isinstance(X, torch.Tensor), "input must be torch.Tensor"
+        assert X.dtype in [torch.half, torch.float, torch.double], "input must be floating point"
+        assert X.ndim == 2, "input must be a 2d tensor with shape: [n_samples, n_features] "
+        # print("verbose:%s"%self.verbose)
+
+        offset = np.power(1.5, np.log(self.n_clusters / 1000)) / np.log(2)
+        with torch.no_grad():
+            batch_size = X.shape[0]
+            # print(self.minibatch, int(self.minibatch * 10 / offset), batch_size)
+            start_time = time()
+            if (self.minibatch * 10 // offset < batch_size):
+                x = X[torch.randint(0, batch_size, [int(self.minibatch * 10 / offset)])].to(self.device)
+            else:
+                x = X.to(self.device)
+            # print(x.device)
+            self.centroids = _kpp(x, self.n_clusters, min(int(self.minibatch / 12 / offset), batch_size))
+            del x
+            torch.cuda.empty_cache()
+            # self.centroids = self.centroids.to(self.device)
+            num_points_in_clusters = torch.ones(self.n_clusters, device=self.device, dtype=X.dtype)  # all ones initially
+            closest = None  # [n_samples], int64
+            if (self.minibatch >= batch_size // 2 and self.minibatch < batch_size):
+                X = X[torch.randint(0, batch_size, [self.minibatch])].to(self.device)
+            elif (self.minibatch >= batch_size):
+                X = X.to(self.device)
+            for i in range(self.max_iter):
+                iter_time = time()
+                if self.minibatch < batch_size // 2:  # usable minibatch is too small: resample from RAM to VRAM every iteration
+                    x = X[torch.randint(0, batch_size, [self.minibatch])].to(self.device)
+                else:  # otherwise keep everything cached on the GPU
+                    x = X
+
+                closest = self.max_sim(a=x, b=self.centroids)[1].to(torch.int16)  # [n_samples], values 0..n_clusters-1
+                matched_clusters, counts = closest.unique(return_counts=True)  # int64, up to n_clusters entries
+                expanded_closest = closest[None].expand(self.n_clusters, -1)  # [n_clusters, n_samples], int16
+                mask = (expanded_closest == torch.arange(self.n_clusters, device=self.device)[:, None]).to(X.dtype)  # right-hand side of == is an int64 range of length n_clusters
+                c_grad = mask @ x / mask.sum(-1)[..., :, None]
+                c_grad[c_grad != c_grad] = 0  # remove NaNs
+                error = (c_grad - self.centroids).pow(2).sum()
+                if self.minibatch is not None:
+                    lr = 1 / num_points_in_clusters[:, None] * 0.9 + 0.1
+                else:
+                    lr = 1
+                matched_clusters = matched_clusters.long()
+                num_points_in_clusters[matched_clusters] += counts  # indices must be long, byte or bool tensors
+                self.centroids = self.centroids * (1 - lr) + c_grad * lr
+                if self.verbose >= 2:
+                    print('iter:', i, 'error:', error.item(), 'time spent:', round(time() - iter_time, 4))
+                if error <= self.tol:
+                    break
+
+        if self.verbose >= 1:
+            print(f'used {i+1} iterations ({round(time()-start_time, 4)}s) to cluster {batch_size} items into {self.n_clusters} clusters')
+        return closest
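For orientation, a minimal usage sketch of the new KMeansGPU class (the import path assumes the repository root is on sys.path; the feature matrix and cluster count are hypothetical, and construction requires a CUDA device because free memory is queried through pynvml):

import torch
from cluster.kmeans import KMeansGPU

features = torch.randn(100_000, 256)  # hypothetical [n_samples, n_features] matrix

kmeans = KMeansGPU(n_clusters=1000, mode='euclidean', max_iter=500, tol=1e-2,
                   device=torch.device("cuda:0"))
labels = kmeans.fit_predict(features)        # per-sample cluster index
centroids = kmeans.centroids.cpu().numpy()   # [n_clusters, n_features]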
cluster/train_cluster.py CHANGED
@@ -1,67 +1,78 @@
+import time,pdb
+import tqdm
+from time import time as ttime
 import os
-from glob import glob
 from pathlib import Path
-import torch
 import logging
 import argparse
+from kmeans import KMeansGPU
 import torch
 import numpy as np
-from sklearn.cluster import KMeans, MiniBatchKMeans
-import tqdm
+from sklearn.cluster import KMeans,MiniBatchKMeans
+
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
-import time
-import random
-
-def train_cluster(in_dir, n_clusters, use_minibatch=True, verbose=False):
+from time import time as ttime
+import pynvml,torch
 
+def train_cluster(in_dir, n_clusters, use_minibatch=True, verbose=False, use_gpu=False):  # GPU minibatch performs poorly; the library supports it but it is not used here
     logger.info(f"Loading features from {in_dir}")
     features = []
     nums = 0
     for path in tqdm.tqdm(in_dir.glob("*.soft.pt")):
-        features.append(torch.load(path).squeeze(0).numpy().T)
+    # for name in os.listdir(in_dir):
+    #     path="%s/%s"%(in_dir,name)
+        features.append(torch.load(path, map_location="cpu").squeeze(0).numpy().T)
         # print(features[-1].shape)
     features = np.concatenate(features, axis=0)
     print(nums, features.nbytes/ 1024**2, "MB , shape:",features.shape, features.dtype)
     features = features.astype(np.float32)
     logger.info(f"Clustering features of shape: {features.shape}")
     t = time.time()
-    if use_minibatch:
-        kmeans = MiniBatchKMeans(n_clusters=n_clusters,verbose=verbose, batch_size=4096, max_iter=80).fit(features)
+    if(use_gpu==False):
+        if use_minibatch:
+            kmeans = MiniBatchKMeans(n_clusters=n_clusters,verbose=verbose, batch_size=4096, max_iter=80).fit(features)
+        else:
+            kmeans = KMeans(n_clusters=n_clusters,verbose=verbose).fit(features)
     else:
-        kmeans = KMeans(n_clusters=n_clusters,verbose=verbose).fit(features)
+        kmeans = KMeansGPU(n_clusters=n_clusters, mode='euclidean', verbose=2 if verbose else 0, max_iter=500, tol=1e-2)
+        features = torch.from_numpy(features)  # .to(device)
+        labels = kmeans.fit_predict(features)
+
     print(time.time()-t, "s")

     x = {
-        "n_features_in_": kmeans.n_features_in_,
-        "_n_threads": kmeans._n_threads,
-        "cluster_centers_": kmeans.cluster_centers_,
+        "n_features_in_": kmeans.n_features_in_ if use_gpu==False else features.shape[1],
+        "_n_threads": kmeans._n_threads if use_gpu==False else 4,
+        "cluster_centers_": kmeans.cluster_centers_ if use_gpu==False else kmeans.centroids.cpu().numpy(),
     }
     print("end")

     return x

-
 if __name__ == "__main__":
-
     parser = argparse.ArgumentParser()
     parser.add_argument('--dataset', type=Path, default="./dataset/44k",
                         help='path of training data directory')
     parser.add_argument('--output', type=Path, default="logs/44k",
                         help='path of model output directory')
+    parser.add_argument('--gpu', action='store_true', default=False,
+                        help='to use GPU')
+

     args = parser.parse_args()

     checkpoint_dir = args.output
     dataset = args.dataset
+    use_gpu = args.gpu
     n_clusters = 10000

     ckpt = {}
     for spk in os.listdir(dataset):
         if os.path.isdir(dataset/spk):
             print(f"train kmeans for {spk}...")
             in_dir = dataset/spk
-            x = train_cluster(in_dir, n_clusters, verbose=False)
+            x = train_cluster(in_dir, n_clusters, use_minibatch=False, verbose=False, use_gpu=use_gpu)
             ckpt[spk] = x

     checkpoint_path = checkpoint_dir / f"kmeans_{n_clusters}.pt"
@@ -70,20 +81,4 @@ if __name__ == "__main__":
         ckpt,
         checkpoint_path,
     )
-
-
-    # import cluster
-    # for spk in tqdm.tqdm(os.listdir("dataset")):
-    #     if os.path.isdir(f"dataset/{spk}"):
-    #         print(f"start kmeans inference for {spk}...")
-    #         for feature_path in tqdm.tqdm(glob(f"dataset/{spk}/*.discrete.npy", recursive=True)):
-    #             mel_path = feature_path.replace(".discrete.npy",".mel.npy")
-    #             mel_spectrogram = np.load(mel_path)
-    #             feature_len = mel_spectrogram.shape[-1]
-    #             c = np.load(feature_path)
-    #             c = utils.tools.repeat_expand_2d(torch.FloatTensor(c), feature_len).numpy()
-    #             feature = c.T
-    #             feature_class = cluster.get_cluster_result(feature, spk)
-    #             np.save(feature_path.replace(".discrete.npy", ".discrete_class.npy"), feature_class)
-
-
+
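The script stores one sklearn-style dict per speaker in a single checkpoint file. A short sketch of reading it back (the speaker key is hypothetical; the path matches the argparse defaults above):

import torch

ckpt = torch.load("logs/44k/kmeans_10000.pt", map_location="cpu")
spk_ckpt = ckpt["speaker0"]               # hypothetical speaker folder name
centers = spk_ckpt["cluster_centers_"]    # [n_clusters, n_features] ndarray
print(spk_ckpt["n_features_in_"], centers.shape)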
diffusion/__init__.py ADDED
File without changes
diffusion/data_loaders.py ADDED
@@ -0,0 +1,284 @@
+import os
+import random
+import re
+import numpy as np
+import librosa
+import torch
+import random
+from utils import repeat_expand_2d
+from tqdm import tqdm
+from torch.utils.data import Dataset
+
+def traverse_dir(
+        root_dir,
+        extensions,
+        amount=None,
+        str_include=None,
+        str_exclude=None,
+        is_pure=False,
+        is_sort=False,
+        is_ext=True):
+
+    file_list = []
+    cnt = 0
+    for root, _, files in os.walk(root_dir):
+        for file in files:
+            if any([file.endswith(f".{ext}") for ext in extensions]):
+                # path
+                mix_path = os.path.join(root, file)
+                pure_path = mix_path[len(root_dir)+1:] if is_pure else mix_path
+
+                # amount
+                if (amount is not None) and (cnt == amount):
+                    if is_sort:
+                        file_list.sort()
+                    return file_list
+
+                # check string
+                if (str_include is not None) and (str_include not in pure_path):
+                    continue
+                if (str_exclude is not None) and (str_exclude in pure_path):
+                    continue
+
+                if not is_ext:
+                    ext = pure_path.split('.')[-1]
+                    pure_path = pure_path[:-(len(ext)+1)]
+                file_list.append(pure_path)
+                cnt += 1
+    if is_sort:
+        file_list.sort()
+    return file_list
+
+
+def get_data_loaders(args, whole_audio=False):
+    data_train = AudioDataset(
+        filelists=args.data.training_files,
+        waveform_sec=args.data.duration,
+        hop_size=args.data.block_size,
+        sample_rate=args.data.sampling_rate,
+        load_all_data=args.train.cache_all_data,
+        whole_audio=whole_audio,
+        extensions=args.data.extensions,
+        n_spk=args.model.n_spk,
+        spk=args.spk,
+        device=args.train.cache_device,
+        fp16=args.train.cache_fp16,
+        use_aug=True)
+    loader_train = torch.utils.data.DataLoader(
+        data_train,
+        batch_size=args.train.batch_size if not whole_audio else 1,
+        shuffle=True,
+        num_workers=args.train.num_workers if args.train.cache_device=='cpu' else 0,
+        persistent_workers=(args.train.num_workers > 0) if args.train.cache_device=='cpu' else False,
+        pin_memory=True if args.train.cache_device=='cpu' else False
+    )
+    data_valid = AudioDataset(
+        filelists=args.data.validation_files,
+        waveform_sec=args.data.duration,
+        hop_size=args.data.block_size,
+        sample_rate=args.data.sampling_rate,
+        load_all_data=args.train.cache_all_data,
+        whole_audio=True,
+        spk=args.spk,
+        extensions=args.data.extensions,
+        n_spk=args.model.n_spk)
+    loader_valid = torch.utils.data.DataLoader(
+        data_valid,
+        batch_size=1,
+        shuffle=False,
+        num_workers=0,
+        pin_memory=True
+    )
+    return loader_train, loader_valid
+
+
+class AudioDataset(Dataset):
+    def __init__(
+        self,
+        filelists,
+        waveform_sec,
+        hop_size,
+        sample_rate,
+        spk,
+        load_all_data=True,
+        whole_audio=False,
+        extensions=['wav'],
+        n_spk=1,
+        device='cpu',
+        fp16=False,
+        use_aug=False,
+    ):
+        super().__init__()
+
+        self.waveform_sec = waveform_sec
+        self.sample_rate = sample_rate
+        self.hop_size = hop_size
+        self.filelists = filelists
+        self.whole_audio = whole_audio
+        self.use_aug = use_aug
+        self.data_buffer = {}
+        self.pitch_aug_dict = {}
+        # np.load(os.path.join(self.path_root, 'pitch_aug_dict.npy'), allow_pickle=True).item()
+        if load_all_data:
+            print('Load all the data filelists:', filelists)
+        else:
+            print('Load the f0, volume data filelists:', filelists)
+        with open(filelists, "r") as f:
+            self.paths = f.read().splitlines()
+        for name_ext in tqdm(self.paths, total=len(self.paths)):
+            name = os.path.splitext(name_ext)[0]
+            path_audio = name_ext
+            duration = librosa.get_duration(filename=path_audio, sr=self.sample_rate)
+
+            path_f0 = name_ext + ".f0.npy"
+            f0, _ = np.load(path_f0, allow_pickle=True)
+            f0 = torch.from_numpy(np.array(f0, dtype=float)).float().unsqueeze(-1).to(device)
+
+            path_volume = name_ext + ".vol.npy"
+            volume = np.load(path_volume)
+            volume = torch.from_numpy(volume).float().unsqueeze(-1).to(device)
+
+            path_augvol = name_ext + ".aug_vol.npy"
+            aug_vol = np.load(path_augvol)
+            aug_vol = torch.from_numpy(aug_vol).float().unsqueeze(-1).to(device)
+
+            if n_spk is not None and n_spk > 1:
+                spk_name = name_ext.split("/")[-2]
+                spk_id = spk[spk_name] if spk_name in spk else 0
+                if spk_id < 0 or spk_id >= n_spk:
+                    raise ValueError(' [x] Multi-speaker training error: spk_id must be an integer from 0 to n_spk-1 ')
+            else:
+                spk_id = 0
+            spk_id = torch.LongTensor(np.array([spk_id])).to(device)
+
+            if load_all_data:
+                '''
+                audio, sr = librosa.load(path_audio, sr=self.sample_rate)
+                if len(audio.shape) > 1:
+                    audio = librosa.to_mono(audio)
+                audio = torch.from_numpy(audio).to(device)
+                '''
+                path_mel = name_ext + ".mel.npy"
+                mel = np.load(path_mel)
+                mel = torch.from_numpy(mel).to(device)
+
+                path_augmel = name_ext + ".aug_mel.npy"
+                aug_mel, keyshift = np.load(path_augmel, allow_pickle=True)
+                aug_mel = np.array(aug_mel, dtype=float)
+                aug_mel = torch.from_numpy(aug_mel).to(device)
+                self.pitch_aug_dict[name_ext] = keyshift
+
+                path_units = name_ext + ".soft.pt"
+                units = torch.load(path_units).to(device)
+                units = units[0]
+                units = repeat_expand_2d(units, f0.size(0)).transpose(0, 1)
+
+                if fp16:
+                    mel = mel.half()
+                    aug_mel = aug_mel.half()
+                    units = units.half()
+
+                self.data_buffer[name_ext] = {
+                    'duration': duration,
+                    'mel': mel,
+                    'aug_mel': aug_mel,
+                    'units': units,
+                    'f0': f0,
+                    'volume': volume,
+                    'aug_vol': aug_vol,
+                    'spk_id': spk_id
+                }
+            else:
+                path_augmel = name_ext + ".aug_mel.npy"
+                aug_mel, keyshift = np.load(path_augmel, allow_pickle=True)
+                self.pitch_aug_dict[name_ext] = keyshift
+                self.data_buffer[name_ext] = {
+                    'duration': duration,
+                    'f0': f0,
+                    'volume': volume,
+                    'aug_vol': aug_vol,
+                    'spk_id': spk_id
+                }
+
+    def __getitem__(self, file_idx):
+        name_ext = self.paths[file_idx]
+        data_buffer = self.data_buffer[name_ext]
+        # check duration. if too short, then skip
+        if data_buffer['duration'] < (self.waveform_sec + 0.1):
+            return self.__getitem__((file_idx + 1) % len(self.paths))
+
+        # get item
+        return self.get_data(name_ext, data_buffer)
+
+    def get_data(self, name_ext, data_buffer):
+        name = os.path.splitext(name_ext)[0]
+        frame_resolution = self.hop_size / self.sample_rate
+        duration = data_buffer['duration']
+        waveform_sec = duration if self.whole_audio else self.waveform_sec
+
+        # load audio
+        idx_from = 0 if self.whole_audio else random.uniform(0, duration - waveform_sec - 0.1)
+        start_frame = int(idx_from / frame_resolution)
+        units_frame_len = int(waveform_sec / frame_resolution)
+        aug_flag = random.choice([True, False]) and self.use_aug
+        '''
+        audio = data_buffer.get('audio')
+        if audio is None:
+            path_audio = os.path.join(self.path_root, 'audio', name) + '.wav'
+            audio, sr = librosa.load(
+                path_audio,
+                sr=self.sample_rate,
+                offset=start_frame * frame_resolution,
+                duration=waveform_sec)
+            if len(audio.shape) > 1:
+                audio = librosa.to_mono(audio)
+            # clip audio into N seconds
+            audio = audio[ : audio.shape[-1] // self.hop_size * self.hop_size]
+            audio = torch.from_numpy(audio).float()
+        else:
+            audio = audio[start_frame * self.hop_size : (start_frame + units_frame_len) * self.hop_size]
+        '''
+        # load mel
+        mel_key = 'aug_mel' if aug_flag else 'mel'
+        mel = data_buffer.get(mel_key)
+        if mel is None:
+            mel = name_ext + ".mel.npy"
+            mel = np.load(mel)
+            mel = mel[start_frame : start_frame + units_frame_len]
+            mel = torch.from_numpy(mel).float()
+        else:
+            mel = mel[start_frame : start_frame + units_frame_len]
+
+        # load f0
+        f0 = data_buffer.get('f0')
+        aug_shift = 0
+        if aug_flag:
+            aug_shift = self.pitch_aug_dict[name_ext]
+        f0_frames = 2 ** (aug_shift / 12) * f0[start_frame : start_frame + units_frame_len]
+
+        # load units
+        units = data_buffer.get('units')
+        if units is None:
+            path_units = name_ext + ".soft.pt"
+            units = torch.load(path_units)
+            units = units[0]
+            units = repeat_expand_2d(units, f0.size(0)).transpose(0, 1)
+
+        units = units[start_frame : start_frame + units_frame_len]
+
+        # load volume
+        vol_key = 'aug_vol' if aug_flag else 'volume'
+        volume = data_buffer.get(vol_key)
+        volume_frames = volume[start_frame : start_frame + units_frame_len]
+
+        # load spk_id
+        spk_id = data_buffer.get('spk_id')
+
+        # load shift
+        aug_shift = torch.from_numpy(np.array([[aug_shift]])).float()
+
+        return dict(mel=mel, f0=f0_frames, volume=volume_frames, units=units, spk_id=spk_id, aug_shift=aug_shift, name=name, name_ext=name_ext)
+
+    def __len__(self):
+        return len(self.paths)
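A hedged sketch of driving get_data_loaders above; the nested config object mirrors exactly the attributes the function reads, but every concrete value here is a placeholder:

from types import SimpleNamespace
from diffusion.data_loaders import get_data_loaders

args = SimpleNamespace(
    data=SimpleNamespace(training_files="filelists/train.txt",  # placeholder paths
                         validation_files="filelists/val.txt",
                         duration=2.0, block_size=512,
                         sampling_rate=44100, extensions=['wav']),
    model=SimpleNamespace(n_spk=1),
    train=SimpleNamespace(cache_all_data=False, cache_device='cpu',
                          cache_fp16=False, batch_size=48, num_workers=2),
    spk={'speaker0': 0},  # hypothetical speaker map
)

loader_train, loader_valid = get_data_loaders(args)
for batch in loader_train:
    # each batch is a dict: 'mel', 'f0', 'volume', 'units', 'spk_id', 'aug_shift', ...
    print(batch['f0'].shape)
    break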
diffusion/diffusion.py ADDED
@@ -0,0 +1,317 @@
+from collections import deque
+from functools import partial
+from inspect import isfunction
+import torch.nn.functional as F
+import librosa.sequence
+import numpy as np
+import torch
+from torch import nn
+from tqdm import tqdm
+
+
+def exists(x):
+    return x is not None
+
+
+def default(val, d):
+    if exists(val):
+        return val
+    return d() if isfunction(d) else d
+
+
+def extract(a, t, x_shape):
+    b, *_ = t.shape
+    out = a.gather(-1, t)
+    return out.reshape(b, *((1,) * (len(x_shape) - 1)))
+
+
+def noise_like(shape, device, repeat=False):
+    repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
+    noise = lambda: torch.randn(shape, device=device)
+    return repeat_noise() if repeat else noise()
+
+
+def linear_beta_schedule(timesteps, max_beta=0.02):
+    """
+    linear schedule
+    """
+    betas = np.linspace(1e-4, max_beta, timesteps)
+    return betas
+
+
+def cosine_beta_schedule(timesteps, s=0.008):
+    """
+    cosine schedule
+    as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
+    """
+    steps = timesteps + 1
+    x = np.linspace(0, steps, steps)
+    alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2
+    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
+    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
+    return np.clip(betas, a_min=0, a_max=0.999)
+
+
+beta_schedule = {
+    "cosine": cosine_beta_schedule,
+    "linear": linear_beta_schedule,
+}
+
+
+class GaussianDiffusion(nn.Module):
+    def __init__(self,
+                 denoise_fn,
+                 out_dims=128,
+                 timesteps=1000,
+                 k_step=1000,
+                 max_beta=0.02,
+                 spec_min=-12,
+                 spec_max=2):
+        super().__init__()
+        self.denoise_fn = denoise_fn
+        self.out_dims = out_dims
+        betas = beta_schedule['linear'](timesteps, max_beta=max_beta)
+
+        alphas = 1. - betas
+        alphas_cumprod = np.cumprod(alphas, axis=0)
+        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
+
+        timesteps, = betas.shape
+        self.num_timesteps = int(timesteps)
+        self.k_step = k_step
+
+        self.noise_list = deque(maxlen=4)
+
+        to_torch = partial(torch.tensor, dtype=torch.float32)
+
+        self.register_buffer('betas', to_torch(betas))
+        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
+        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
+
+        # calculations for diffusion q(x_t | x_{t-1}) and others
+        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
+        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
+        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
+        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
+        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
+
+        # calculations for posterior q(x_{t-1} | x_t, x_0)
+        posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
+        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
+        self.register_buffer('posterior_variance', to_torch(posterior_variance))
+        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
+        self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
+        self.register_buffer('posterior_mean_coef1', to_torch(
+            betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
+        self.register_buffer('posterior_mean_coef2', to_torch(
+            (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
+
+        self.register_buffer('spec_min', torch.FloatTensor([spec_min])[None, None, :out_dims])
+        self.register_buffer('spec_max', torch.FloatTensor([spec_max])[None, None, :out_dims])
+
+    def q_mean_variance(self, x_start, t):
+        mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+        variance = extract(1. - self.alphas_cumprod, t, x_start.shape)
+        log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape)
+        return mean, variance, log_variance
+
+    def predict_start_from_noise(self, x_t, t, noise):
+        return (
+            extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
+            extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
+        )
+
+    def q_posterior(self, x_start, x_t, t):
+        posterior_mean = (
+            extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
+            extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
+        )
+        posterior_variance = extract(self.posterior_variance, t, x_t.shape)
+        posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
+        return posterior_mean, posterior_variance, posterior_log_variance_clipped
+
+    def p_mean_variance(self, x, t, cond):
+        noise_pred = self.denoise_fn(x, t, cond=cond)
+        x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred)
+
+        x_recon.clamp_(-1., 1.)
+
+        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
+        return model_mean, posterior_variance, posterior_log_variance
+
+    @torch.no_grad()
+    def p_sample(self, x, t, cond, clip_denoised=True, repeat_noise=False):
+        b, *_, device = *x.shape, x.device
+        model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, cond=cond)
+        noise = noise_like(x.shape, device, repeat_noise)
+        # no noise when t == 0
+        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
+        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
+
+    @torch.no_grad()
+    def p_sample_plms(self, x, t, interval, cond, clip_denoised=True, repeat_noise=False):
+        """
+        Use the PLMS method from
+        [Pseudo Numerical Methods for Diffusion Models on Manifolds](https://arxiv.org/abs/2202.09778).
+        """
+
+        def get_x_pred(x, noise_t, t):
+            a_t = extract(self.alphas_cumprod, t, x.shape)
+            a_prev = extract(self.alphas_cumprod, torch.max(t - interval, torch.zeros_like(t)), x.shape)
+            a_t_sq, a_prev_sq = a_t.sqrt(), a_prev.sqrt()
+
+            x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x - 1 / (
+                    a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t)
+            x_pred = x + x_delta
+
+            return x_pred
+
+        noise_list = self.noise_list
+        noise_pred = self.denoise_fn(x, t, cond=cond)
+
+        if len(noise_list) == 0:
+            x_pred = get_x_pred(x, noise_pred, t)
+            noise_pred_prev = self.denoise_fn(x_pred, max(t - interval, 0), cond=cond)
+            noise_pred_prime = (noise_pred + noise_pred_prev) / 2
+        elif len(noise_list) == 1:
+            noise_pred_prime = (3 * noise_pred - noise_list[-1]) / 2
+        elif len(noise_list) == 2:
+            noise_pred_prime = (23 * noise_pred - 16 * noise_list[-1] + 5 * noise_list[-2]) / 12
+        else:
+            noise_pred_prime = (55 * noise_pred - 59 * noise_list[-1] + 37 * noise_list[-2] - 9 * noise_list[-3]) / 24
+
+        x_prev = get_x_pred(x, noise_pred_prime, t)
+        noise_list.append(noise_pred)
+
+        return x_prev
+
+    def q_sample(self, x_start, t, noise=None):
+        noise = default(noise, lambda: torch.randn_like(x_start))
+        return (
+            extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
+            extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
+        )
+
+    def p_losses(self, x_start, t, cond, noise=None, loss_type='l2'):
+        noise = default(noise, lambda: torch.randn_like(x_start))
+
+        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
+        x_recon = self.denoise_fn(x_noisy, t, cond)
+
+        if loss_type == 'l1':
+            loss = (noise - x_recon).abs().mean()
+        elif loss_type == 'l2':
+            loss = F.mse_loss(noise, x_recon)
+        else:
+            raise NotImplementedError()
+
+        return loss
+
+    def forward(self,
+                condition,
+                gt_spec=None,
+                infer=True,
+                infer_speedup=10,
+                method='dpm-solver',
+                k_step=300,
+                use_tqdm=True):
+        """
+        conditioning diffusion, use fastspeech2 encoder output as the condition
+        """
+        cond = condition.transpose(1, 2)
+        b, device = condition.shape[0], condition.device
+
+        if not infer:
+            spec = self.norm_spec(gt_spec)
+            t = torch.randint(0, self.k_step, (b,), device=device).long()
+            norm_spec = spec.transpose(1, 2)[:, None, :, :]  # [B, 1, M, T]
+            return self.p_losses(norm_spec, t, cond=cond)
+        else:
+            shape = (cond.shape[0], 1, self.out_dims, cond.shape[2])
+
+            if gt_spec is None:
+                t = self.k_step
+                x = torch.randn(shape, device=device)
+            else:
+                t = k_step
+                norm_spec = self.norm_spec(gt_spec)
+                norm_spec = norm_spec.transpose(1, 2)[:, None, :, :]
+                x = self.q_sample(x_start=norm_spec, t=torch.tensor([t - 1], device=device).long())
+
+            if method is not None and infer_speedup > 1:
+                if method == 'dpm-solver':
+                    from .dpm_solver_pytorch import NoiseScheduleVP, model_wrapper, DPM_Solver
+                    # 1. Define the noise schedule.
+                    noise_schedule = NoiseScheduleVP(schedule='discrete', betas=self.betas[:t])
+
+                    # 2. Convert your discrete-time `model` to the continuous-time
+                    # noise prediction model. Here is an example for a diffusion model
+                    # `model` with the noise prediction type ("noise").
+                    def my_wrapper(fn):
+                        def wrapped(x, t, **kwargs):
+                            ret = fn(x, t, **kwargs)
+                            if use_tqdm:
+                                self.bar.update(1)
+                            return ret
+
+                        return wrapped
+
+                    model_fn = model_wrapper(
+                        my_wrapper(self.denoise_fn),
+                        noise_schedule,
+                        model_type="noise",  # or "x_start" or "v" or "score"
+                        model_kwargs={"cond": cond}
+                    )
+
+                    # 3. Define dpm-solver and sample by singlestep DPM-Solver.
+                    # (We recommend singlestep DPM-Solver for unconditional sampling)
+                    # You can adjust the `steps` to balance the computation
+                    # costs and the sample quality.
+                    dpm_solver = DPM_Solver(model_fn, noise_schedule)
+
+                    steps = t // infer_speedup
+                    if use_tqdm:
+                        self.bar = tqdm(desc="sample time step", total=steps)
+                    x = dpm_solver.sample(
+                        x,
+                        steps=steps,
+                        order=3,
+                        skip_type="time_uniform",
+                        method="singlestep",
+                    )
+                    if use_tqdm:
+                        self.bar.close()
+                elif method == 'pndm':
+                    self.noise_list = deque(maxlen=4)
+                    if use_tqdm:
+                        for i in tqdm(
+                                reversed(range(0, t, infer_speedup)), desc='sample time step',
+                                total=t // infer_speedup,
+                        ):
+                            x = self.p_sample_plms(
+                                x, torch.full((b,), i, device=device, dtype=torch.long),
+                                infer_speedup, cond=cond
+                            )
+                    else:
+                        for i in reversed(range(0, t, infer_speedup)):
+                            x = self.p_sample_plms(
+                                x, torch.full((b,), i, device=device, dtype=torch.long),
+                                infer_speedup, cond=cond
+                            )
+                else:
+                    raise NotImplementedError(method)
+            else:
+                if use_tqdm:
+                    for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t):
+                        x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond)
+                else:
+                    for i in reversed(range(0, t)):
+                        x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond)
+            x = x.squeeze(1).transpose(1, 2)  # [B, T, M]
+            return self.denorm_spec(x)
+
+    def norm_spec(self, x):
+        return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1
+
+    def denorm_spec(self, x):
+        return (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min
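To make the two call modes of GaussianDiffusion.forward concrete, here is a hedged sketch with a stand-in denoiser (the real denoise_fn is the WaveNet from diffusion/wavenet.py; all sizes below are hypothetical):

import torch
from diffusion.diffusion import GaussianDiffusion

class DummyDenoiser(torch.nn.Module):
    # Matches the signature forward() calls: (x [B,1,M,T], t, cond [B,H,T]).
    def forward(self, x, t, cond=None):
        return torch.zeros_like(x)

model = GaussianDiffusion(DummyDenoiser(), out_dims=128, timesteps=1000, k_step=300)
cond = torch.randn(1, 256, 100)    # [B, n_hidden, T] conditioning, hypothetical sizes
gt_mel = torch.randn(1, 100, 128)  # [B, T, M] ground-truth mel

loss = model(cond, gt_spec=gt_mel, infer=False)  # training: scalar diffusion loss
mel = model(cond, gt_spec=gt_mel, infer=True, method='pndm',
            infer_speedup=10, k_step=300, use_tqdm=False)  # shallow-diffusion sampling
print(loss.item(), mel.shape)  # mel: [B, T, M]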
diffusion/diffusion_onnx.py ADDED
@@ -0,0 +1,612 @@
+from collections import deque
+from functools import partial
+from inspect import isfunction
+import torch.nn.functional as F
+import librosa.sequence
+import numpy as np
+from torch.nn import Conv1d
+from torch.nn import Mish
+import torch
+from torch import nn
+from tqdm import tqdm
+import math
+
+
+def exists(x):
+    return x is not None
+
+
+def default(val, d):
+    if exists(val):
+        return val
+    return d() if isfunction(d) else d
+
+
+def extract(a, t):
+    return a[t].reshape((1, 1, 1, 1))
+
+
+def noise_like(shape, device, repeat=False):
+    repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
+    noise = lambda: torch.randn(shape, device=device)
+    return repeat_noise() if repeat else noise()
+
+
+def linear_beta_schedule(timesteps, max_beta=0.02):
+    """
+    linear schedule
+    """
+    betas = np.linspace(1e-4, max_beta, timesteps)
+    return betas
+
+
+def cosine_beta_schedule(timesteps, s=0.008):
+    """
+    cosine schedule
+    as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
+    """
+    steps = timesteps + 1
+    x = np.linspace(0, steps, steps)
+    alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2
+    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
+    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
+    return np.clip(betas, a_min=0, a_max=0.999)
+
+
+beta_schedule = {
+    "cosine": cosine_beta_schedule,
+    "linear": linear_beta_schedule,
+}
+
+
+def extract_1(a, t):
+    return a[t].reshape((1, 1, 1, 1))
+
+
+def predict_stage0(noise_pred, noise_pred_prev):
+    return (noise_pred + noise_pred_prev) / 2
+
+
+def predict_stage1(noise_pred, noise_list):
+    return (noise_pred * 3
+            - noise_list[-1]) / 2
+
+
+def predict_stage2(noise_pred, noise_list):
+    return (noise_pred * 23
+            - noise_list[-1] * 16
+            + noise_list[-2] * 5) / 12
+
+
+def predict_stage3(noise_pred, noise_list):
+    return (noise_pred * 55
+            - noise_list[-1] * 59
+            + noise_list[-2] * 37
+            - noise_list[-3] * 9) / 24
+
+
+class SinusoidalPosEmb(nn.Module):
+    def __init__(self, dim):
+        super().__init__()
+        self.dim = dim
+        self.half_dim = dim // 2
+        self.emb = 9.21034037 / (self.half_dim - 1)
+        self.emb = torch.exp(torch.arange(self.half_dim) * torch.tensor(-self.emb)).unsqueeze(0)
+        self.emb = self.emb.cpu()
+
+    def forward(self, x):
+        emb = self.emb * x
+        emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
+        return emb
+
+
+class ResidualBlock(nn.Module):
+    def __init__(self, encoder_hidden, residual_channels, dilation):
+        super().__init__()
+        self.residual_channels = residual_channels
+        self.dilated_conv = Conv1d(residual_channels, 2 * residual_channels, 3, padding=dilation, dilation=dilation)
+        self.diffusion_projection = nn.Linear(residual_channels, residual_channels)
+        self.conditioner_projection = Conv1d(encoder_hidden, 2 * residual_channels, 1)
+        self.output_projection = Conv1d(residual_channels, 2 * residual_channels, 1)
+
+    def forward(self, x, conditioner, diffusion_step):
+        diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1)
+        conditioner = self.conditioner_projection(conditioner)
+        y = x + diffusion_step
+        y = self.dilated_conv(y) + conditioner
+
+        gate, filter_1 = torch.split(y, [self.residual_channels, self.residual_channels], dim=1)
+
+        y = torch.sigmoid(gate) * torch.tanh(filter_1)
+        y = self.output_projection(y)
+
+        residual, skip = torch.split(y, [self.residual_channels, self.residual_channels], dim=1)
+
+        return (x + residual) / 1.41421356, skip
+
+
+class DiffNet(nn.Module):
+    def __init__(self, in_dims, n_layers, n_chans, n_hidden):
+        super().__init__()
+        self.encoder_hidden = n_hidden
+        self.residual_layers = n_layers
+        self.residual_channels = n_chans
+        self.input_projection = Conv1d(in_dims, self.residual_channels, 1)
+        self.diffusion_embedding = SinusoidalPosEmb(self.residual_channels)
+        dim = self.residual_channels
+        self.mlp = nn.Sequential(
+            nn.Linear(dim, dim * 4),
+            Mish(),
+            nn.Linear(dim * 4, dim)
+        )
+        self.residual_layers = nn.ModuleList([
+            ResidualBlock(self.encoder_hidden, self.residual_channels, 1)
+            for i in range(self.residual_layers)
+        ])
+        self.skip_projection = Conv1d(self.residual_channels, self.residual_channels, 1)
+        self.output_projection = Conv1d(self.residual_channels, in_dims, 1)
+        nn.init.zeros_(self.output_projection.weight)
+
+    def forward(self, spec, diffusion_step, cond):
+        x = spec.squeeze(0)
+        x = self.input_projection(x)  # x [B, residual_channel, T]
+        x = F.relu(x)
+        # skip = torch.randn_like(x)
+        diffusion_step = diffusion_step.float()
+        diffusion_step = self.diffusion_embedding(diffusion_step)
+        diffusion_step = self.mlp(diffusion_step)
+
+        x, skip = self.residual_layers[0](x, cond, diffusion_step)
+        # noinspection PyTypeChecker
+        for layer in self.residual_layers[1:]:
+            x, skip_connection = layer.forward(x, cond, diffusion_step)
+            skip = skip + skip_connection
+        x = skip / math.sqrt(len(self.residual_layers))
+        x = self.skip_projection(x)
+        x = F.relu(x)
+        x = self.output_projection(x)  # [B, 80, T]
+        return x.unsqueeze(1)
+
+
+class AfterDiffusion(nn.Module):
+    def __init__(self, spec_max, spec_min, v_type='a'):
+        super().__init__()
+        self.spec_max = spec_max
+        self.spec_min = spec_min
+        self.type = v_type
+
+    def forward(self, x):
+        x = x.squeeze(1).permute(0, 2, 1)
+        mel_out = (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min
+        if self.type == 'nsf-hifigan-log10':
+            mel_out = mel_out * 0.434294
+        return mel_out.transpose(2, 1)
+
+
+class Pred(nn.Module):
+    def __init__(self, alphas_cumprod):
+        super().__init__()
+        self.alphas_cumprod = alphas_cumprod
+
+    def forward(self, x_1, noise_t, t_1, t_prev):
+        a_t = extract(self.alphas_cumprod, t_1).cpu()
+        a_prev = extract(self.alphas_cumprod, t_prev).cpu()
+        a_t_sq, a_prev_sq = a_t.sqrt().cpu(), a_prev.sqrt().cpu()
+        x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x_1 - 1 / (
+                a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t)
+        x_pred = x_1 + x_delta.cpu()
+
+        return x_pred
+
+
+class GaussianDiffusion(nn.Module):
+    def __init__(self,
+                 out_dims=128,
+                 n_layers=20,
+                 n_chans=384,
+                 n_hidden=256,
+                 timesteps=1000,
+                 k_step=1000,
+                 max_beta=0.02,
+                 spec_min=-12,
+                 spec_max=2):
+        super().__init__()
+        self.denoise_fn = DiffNet(out_dims, n_layers, n_chans, n_hidden)
+        self.out_dims = out_dims
+        self.mel_bins = out_dims
+        self.n_hidden = n_hidden
+        betas = beta_schedule['linear'](timesteps, max_beta=max_beta)
+
+        alphas = 1. - betas
+        alphas_cumprod = np.cumprod(alphas, axis=0)
+        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
+        timesteps, = betas.shape
+        self.num_timesteps = int(timesteps)
+        self.k_step = k_step
+
+        self.noise_list = deque(maxlen=4)
+
+        to_torch = partial(torch.tensor, dtype=torch.float32)
+
+        self.register_buffer('betas', to_torch(betas))
+        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
+        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
+
+        # calculations for diffusion q(x_t | x_{t-1}) and others
+        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
+        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
+        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
+        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
+        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
+
+        # calculations for posterior q(x_{t-1} | x_t, x_0)
+        posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
+        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
+        self.register_buffer('posterior_variance', to_torch(posterior_variance))
+        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
+        self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
+        self.register_buffer('posterior_mean_coef1', to_torch(
+            betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
+        self.register_buffer('posterior_mean_coef2', to_torch(
+            (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
+
+        self.register_buffer('spec_min', torch.FloatTensor([spec_min])[None, None, :out_dims])
+        self.register_buffer('spec_max', torch.FloatTensor([spec_max])[None, None, :out_dims])
+        self.ad = AfterDiffusion(self.spec_max, self.spec_min)
+        self.xp = Pred(self.alphas_cumprod)
+
+    def q_mean_variance(self, x_start, t):
+        mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+        variance = extract(1. - self.alphas_cumprod, t, x_start.shape)
+        log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape)
+        return mean, variance, log_variance
+
+    def predict_start_from_noise(self, x_t, t, noise):
+        return (
+            extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
+            extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
+        )
+
+    def q_posterior(self, x_start, x_t, t):
+        posterior_mean = (
+            extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
+            extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
+        )
+        posterior_variance = extract(self.posterior_variance, t, x_t.shape)
+        posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
+        return posterior_mean, posterior_variance, posterior_log_variance_clipped
+
+    def p_mean_variance(self, x, t, cond):
+        noise_pred = self.denoise_fn(x, t, cond=cond)
+        x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred)
+
+        x_recon.clamp_(-1., 1.)
+
+        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
+        return model_mean, posterior_variance, posterior_log_variance
+
+    @torch.no_grad()
+    def p_sample(self, x, t, cond, clip_denoised=True, repeat_noise=False):
+        b, *_, device = *x.shape, x.device
+        model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, cond=cond)
+        noise = noise_like(x.shape, device, repeat_noise)
+        # no noise when t == 0
+        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
+        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
+
+    @torch.no_grad()
+    def p_sample_plms(self, x, t, interval, cond, clip_denoised=True, repeat_noise=False):
+        """
+        Use the PLMS method from
+        [Pseudo Numerical Methods for Diffusion Models on Manifolds](https://arxiv.org/abs/2202.09778).
+        """
+
+        def get_x_pred(x, noise_t, t):
+            a_t = extract(self.alphas_cumprod, t)
+            a_prev = extract(self.alphas_cumprod, torch.max(t - interval, torch.zeros_like(t)))
+            a_t_sq, a_prev_sq = a_t.sqrt(), a_prev.sqrt()
+
+            x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x - 1 / (
+                    a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t)
+            x_pred = x + x_delta
+
+            return x_pred
+
+        noise_list = self.noise_list
+        noise_pred = self.denoise_fn(x, t, cond=cond)
+
+        if len(noise_list) == 0:
+            x_pred = get_x_pred(x, noise_pred, t)
+            noise_pred_prev = self.denoise_fn(x_pred, max(t - interval, 0), cond=cond)
+            noise_pred_prime = (noise_pred + noise_pred_prev) / 2
+        elif len(noise_list) == 1:
+            noise_pred_prime = (3 * noise_pred - noise_list[-1]) / 2
+        elif len(noise_list) == 2:
+            noise_pred_prime = (23 * noise_pred - 16 * noise_list[-1] + 5 * noise_list[-2]) / 12
+        else:
+            noise_pred_prime = (55 * noise_pred - 59 * noise_list[-1] + 37 * noise_list[-2] - 9 * noise_list[-3]) / 24
+
+        x_prev = get_x_pred(x, noise_pred_prime, t)
+        noise_list.append(noise_pred)
+
+        return x_prev
+
+    def q_sample(self, x_start, t, noise=None):
+        noise = default(noise, lambda: torch.randn_like(x_start))
+        return (
+            extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
+            extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
+        )
+
+    def p_losses(self, x_start, t, cond, noise=None, loss_type='l2'):
+        noise = default(noise, lambda: torch.randn_like(x_start))
+
+        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
+        x_recon = self.denoise_fn(x_noisy, t, cond)
+
+        if loss_type == 'l1':
+            loss = (noise - x_recon).abs().mean()
+        elif loss_type == 'l2':
+            loss = F.mse_loss(noise, x_recon)
+        else:
+            raise NotImplementedError()
+
+        return loss
+
+    def org_forward(self,
+                    condition,
+                    init_noise=None,
+                    gt_spec=None,
+                    infer=True,
+                    infer_speedup=100,
+                    method='pndm',
+                    k_step=1000,
+                    use_tqdm=True):
+        """
+        conditioning diffusion, use fastspeech2 encoder output as the condition
+        """
+        cond = condition
+        b, device = condition.shape[0], condition.device
+        if not infer:
+            spec = self.norm_spec(gt_spec)
+            t = torch.randint(0, self.k_step, (b,), device=device).long()
+            norm_spec = spec.transpose(1, 2)[:, None, :, :]  # [B, 1, M, T]
+            return self.p_losses(norm_spec, t, cond=cond)
+        else:
+            shape = (cond.shape[0], 1, self.out_dims, cond.shape[2])
+
+            if gt_spec is None:
+                t = self.k_step
+                if init_noise is None:
+                    x = torch.randn(shape, device=device)
+                else:
+                    x = init_noise
+            else:
+                t = k_step
+                norm_spec = self.norm_spec(gt_spec)
+                norm_spec = norm_spec.transpose(1, 2)[:, None, :, :]
+                x = self.q_sample(x_start=norm_spec, t=torch.tensor([t - 1], device=device).long())
+
+            if method is not None and infer_speedup > 1:
+                if method == 'dpm-solver':
+                    from .dpm_solver_pytorch import NoiseScheduleVP, model_wrapper, DPM_Solver
+                    # 1. Define the noise schedule.
+                    noise_schedule = NoiseScheduleVP(schedule='discrete', betas=self.betas[:t])
+
+                    # 2. Convert your discrete-time `model` to the continuous-time
+                    # noise prediction model. Here is an example for a diffusion model
+                    # `model` with the noise prediction type ("noise").
+                    def my_wrapper(fn):
+                        def wrapped(x, t, **kwargs):
+                            ret = fn(x, t, **kwargs)
+                            if use_tqdm:
+                                self.bar.update(1)
+                            return ret
+
+                        return wrapped
+
+                    model_fn = model_wrapper(
+                        my_wrapper(self.denoise_fn),
+                        noise_schedule,
+                        model_type="noise",  # or "x_start" or "v" or "score"
+                        model_kwargs={"cond": cond}
+                    )
+
+                    # 3. Define dpm-solver and sample by singlestep DPM-Solver.
+                    # (We recommend singlestep DPM-Solver for unconditional sampling)
+                    # You can adjust the `steps` to balance the computation
+                    # costs and the sample quality.
+                    dpm_solver = DPM_Solver(model_fn, noise_schedule)
+
+                    steps = t // infer_speedup
+                    if use_tqdm:
+                        self.bar = tqdm(desc="sample time step", total=steps)
+                    x = dpm_solver.sample(
+                        x,
+                        steps=steps,
+                        order=3,
+                        skip_type="time_uniform",
+                        method="singlestep",
+                    )
+                    if use_tqdm:
+                        self.bar.close()
+                elif method == 'pndm':
+                    self.noise_list = deque(maxlen=4)
+                    if use_tqdm:
+                        for i in tqdm(
+                                reversed(range(0, t, infer_speedup)), desc='sample time step',
+                                total=t // infer_speedup,
+                        ):
+                            x = self.p_sample_plms(
+                                x, torch.full((b,), i, device=device, dtype=torch.long),
+                                infer_speedup, cond=cond
+                            )
+                    else:
+                        for i in reversed(range(0, t, infer_speedup)):
+                            x = self.p_sample_plms(
+                                x, torch.full((b,), i, device=device, dtype=torch.long),
+                                infer_speedup, cond=cond
+                            )
+                else:
+                    raise NotImplementedError(method)
+            else:
+                if use_tqdm:
+                    for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t):
+                        x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond)
+                else:
+                    for i in reversed(range(0, t)):
+                        x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond)
+            x = x.squeeze(1).transpose(1, 2)  # [B, T, M]
+            return self.denorm_spec(x).transpose(2, 1)
+
+    def norm_spec(self, x):
+        return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1
+
+    def denorm_spec(self, x):
+        return (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min
+
+    def get_x_pred(self, x_1, noise_t, t_1, t_prev):
+        a_t = extract(self.alphas_cumprod, t_1)
+        a_prev = extract(self.alphas_cumprod, t_prev)
+        a_t_sq, a_prev_sq = a_t.sqrt(), a_prev.sqrt()
+        x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x_1 - 1 / (
+                a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t)
+        x_pred = x_1 + x_delta
+        return x_pred
+
+    def OnnxExport(self, project_name=None, init_noise=None, hidden_channels=256, export_denoise=True, export_pred=True, export_after=True):
+        cond = torch.randn([1, self.n_hidden, 10]).cpu()
+        if init_noise is None:
+            x = torch.randn((1, 1, self.mel_bins, cond.shape[2]), dtype=torch.float32).cpu()
+        else:
+            x = init_noise
+        pndms = 100
+
+        org_y_x = self.org_forward(cond, init_noise=x)
+
+        device = cond.device
+        n_frames = cond.shape[2]
+        step_range = torch.arange(0, self.k_step, pndms, dtype=torch.long, device=device).flip(0)
+        plms_noise_stage = torch.tensor(0, dtype=torch.long, device=device)
+        noise_list = torch.zeros((0, 1, 1, self.mel_bins, n_frames), device=device)
+
+        ot = step_range[0]
+        ot_1 = torch.full((1,), ot, device=device, dtype=torch.long)
+        if export_denoise:
+            torch.onnx.export(
+                self.denoise_fn,
+                (x.cpu(), ot_1.cpu(), cond.cpu()),
+                f"{project_name}_denoise.onnx",
+                input_names=["noise", "time", "condition"],
+                output_names=["noise_pred"],
+                dynamic_axes={
+                    "noise": [3],
+                    "condition": [2]
+                },
+                opset_version=16
+            )
+
+        for t in step_range:
+            t_1 = torch.full((1,), t, device=device, dtype=torch.long)
+            noise_pred = self.denoise_fn(x, t_1, cond)
+            t_prev = t_1 - pndms
+            t_prev = t_prev * (t_prev > 0)
+            if plms_noise_stage == 0:
+                if export_pred:
+                    torch.onnx.export(
+                        self.xp,
+                        (x.cpu(), noise_pred.cpu(), t_1.cpu(), t_prev.cpu()),
+                        f"{project_name}_pred.onnx",
520
+ input_names=["noise", "noise_pred", "time", "time_prev"],
521
+ output_names=["noise_pred_o"],
522
+ dynamic_axes={
523
+ "noise": [3],
524
+ "noise_pred": [3]
525
+ },
526
+ opset_version=16
527
+ )
528
+
529
+ x_pred = self.get_x_pred(x, noise_pred, t_1, t_prev)
530
+ noise_pred_prev = self.denoise_fn(x_pred, t_prev, cond=cond)
531
+ noise_pred_prime = predict_stage0(noise_pred, noise_pred_prev)
532
+
533
+ elif plms_noise_stage == 1:
534
+ noise_pred_prime = predict_stage1(noise_pred, noise_list)
535
+
536
+ elif plms_noise_stage == 2:
537
+ noise_pred_prime = predict_stage2(noise_pred, noise_list)
538
+
539
+ else:
540
+ noise_pred_prime = predict_stage3(noise_pred, noise_list)
541
+
542
+ noise_pred = noise_pred.unsqueeze(0)
543
+
544
+ if plms_noise_stage < 3:
545
+ noise_list = torch.cat((noise_list, noise_pred), dim=0)
546
+ plms_noise_stage = plms_noise_stage + 1
547
+
548
+ else:
549
+ noise_list = torch.cat((noise_list[-2:], noise_pred), dim=0)
550
+
551
+ x = self.get_x_pred(x, noise_pred_prime, t_1, t_prev)
552
+ if export_after:
553
+ torch.onnx.export(
554
+ self.ad,
555
+ x.cpu(),
556
+ f"{project_name}_after.onnx",
557
+ input_names=["x"],
558
+ output_names=["mel_out"],
559
+ dynamic_axes={
560
+ "x": [3]
561
+ },
562
+ opset_version=16
563
+ )
564
+ x = self.ad(x)
565
+
566
+ print((x == org_y_x).all())
567
+ return x
568
+
569
+ def forward(self, condition=None, init_noise=None, pndms=None, k_step=None):
570
+ cond = condition
571
+ x = init_noise
572
+
573
+ device = cond.device
574
+ n_frames = cond.shape[2]
575
+ step_range = torch.arange(0, k_step.item(), pndms.item(), dtype=torch.long, device=device).flip(0)
576
+ plms_noise_stage = torch.tensor(0, dtype=torch.long, device=device)
577
+ noise_list = torch.zeros((0, 1, 1, self.mel_bins, n_frames), device=device)
578
+
579
+ ot = step_range[0]
580
+ ot_1 = torch.full((1,), ot, device=device, dtype=torch.long)
581
+
582
+ for t in step_range:
583
+ t_1 = torch.full((1,), t, device=device, dtype=torch.long)
584
+ noise_pred = self.denoise_fn(x, t_1, cond)
585
+ t_prev = t_1 - pndms
586
+ t_prev = t_prev * (t_prev > 0)
587
+ if plms_noise_stage == 0:
588
+ x_pred = self.get_x_pred(x, noise_pred, t_1, t_prev)
589
+ noise_pred_prev = self.denoise_fn(x_pred, t_prev, cond=cond)
590
+ noise_pred_prime = predict_stage0(noise_pred, noise_pred_prev)
591
+
592
+ elif plms_noise_stage == 1:
593
+ noise_pred_prime = predict_stage1(noise_pred, noise_list)
594
+
595
+ elif plms_noise_stage == 2:
596
+ noise_pred_prime = predict_stage2(noise_pred, noise_list)
597
+
598
+ else:
599
+ noise_pred_prime = predict_stage3(noise_pred, noise_list)
600
+
601
+ noise_pred = noise_pred.unsqueeze(0)
602
+
603
+ if plms_noise_stage < 3:
604
+ noise_list = torch.cat((noise_list, noise_pred), dim=0)
605
+ plms_noise_stage = plms_noise_stage + 1
606
+
607
+ else:
608
+ noise_list = torch.cat((noise_list[-2:], noise_pred), dim=0)
609
+
610
+ x = self.get_x_pred(x, noise_pred_prime, t_1, t_prev)
611
+ x = self.ad(x)
612
+ return x
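For orientation, a hedged sketch of driving the exported `*_denoise.onnx` graph with onnxruntime. The input and output names follow the `torch.onnx.export` calls above; the file name, tensor sizes, and the `onnxruntime` dependency are assumptions of this example rather than something this commit pins down:

```python
import numpy as np
import onnxruntime as ort  # assumed available; not declared by this commit

# Shapes mirror the export: noise [1, 1, mel_bins, T], condition [1, n_hidden, T].
mel_bins, n_hidden, n_frames = 128, 256, 10   # illustrative sizes
noise = np.random.randn(1, 1, mel_bins, n_frames).astype(np.float32)
cond = np.random.randn(1, n_hidden, n_frames).astype(np.float32)
t = np.array([999], dtype=np.int64)

sess = ort.InferenceSession("project_denoise.onnx")  # hypothetical file name
(noise_pred,) = sess.run(["noise_pred"],
                         {"noise": noise, "time": t, "condition": cond})
```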
diffusion/dpm_solver_pytorch.py ADDED
@@ -0,0 +1,1201 @@
1
+ import math
2
+
3
+ import torch
4
+
5
+
6
+ class NoiseScheduleVP:
7
+ def __init__(
8
+ self,
9
+ schedule='discrete',
10
+ betas=None,
11
+ alphas_cumprod=None,
12
+ continuous_beta_0=0.1,
13
+ continuous_beta_1=20.,
14
+ ):
15
+ """Create a wrapper class for the forward SDE (VP type).
16
+
17
+ ***
18
+ Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
19
+ We recommend using schedule='discrete' for discrete-time diffusion models, especially for high-resolution images.
20
+ ***
21
+
22
+ The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
23
+ We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
24
+ Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
25
+
26
+ log_alpha_t = self.marginal_log_mean_coeff(t)
27
+ sigma_t = self.marginal_std(t)
28
+ lambda_t = self.marginal_lambda(t)
29
+
30
+ Moreover, as lambda(t) is an invertible function, we also support its inverse function:
31
+
32
+ t = self.inverse_lambda(lambda_t)
33
+
34
+ ===============================================================
35
+
36
+ We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
37
+
38
+ 1. For discrete-time DPMs:
39
+
40
+ For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
41
+ t_i = (i + 1) / N
42
+ e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
43
+ We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
44
+
45
+ Args:
46
+ betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
47
+ alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
48
+
49
+ Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
50
+
51
+ **Important**: Please pay special attention to the argument `alphas_cumprod`:
52
+ The `alphas_cumprod` is the \hat{alpha_n} array in the notation of DDPM. Specifically, DDPMs assume that
53
+ q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
54
+ Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
55
+ alpha_{t_n} = \sqrt{\hat{alpha_n}},
56
+ and
57
+ log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
58
+
59
+
60
+ 2. For continuous-time DPMs:
61
+
62
+ We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
63
+ schedule are the default settings in DDPM and improved-DDPM:
64
+
65
+ Args:
66
+ beta_min: A `float` number. The smallest beta for the linear schedule.
67
+ beta_max: A `float` number. The largest beta for the linear schedule.
68
+ cosine_s: A `float` number. The hyperparameter in the cosine schedule.
69
+ cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
70
+ T: A `float` number. The ending time of the forward process.
71
+
72
+ ===============================================================
73
+
74
+ Args:
75
+ schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
76
+ 'linear' or 'cosine' for continuous-time DPMs.
77
+ Returns:
78
+ A wrapper object of the forward SDE (VP type).
79
+
80
+ ===============================================================
81
+
82
+ Example:
83
+
84
+ # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
85
+ >>> ns = NoiseScheduleVP('discrete', betas=betas)
86
+
87
+ # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
88
+ >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
89
+
90
+ # For continuous-time DPMs (VPSDE), linear schedule:
91
+ >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
92
+
93
+ """
94
+
95
+ if schedule not in ['discrete', 'linear', 'cosine']:
96
+ raise ValueError(
97
+ "Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(
98
+ schedule))
99
+
100
+ self.schedule = schedule
101
+ if schedule == 'discrete':
102
+ if betas is not None:
103
+ log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
104
+ else:
105
+ assert alphas_cumprod is not None
106
+ log_alphas = 0.5 * torch.log(alphas_cumprod)
107
+ self.total_N = len(log_alphas)
108
+ self.T = 1.
109
+ self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
110
+ self.log_alpha_array = log_alphas.reshape((1, -1,))
111
+ else:
112
+ self.total_N = 1000
113
+ self.beta_0 = continuous_beta_0
114
+ self.beta_1 = continuous_beta_1
115
+ self.cosine_s = 0.008
116
+ self.cosine_beta_max = 999.
117
+ self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (
118
+ 1. + self.cosine_s) / math.pi - self.cosine_s
119
+ self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
120
+ self.schedule = schedule
121
+ if schedule == 'cosine':
122
+ # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
123
+ # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.
124
+ self.T = 0.9946
125
+ else:
126
+ self.T = 1.
127
+
128
+ def marginal_log_mean_coeff(self, t):
129
+ """
130
+ Compute log(alpha_t) of a given continuous-time label t in [0, T].
131
+ """
132
+ if self.schedule == 'discrete':
133
+ return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device),
134
+ self.log_alpha_array.to(t.device)).reshape((-1))
135
+ elif self.schedule == 'linear':
136
+ return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
137
+ elif self.schedule == 'cosine':
138
+ log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
139
+ log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
140
+ return log_alpha_t
141
+
142
+ def marginal_alpha(self, t):
143
+ """
144
+ Compute alpha_t of a given continuous-time label t in [0, T].
145
+ """
146
+ return torch.exp(self.marginal_log_mean_coeff(t))
147
+
148
+ def marginal_std(self, t):
149
+ """
150
+ Compute sigma_t of a given continuous-time label t in [0, T].
151
+ """
152
+ return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
153
+
154
+ def marginal_lambda(self, t):
155
+ """
156
+ Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
157
+ """
158
+ log_mean_coeff = self.marginal_log_mean_coeff(t)
159
+ log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
160
+ return log_mean_coeff - log_std
161
+
162
+ def inverse_lambda(self, lamb):
163
+ """
164
+ Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
165
+ """
166
+ if self.schedule == 'linear':
167
+ tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
168
+ Delta = self.beta_0 ** 2 + tmp
169
+ return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
170
+ elif self.schedule == 'discrete':
171
+ log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
172
+ t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]),
173
+ torch.flip(self.t_array.to(lamb.device), [1]))
174
+ return t.reshape((-1,))
175
+ else:
176
+ log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
177
+ t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (
178
+ 1. + self.cosine_s) / math.pi - self.cosine_s
179
+ t = t_fn(log_alpha)
180
+ return t
181
+
182
+
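A quick way to read the three marginal functions together: for the VP SDE, alpha_t and sigma_t always satisfy alpha_t^2 + sigma_t^2 = 1, and `inverse_lambda` undoes `marginal_lambda`. A small sketch checking both identities on the linear schedule (the tolerances and sample points are illustrative):

```python
import torch

ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
t = 0.05 + 0.9 * torch.rand(8)   # stay away from t = 0 for numerical safety

alpha, sigma = ns.marginal_alpha(t), ns.marginal_std(t)
assert torch.allclose(alpha ** 2 + sigma ** 2, torch.ones_like(t), atol=1e-5)

t_rec = ns.inverse_lambda(ns.marginal_lambda(t))
assert torch.allclose(t_rec, t, atol=1e-4)
```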
183
+ def model_wrapper(
184
+ model,
185
+ noise_schedule,
186
+ model_type="noise",
187
+ model_kwargs={},
188
+ guidance_type="uncond",
189
+ condition=None,
190
+ unconditional_condition=None,
191
+ guidance_scale=1.,
192
+ classifier_fn=None,
193
+ classifier_kwargs={},
194
+ ):
195
+ """Create a wrapper function for the noise prediction model.
196
+
197
+ DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
198
+ first wrap the model function into a noise prediction model that accepts continuous time as input.
199
+
200
+ We support four types of the diffusion model by setting `model_type`:
201
+
202
+ 1. "noise": noise prediction model. (Trained by predicting noise).
203
+
204
+ 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
205
+
206
+ 3. "v": velocity prediction model. (Trained by predicting the velocity).
207
+ The "v" prediction is derived in detail in Appendix D of [1], and is used in Imagen-Video [2].
208
+
209
+ [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
210
+ arXiv preprint arXiv:2202.00512 (2022).
211
+ [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
212
+ arXiv preprint arXiv:2210.02303 (2022).
213
+
214
+ 4. "score": marginal score function. (Trained by denoising score matching).
215
+ Note that the score function and the noise prediction model follows a simple relationship:
216
+ ```
217
+ noise(x_t, t) = -sigma_t * score(x_t, t)
218
+ ```
219
+
220
+ We support three types of guided sampling by DPMs by setting `guidance_type`:
221
+ 1. "uncond": unconditional sampling by DPMs.
222
+ The input `model` has the following format:
223
+ ``
224
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
225
+ ``
226
+
227
+ 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
228
+ The input `model` has the following format:
229
+ ``
230
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
231
+ ``
232
+
233
+ The input `classifier_fn` has the following format:
234
+ ``
235
+ classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
236
+ ``
237
+
238
+ [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
239
+ in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
240
+
241
+ 3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
242
+ The input `model` has the following format:
243
+ ``
244
+ model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
245
+ ``
246
+ And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
247
+
248
+ [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
249
+ arXiv preprint arXiv:2207.12598 (2022).
250
+
251
+
252
+ The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
253
+ or continuous-time labels (i.e. epsilon to T).
254
+
255
+ We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:
256
+ ``
257
+ def model_fn(x, t_continuous) -> noise:
258
+ t_input = get_model_input_time(t_continuous)
259
+ return noise_pred(model, x, t_input, **model_kwargs)
260
+ ``
261
+ where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
262
+
263
+ ===============================================================
264
+
265
+ Args:
266
+ model: A diffusion model with the corresponding format described above.
267
+ noise_schedule: A noise schedule object, such as NoiseScheduleVP.
268
+ model_type: A `str`. The parameterization type of the diffusion model.
269
+ "noise" or "x_start" or "v" or "score".
270
+ model_kwargs: A `dict`. A dict for the other inputs of the model function.
271
+ guidance_type: A `str`. The type of the guidance for sampling.
272
+ "uncond" or "classifier" or "classifier-free".
273
+ condition: A pytorch tensor. The condition for the guided sampling.
274
+ Only used for "classifier" or "classifier-free" guidance type.
275
+ unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
276
+ Only used for "classifier-free" guidance type.
277
+ guidance_scale: A `float`. The scale for the guided sampling.
278
+ classifier_fn: A classifier function. Only used for the classifier guidance.
279
+ classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
280
+ Returns:
281
+ A noise prediction model that accepts the noised data and the continuous time as the inputs.
282
+ """
283
+
284
+ def get_model_input_time(t_continuous):
285
+ """
286
+ Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
287
+ For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
288
+ For continuous-time DPMs, we just use `t_continuous`.
289
+ """
290
+ if noise_schedule.schedule == 'discrete':
291
+ return (t_continuous - 1. / noise_schedule.total_N) * noise_schedule.total_N
292
+ else:
293
+ return t_continuous
294
+
295
+ def noise_pred_fn(x, t_continuous, cond=None):
296
+ if t_continuous.reshape((-1,)).shape[0] == 1:
297
+ t_continuous = t_continuous.expand((x.shape[0]))
298
+ t_input = get_model_input_time(t_continuous)
299
+ if cond is None:
300
+ output = model(x, t_input, **model_kwargs)
301
+ else:
302
+ output = model(x, t_input, cond, **model_kwargs)
303
+ if model_type == "noise":
304
+ return output
305
+ elif model_type == "x_start":
306
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
307
+ dims = x.dim()
308
+ return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
309
+ elif model_type == "v":
310
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
311
+ dims = x.dim()
312
+ return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
313
+ elif model_type == "score":
314
+ sigma_t = noise_schedule.marginal_std(t_continuous)
315
+ dims = x.dim()
316
+ return -expand_dims(sigma_t, dims) * output
317
+
318
+ def cond_grad_fn(x, t_input):
319
+ """
320
+ Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
321
+ """
322
+ with torch.enable_grad():
323
+ x_in = x.detach().requires_grad_(True)
324
+ log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
325
+ return torch.autograd.grad(log_prob.sum(), x_in)[0]
326
+
327
+ def model_fn(x, t_continuous):
328
+ """
329
+ The noise prediction model function that is used for DPM-Solver.
330
+ """
331
+ if t_continuous.reshape((-1,)).shape[0] == 1:
332
+ t_continuous = t_continuous.expand((x.shape[0]))
333
+ if guidance_type == "uncond":
334
+ return noise_pred_fn(x, t_continuous)
335
+ elif guidance_type == "classifier":
336
+ assert classifier_fn is not None
337
+ t_input = get_model_input_time(t_continuous)
338
+ cond_grad = cond_grad_fn(x, t_input)
339
+ sigma_t = noise_schedule.marginal_std(t_continuous)
340
+ noise = noise_pred_fn(x, t_continuous)
341
+ return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
342
+ elif guidance_type == "classifier-free":
343
+ if guidance_scale == 1. or unconditional_condition is None:
344
+ return noise_pred_fn(x, t_continuous, cond=condition)
345
+ else:
346
+ x_in = torch.cat([x] * 2)
347
+ t_in = torch.cat([t_continuous] * 2)
348
+ c_in = torch.cat([unconditional_condition, condition])
349
+ noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
350
+ return noise_uncond + guidance_scale * (noise - noise_uncond)
351
+
352
+ assert model_type in ["noise", "x_start", "v", "score"]
353
+ assert guidance_type in ["uncond", "classifier", "classifier-free"]
354
+ return model_fn
355
+
356
+
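A minimal end-to-end sketch of the wrapper contract, using a stand-in denoiser rather than a trained network (everything here is illustrative; the constant-zero model only exercises the plumbing):

```python
import torch

betas = torch.linspace(1e-4, 0.02, 1000)
ns = NoiseScheduleVP('discrete', betas=betas)

def toy_denoiser(x, t_input, **kwargs):
    # Stands in for a trained noise-prediction net; t_input is discrete-time.
    return torch.zeros_like(x)

model_fn = model_wrapper(toy_denoiser, ns,
                         model_type="noise", guidance_type="uncond")

dpm_solver = DPM_Solver(model_fn, ns)
x_T = torch.randn(1, 1, 128, 10)
x_0 = dpm_solver.sample(x_T, steps=20, order=3,
                        skip_type='time_uniform', method='singlestep')
```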
357
+ class DPM_Solver:
358
+ def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.):
359
+ """Construct a DPM-Solver.
360
+
361
+ We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").
362
+ If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver).
363
+ If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++).
364
+ In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True.
365
+ The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales.
366
+
367
+ Args:
368
+ model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):
369
+ ``
370
+ def model_fn(x, t_continuous):
371
+ return noise
372
+ ``
373
+ noise_schedule: A noise schedule object, such as NoiseScheduleVP.
374
+ predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model.
375
+ thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1].
376
+ max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding.
377
+
378
+ [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b.
379
+ """
380
+ self.model = model_fn
381
+ self.noise_schedule = noise_schedule
382
+ self.predict_x0 = predict_x0
383
+ self.thresholding = thresholding
384
+ self.max_val = max_val
385
+
386
+ def noise_prediction_fn(self, x, t):
387
+ """
388
+ Return the noise prediction model.
389
+ """
390
+ return self.model(x, t)
391
+
392
+ def data_prediction_fn(self, x, t):
393
+ """
394
+ Return the data prediction model (with thresholding).
395
+ """
396
+ noise = self.noise_prediction_fn(x, t)
397
+ dims = x.dim()
398
+ alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
399
+ x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
400
+ if self.thresholding:
401
+ p = 0.995 # A hyperparameter in the paper of "Imagen" [1].
402
+ s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
403
+ s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
404
+ x0 = torch.clamp(x0, -s, s) / s
405
+ return x0
406
+
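The dynamic-thresholding step in isolation, as a hedged sketch: per sample, clamp the predicted x0 to its 99.5th-percentile magnitude s (floored at `max_val`), then rescale by s, which keeps the output inside [-1, 1]:

```python
import torch

x0 = torch.randn(2, 3, 8, 8) * 3.0                       # toy x0 prediction
s = torch.quantile(x0.abs().reshape(x0.shape[0], -1), 0.995, dim=1)
s = torch.maximum(s, torch.ones_like(s)).reshape(-1, 1, 1, 1)  # floor at 1.0
x0_clamped = torch.clamp(x0, -s, s) / s
assert x0_clamped.abs().max() <= 1.0
```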
407
+ def model_fn(self, x, t):
408
+ """
409
+ Convert the model to the noise prediction model or the data prediction model.
410
+ """
411
+ if self.predict_x0:
412
+ return self.data_prediction_fn(x, t)
413
+ else:
414
+ return self.noise_prediction_fn(x, t)
415
+
416
+ def get_time_steps(self, skip_type, t_T, t_0, N, device):
417
+ """Compute the intermediate time steps for sampling.
418
+
419
+ Args:
420
+ skip_type: A `str`. The type for the spacing of the time steps. We support three types:
421
+ - 'logSNR': uniform logSNR for the time steps.
422
+ - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)
423
+ - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)
424
+ t_T: A `float`. The starting time of the sampling (default is T).
425
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
426
+ N: A `int`. The total number of the spacing of the time steps.
427
+ device: A torch device.
428
+ Returns:
429
+ A pytorch tensor of the time steps, with the shape (N + 1,).
430
+ """
431
+ if skip_type == 'logSNR':
432
+ lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
433
+ lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
434
+ logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
435
+ return self.noise_schedule.inverse_lambda(logSNR_steps)
436
+ elif skip_type == 'time_uniform':
437
+ return torch.linspace(t_T, t_0, N + 1).to(device)
438
+ elif skip_type == 'time_quadratic':
439
+ t_order = 2
440
+ t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. / t_order), N + 1).pow(t_order).to(device)
441
+ return t
442
+ else:
443
+ raise ValueError(
444
+ "Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
445
+
446
+ def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
447
+ """
448
+ Get the order of each step for sampling by the singlestep DPM-Solver.
449
+
450
+ We combine DPM-Solver-1, -2, and -3 to use all the function evaluations; the combination is named "DPM-Solver-fast".
451
+ Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
452
+ - If order == 1:
453
+ We take `steps` of DPM-Solver-1 (i.e. DDIM).
454
+ - If order == 2:
455
+ - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
456
+ - If steps % 2 == 0, we use K steps of DPM-Solver-2.
457
+ - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.
458
+ - If order == 3:
459
+ - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
460
+ - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.
461
+ - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.
462
+ - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.
463
+
464
+ ============================================
465
+ Args:
466
+ order: A `int`. The max order for the solver (2 or 3).
467
+ steps: A `int`. The total number of function evaluations (NFE).
468
+ skip_type: A `str`. The type for the spacing of the time steps. We support three types:
469
+ - 'logSNR': uniform logSNR for the time steps.
470
+ - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)
471
+ - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)
472
+ t_T: A `float`. The starting time of the sampling (default is T).
473
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
474
+ device: A torch device.
475
+ Returns:
476
+ orders: A list of the solver order of each step.
477
+ """
478
+ if order == 3:
479
+ K = steps // 3 + 1
480
+ if steps % 3 == 0:
481
+ orders = [3, ] * (K - 2) + [2, 1]
482
+ elif steps % 3 == 1:
483
+ orders = [3, ] * (K - 1) + [1]
484
+ else:
485
+ orders = [3, ] * (K - 1) + [2]
486
+ elif order == 2:
487
+ if steps % 2 == 0:
488
+ K = steps // 2
489
+ orders = [2, ] * K
490
+ else:
491
+ K = steps // 2 + 1
492
+ orders = [2, ] * (K - 1) + [1]
493
+ elif order == 1:
494
+ K = 1
495
+ orders = [1, ] * steps
496
+ else:
497
+ raise ValueError("'order' must be '1' or '2' or '3'.")
498
+ if skip_type == 'logSNR':
499
+ # To reproduce the results in DPM-Solver paper
500
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
501
+ else:
502
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[
503
+ torch.cumsum(torch.tensor([0, ] + orders), dim=0).to(device)]
504
+ return timesteps_outer, orders
505
+
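A worked instance of the decomposition described in the docstring, for `steps=20, order=3` (pure arithmetic, shown as a sketch):

```python
steps, order = 20, 3
K = steps // 3 + 1                  # 7 outer steps
# steps % 3 == 2, so the last outer step drops to second order:
orders = [3] * (K - 1) + [2]
assert sum(orders) == steps         # exactly 20 function evaluations
```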
506
+ def denoise_fn(self, x, s):
507
+ """
508
+ Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infinity by first-order discretization.
509
+ """
510
+ return self.data_prediction_fn(x, s)
511
+
512
+ def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
513
+ """
514
+ DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.
515
+
516
+ Args:
517
+ x: A pytorch tensor. The initial value at time `s`.
518
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
519
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
520
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
521
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
522
+ return_intermediate: A `bool`. If true, also return the model value at time `s`.
523
+ Returns:
524
+ x_t: A pytorch tensor. The approximated solution at time `t`.
525
+ """
526
+ ns = self.noise_schedule
527
+ dims = x.dim()
528
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
529
+ h = lambda_t - lambda_s
530
+ log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
531
+ sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
532
+ alpha_t = torch.exp(log_alpha_t)
533
+
534
+ if self.predict_x0:
535
+ phi_1 = torch.expm1(-h)
536
+ if model_s is None:
537
+ model_s = self.model_fn(x, s)
538
+ x_t = (
539
+ expand_dims(sigma_t / sigma_s, dims) * x
540
+ - expand_dims(alpha_t * phi_1, dims) * model_s
541
+ )
542
+ if return_intermediate:
543
+ return x_t, {'model_s': model_s}
544
+ else:
545
+ return x_t
546
+ else:
547
+ phi_1 = torch.expm1(h)
548
+ if model_s is None:
549
+ model_s = self.model_fn(x, s)
550
+ x_t = (
551
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
552
+ - expand_dims(sigma_t * phi_1, dims) * model_s
553
+ )
554
+ if return_intermediate:
555
+ return x_t, {'model_s': model_s}
556
+ else:
557
+ return x_t
558
+
559
+ def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False,
560
+ solver_type='dpm_solver'):
561
+ """
562
+ Singlestep solver DPM-Solver-2 from time `s` to time `t`.
563
+
564
+ Args:
565
+ x: A pytorch tensor. The initial value at time `s`.
566
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
567
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
568
+ r1: A `float`. The hyperparameter of the second-order solver.
569
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
570
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
571
+ return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
572
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
573
+ The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
574
+ Returns:
575
+ x_t: A pytorch tensor. The approximated solution at time `t`.
576
+ """
577
+ if solver_type not in ['dpm_solver', 'taylor']:
578
+ raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
579
+ if r1 is None:
580
+ r1 = 0.5
581
+ ns = self.noise_schedule
582
+ dims = x.dim()
583
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
584
+ h = lambda_t - lambda_s
585
+ lambda_s1 = lambda_s + r1 * h
586
+ s1 = ns.inverse_lambda(lambda_s1)
587
+ log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(
588
+ s1), ns.marginal_log_mean_coeff(t)
589
+ sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)
590
+ alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)
591
+
592
+ if self.predict_x0:
593
+ phi_11 = torch.expm1(-r1 * h)
594
+ phi_1 = torch.expm1(-h)
595
+
596
+ if model_s is None:
597
+ model_s = self.model_fn(x, s)
598
+ x_s1 = (
599
+ expand_dims(sigma_s1 / sigma_s, dims) * x
600
+ - expand_dims(alpha_s1 * phi_11, dims) * model_s
601
+ )
602
+ model_s1 = self.model_fn(x_s1, s1)
603
+ if solver_type == 'dpm_solver':
604
+ x_t = (
605
+ expand_dims(sigma_t / sigma_s, dims) * x
606
+ - expand_dims(alpha_t * phi_1, dims) * model_s
607
+ - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s)
608
+ )
609
+ elif solver_type == 'taylor':
610
+ x_t = (
611
+ expand_dims(sigma_t / sigma_s, dims) * x
612
+ - expand_dims(alpha_t * phi_1, dims) * model_s
613
+ + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (
614
+ model_s1 - model_s)
615
+ )
616
+ else:
617
+ phi_11 = torch.expm1(r1 * h)
618
+ phi_1 = torch.expm1(h)
619
+
620
+ if model_s is None:
621
+ model_s = self.model_fn(x, s)
622
+ x_s1 = (
623
+ expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
624
+ - expand_dims(sigma_s1 * phi_11, dims) * model_s
625
+ )
626
+ model_s1 = self.model_fn(x_s1, s1)
627
+ if solver_type == 'dpm_solver':
628
+ x_t = (
629
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
630
+ - expand_dims(sigma_t * phi_1, dims) * model_s
631
+ - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s)
632
+ )
633
+ elif solver_type == 'taylor':
634
+ x_t = (
635
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
636
+ - expand_dims(sigma_t * phi_1, dims) * model_s
637
+ - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s)
638
+ )
639
+ if return_intermediate:
640
+ return x_t, {'model_s': model_s, 'model_s1': model_s1}
641
+ else:
642
+ return x_t
643
+
644
+ def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None,
645
+ return_intermediate=False, solver_type='dpm_solver'):
646
+ """
647
+ Singlestep solver DPM-Solver-3 from time `s` to time `t`.
648
+
649
+ Args:
650
+ x: A pytorch tensor. The initial value at time `s`.
651
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
652
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
653
+ r1: A `float`. The hyperparameter of the third-order solver.
654
+ r2: A `float`. The hyperparameter of the third-order solver.
655
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
656
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
657
+ model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).
658
+ If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.
659
+ return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
660
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
661
+ The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
662
+ Returns:
663
+ x_t: A pytorch tensor. The approximated solution at time `t`.
664
+ """
665
+ if solver_type not in ['dpm_solver', 'taylor']:
666
+ raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
667
+ if r1 is None:
668
+ r1 = 1. / 3.
669
+ if r2 is None:
670
+ r2 = 2. / 3.
671
+ ns = self.noise_schedule
672
+ dims = x.dim()
673
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
674
+ h = lambda_t - lambda_s
675
+ lambda_s1 = lambda_s + r1 * h
676
+ lambda_s2 = lambda_s + r2 * h
677
+ s1 = ns.inverse_lambda(lambda_s1)
678
+ s2 = ns.inverse_lambda(lambda_s2)
679
+ log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(
680
+ s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)
681
+ sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(
682
+ s2), ns.marginal_std(t)
683
+ alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)
684
+
685
+ if self.predict_x0:
686
+ phi_11 = torch.expm1(-r1 * h)
687
+ phi_12 = torch.expm1(-r2 * h)
688
+ phi_1 = torch.expm1(-h)
689
+ phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.
690
+ phi_2 = phi_1 / h + 1.
691
+ phi_3 = phi_2 / h - 0.5
692
+
693
+ if model_s is None:
694
+ model_s = self.model_fn(x, s)
695
+ if model_s1 is None:
696
+ x_s1 = (
697
+ expand_dims(sigma_s1 / sigma_s, dims) * x
698
+ - expand_dims(alpha_s1 * phi_11, dims) * model_s
699
+ )
700
+ model_s1 = self.model_fn(x_s1, s1)
701
+ x_s2 = (
702
+ expand_dims(sigma_s2 / sigma_s, dims) * x
703
+ - expand_dims(alpha_s2 * phi_12, dims) * model_s
704
+ + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s)
705
+ )
706
+ model_s2 = self.model_fn(x_s2, s2)
707
+ if solver_type == 'dpm_solver':
708
+ x_t = (
709
+ expand_dims(sigma_t / sigma_s, dims) * x
710
+ - expand_dims(alpha_t * phi_1, dims) * model_s
711
+ + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s)
712
+ )
713
+ elif solver_type == 'taylor':
714
+ D1_0 = (1. / r1) * (model_s1 - model_s)
715
+ D1_1 = (1. / r2) * (model_s2 - model_s)
716
+ D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
717
+ D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
718
+ x_t = (
719
+ expand_dims(sigma_t / sigma_s, dims) * x
720
+ - expand_dims(alpha_t * phi_1, dims) * model_s
721
+ + expand_dims(alpha_t * phi_2, dims) * D1
722
+ - expand_dims(alpha_t * phi_3, dims) * D2
723
+ )
724
+ else:
725
+ phi_11 = torch.expm1(r1 * h)
726
+ phi_12 = torch.expm1(r2 * h)
727
+ phi_1 = torch.expm1(h)
728
+ phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.
729
+ phi_2 = phi_1 / h - 1.
730
+ phi_3 = phi_2 / h - 0.5
731
+
732
+ if model_s is None:
733
+ model_s = self.model_fn(x, s)
734
+ if model_s1 is None:
735
+ x_s1 = (
736
+ expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
737
+ - expand_dims(sigma_s1 * phi_11, dims) * model_s
738
+ )
739
+ model_s1 = self.model_fn(x_s1, s1)
740
+ x_s2 = (
741
+ expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x
742
+ - expand_dims(sigma_s2 * phi_12, dims) * model_s
743
+ - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s)
744
+ )
745
+ model_s2 = self.model_fn(x_s2, s2)
746
+ if solver_type == 'dpm_solver':
747
+ x_t = (
748
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
749
+ - expand_dims(sigma_t * phi_1, dims) * model_s
750
+ - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s)
751
+ )
752
+ elif solver_type == 'taylor':
753
+ D1_0 = (1. / r1) * (model_s1 - model_s)
754
+ D1_1 = (1. / r2) * (model_s2 - model_s)
755
+ D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
756
+ D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
757
+ x_t = (
758
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
759
+ - expand_dims(sigma_t * phi_1, dims) * model_s
760
+ - expand_dims(sigma_t * phi_2, dims) * D1
761
+ - expand_dims(sigma_t * phi_3, dims) * D2
762
+ )
763
+
764
+ if return_intermediate:
765
+ return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}
766
+ else:
767
+ return x_t
768
+
769
+ def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"):
770
+ """
771
+ Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.
772
+
773
+ Args:
774
+ x: A pytorch tensor. The initial value at time `s`.
775
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
776
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
777
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
778
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
779
+ The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
780
+ Returns:
781
+ x_t: A pytorch tensor. The approximated solution at time `t`.
782
+ """
783
+ if solver_type not in ['dpm_solver', 'taylor']:
784
+ raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
785
+ ns = self.noise_schedule
786
+ dims = x.dim()
787
+ model_prev_1, model_prev_0 = model_prev_list
788
+ t_prev_1, t_prev_0 = t_prev_list
789
+ lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(
790
+ t_prev_0), ns.marginal_lambda(t)
791
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
792
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
793
+ alpha_t = torch.exp(log_alpha_t)
794
+
795
+ h_0 = lambda_prev_0 - lambda_prev_1
796
+ h = lambda_t - lambda_prev_0
797
+ r0 = h_0 / h
798
+ D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
799
+ if self.predict_x0:
800
+ if solver_type == 'dpm_solver':
801
+ x_t = (
802
+ expand_dims(sigma_t / sigma_prev_0, dims) * x
803
+ - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
804
+ - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0
805
+ )
806
+ elif solver_type == 'taylor':
807
+ x_t = (
808
+ expand_dims(sigma_t / sigma_prev_0, dims) * x
809
+ - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
810
+ + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0
811
+ )
812
+ else:
813
+ if solver_type == 'dpm_solver':
814
+ x_t = (
815
+ expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
816
+ - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
817
+ - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0
818
+ )
819
+ elif solver_type == 'taylor':
820
+ x_t = (
821
+ expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
822
+ - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
823
+ - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0
824
+ )
825
+ return x_t
826
+
827
+ def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'):
828
+ """
829
+ Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.
830
+
831
+ Args:
832
+ x: A pytorch tensor. The initial value at time `s`.
833
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
834
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
835
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
836
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
837
+ The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
838
+ Returns:
839
+ x_t: A pytorch tensor. The approximated solution at time `t`.
840
+ """
841
+ ns = self.noise_schedule
842
+ dims = x.dim()
843
+ model_prev_2, model_prev_1, model_prev_0 = model_prev_list
844
+ t_prev_2, t_prev_1, t_prev_0 = t_prev_list
845
+ lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(
846
+ t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
847
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
848
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
849
+ alpha_t = torch.exp(log_alpha_t)
850
+
851
+ h_1 = lambda_prev_1 - lambda_prev_2
852
+ h_0 = lambda_prev_0 - lambda_prev_1
853
+ h = lambda_t - lambda_prev_0
854
+ r0, r1 = h_0 / h, h_1 / h
855
+ D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
856
+ D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2)
857
+ D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1)
858
+ D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1)
859
+ if self.predict_x0:
860
+ x_t = (
861
+ expand_dims(sigma_t / sigma_prev_0, dims) * x
862
+ - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
863
+ + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1
864
+ - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2
865
+ )
866
+ else:
867
+ x_t = (
868
+ expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
869
+ - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
870
+ - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1
871
+ - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2
872
+ )
873
+ return x_t
874
+
875
+ def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None,
876
+ r2=None):
877
+ """
878
+ Singlestep DPM-Solver with the order `order` from time `s` to time `t`.
879
+
880
+ Args:
881
+ x: A pytorch tensor. The initial value at time `s`.
882
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
883
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
884
+ order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
885
+ return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
886
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
887
+ The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
888
+ r1: A `float`. The hyperparameter of the second-order or third-order solver.
889
+ r2: A `float`. The hyperparameter of the third-order solver.
890
+ Returns:
891
+ x_t: A pytorch tensor. The approximated solution at time `t`.
892
+ """
893
+ if order == 1:
894
+ return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
895
+ elif order == 2:
896
+ return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate,
897
+ solver_type=solver_type, r1=r1)
898
+ elif order == 3:
899
+ return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate,
900
+ solver_type=solver_type, r1=r1, r2=r2)
901
+ else:
902
+ raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
903
+
904
+ def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'):
905
+ """
906
+ Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.
907
+
908
+ Args:
909
+ x: A pytorch tensor. The initial value at time `s`.
910
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
911
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
912
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
913
+ order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
914
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
915
+ The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
916
+ Returns:
917
+ x_t: A pytorch tensor. The approximated solution at time `t`.
918
+ """
919
+ if order == 1:
920
+ return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])
921
+ elif order == 2:
922
+ return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
923
+ elif order == 3:
924
+ return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
925
+ else:
926
+ raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
927
+
928
+ def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5,
929
+ solver_type='dpm_solver'):
930
+ """
931
+ The adaptive step size solver based on singlestep DPM-Solver.
932
+
933
+ Args:
934
+ x: A pytorch tensor. The initial value at time `t_T`.
935
+ order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.
936
+ t_T: A `float`. The starting time of the sampling (default is T).
937
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
938
+ h_init: A `float`. The initial step size (for logSNR).
939
+ atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].
940
+ rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.
941
+ theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].
942
+ t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the
943
+ current time and `t_0` is less than `t_err`. The default setting is 1e-5.
944
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
945
+ The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
946
+ Returns:
947
+ x_0: A pytorch tensor. The approximated solution at time `t_0`.
948
+
949
+ [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021.
950
+ """
951
+ ns = self.noise_schedule
952
+ s = t_T * torch.ones((x.shape[0],)).to(x)
953
+ lambda_s = ns.marginal_lambda(s)
954
+ lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
955
+ h = h_init * torch.ones_like(s).to(x)
956
+ x_prev = x
957
+ nfe = 0
958
+ if order == 2:
959
+ r1 = 0.5
960
+ lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)
961
+ higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
962
+ solver_type=solver_type,
963
+ **kwargs)
964
+ elif order == 3:
965
+ r1, r2 = 1. / 3., 2. / 3.
966
+ lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
967
+ return_intermediate=True,
968
+ solver_type=solver_type)
969
+ higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2,
970
+ solver_type=solver_type,
971
+ **kwargs)
972
+ else:
973
+ raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order))
974
+ while torch.abs((s - t_0)).mean() > t_err:
975
+ t = ns.inverse_lambda(lambda_s + h)
976
+ x_lower, lower_noise_kwargs = lower_update(x, s, t)
977
+ x_higher = higher_update(x, s, t, **lower_noise_kwargs)
978
+ delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
979
+ norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
980
+ E = norm_fn((x_higher - x_lower) / delta).max()
981
+ if torch.all(E <= 1.):
982
+ x = x_higher
983
+ s = t
984
+ x_prev = x_lower
985
+ lambda_s = ns.marginal_lambda(s)
986
+ h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)
987
+ nfe += order
988
+ print('adaptive solver nfe', nfe)
989
+ return x
990
+
991
+ def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
992
+ method='singlestep', denoise=False, solver_type='dpm_solver', atol=0.0078,
993
+ rtol=0.05,
994
+ ):
995
+ """
996
+ Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.
997
+
998
+ =====================================================
999
+
1000
+ We support the following algorithms for both noise prediction model and data prediction model:
1001
+ - 'singlestep':
1002
+ Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver.
1003
+ We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).
1004
+ The total number of function evaluations (NFE) == `steps`.
1005
+ Given a fixed NFE == `steps`, the sampling procedure is:
1006
+ - If `order` == 1:
1007
+ - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).
1008
+ - If `order` == 2:
1009
+ - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.
1010
+ - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.
1011
+ - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
1012
+ - If `order` == 3:
1013
+ - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
1014
+ - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
1015
+ - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.
1016
+ - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.
1017
+ - 'multistep':
1018
+ Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.
1019
+ We initialize the first `order` values by lower order multistep solvers.
1020
+ Given a fixed NFE == `steps`, the sampling procedure is:
1021
+ Denote K = steps.
1022
+ - If `order` == 1:
1023
+ - We use K steps of DPM-Solver-1 (i.e. DDIM).
1024
+ - If `order` == 2:
1025
+ - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2.
1026
+ - If `order` == 3:
1027
+ - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3.
1028
+ - 'singlestep_fixed':
1029
+ Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
1030
+ We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
1031
+ - 'adaptive':
1032
+ Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
1033
+ We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
1034
+ You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs
1035
+ (NFE) and the sample quality.
1036
+ - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
1037
+ - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
1038
+
1039
+ =====================================================
1040
+
1041
+ Some advice for choosing the algorithm:
1042
+ - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
1043
+ Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`.
1044
+ e.g.
1045
+ >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)
1046
+ >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
1047
+ skip_type='time_uniform', method='singlestep')
1048
+ - For **guided sampling with large guidance scale** by DPMs:
1049
+ Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.
1050
+ e.g.
1051
+ >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)
1052
+ >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
1053
+ skip_type='time_uniform', method='multistep')
1054
+
1055
+ We support three types of `skip_type`:
1056
+ - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolution images**.
1057
+ - 'time_uniform': uniform time for the time steps. **Recommended for high-resolution images**.
1058
+ - 'time_quadratic': quadratic time for the time steps.
1059
+
1060
+ =====================================================
1061
+ Args:
1062
+ x: A pytorch tensor. The initial value at time `t_start`
1063
+ e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
1064
+ steps: An `int`. The total number of function evaluations (NFE).
1065
+ t_start: A `float`. The starting time of the sampling.
1066
+ If `t_start` is None, we use self.noise_schedule.T (default is 1.0).
1067
+ t_end: A `float`. The ending time of the sampling.
1068
+ If `t_end` is None, we use 1. / self.noise_schedule.total_N.
1069
+ e.g. if total_N == 1000, we have `t_end` == 1e-3.
1070
+ For discrete-time DPMs:
1071
+ - We recommend `t_end` == 1. / self.noise_schedule.total_N.
1072
+ For continuous-time DPMs:
1073
+ - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
1074
+ order: An `int`. The order of DPM-Solver.
1075
+ skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
1076
+ method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
1077
+ denoise: A `bool`. Whether to denoise at the final step. Default is False.
1078
+ If `denoise` is True, the total NFE is (`steps` + 1).
1079
+ solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`.
1080
+ atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
1081
+ rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
1082
+ Returns:
1083
+ x_end: A pytorch tensor. The approximated solution at time `t_end`.
1084
+
1085
+ """
1086
+ t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
1087
+ t_T = self.noise_schedule.T if t_start is None else t_start
1088
+ device = x.device
1089
+ if method == 'adaptive':
1090
+ with torch.no_grad():
1091
+ x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol,
1092
+ solver_type=solver_type)
1093
+ elif method == 'multistep':
1094
+ assert steps >= order
1095
+ timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
1096
+ assert timesteps.shape[0] - 1 == steps
1097
+ with torch.no_grad():
1098
+ vec_t = timesteps[0].expand((x.shape[0]))
1099
+ model_prev_list = [self.model_fn(x, vec_t)]
1100
+ t_prev_list = [vec_t]
1101
+ # Init the first `order` values by lower order multistep DPM-Solver.
1102
+ for init_order in range(1, order):
1103
+ vec_t = timesteps[init_order].expand(x.shape[0])
1104
+ x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order,
1105
+ solver_type=solver_type)
1106
+ model_prev_list.append(self.model_fn(x, vec_t))
1107
+ t_prev_list.append(vec_t)
1108
+ # Compute the remaining values by `order`-th order multistep DPM-Solver.
1109
+ for step in range(order, steps + 1):
1110
+ vec_t = timesteps[step].expand(x.shape[0])
1111
+ x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, order,
1112
+ solver_type=solver_type)
1113
+ for i in range(order - 1):
1114
+ t_prev_list[i] = t_prev_list[i + 1]
1115
+ model_prev_list[i] = model_prev_list[i + 1]
1116
+ t_prev_list[-1] = vec_t
1117
+ # We do not need to evaluate the final model value.
1118
+ if step < steps:
1119
+ model_prev_list[-1] = self.model_fn(x, vec_t)
1120
+ elif method in ['singlestep', 'singlestep_fixed']:
1121
+ if method == 'singlestep':
1122
+ timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order,
1123
+ skip_type=skip_type,
1124
+ t_T=t_T, t_0=t_0,
1125
+ device=device)
1126
+ elif method == 'singlestep_fixed':
1127
+ K = steps // order
1128
+ orders = [order, ] * K
1129
+ timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
1130
+ for i, order in enumerate(orders):
1131
+ t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1]
1132
+ timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(),
1133
+ N=order, device=device)
1134
+ lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
1135
+ vec_s, vec_t = t_T_inner.repeat(x.shape[0]), t_0_inner.repeat(x.shape[0])
1136
+ h = lambda_inner[-1] - lambda_inner[0]
1137
+ r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h
1138
+ r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h
1139
+ x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2)
1140
+ if denoise:
1141
+ x = self.denoise_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
1142
+ return x
1143
+
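As a quick orientation for the `sample` API documented above, a minimal sketch (the import path and the zero-noise `model_fn` are illustrative assumptions; in practice `model_fn` comes from the model wrapper and `betas` from your trained model):

import torch
from diffusion.dpm_solver_pytorch import NoiseSchedule, DPM_Solver

betas = torch.linspace(1e-4, 0.02, 1000)             # assumed discrete beta schedule
ns = NoiseSchedule(schedule='discrete', betas=betas)

def model_fn(x, t):
    return torch.zeros_like(x)                       # stand-in noise prediction

solver = DPM_Solver(model_fn, ns)
x_T = torch.randn(1, 1, 128, 100)                    # start from N(0, I) at t = T
x_0 = solver.sample(x_T, steps=20, order=2,
                    skip_type='time_uniform', method='multistep')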
1144
+
1145
+ #############################################################
1146
+ # other utility functions
1147
+ #############################################################
1148
+
1149
+ def interpolate_fn(x, xp, yp):
1150
+ """
1151
+ A piecewise linear function y = f(x), using xp and yp as keypoints.
1152
+ We implement f(x) in a differentiable way (i.e. applicable for autograd).
1153
+ The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.)
1154
+
1155
+ Args:
1156
+ x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
1157
+ xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
1158
+ yp: PyTorch tensor with shape [C, K].
1159
+ Returns:
1160
+ The function values f(x), with shape [N, C].
1161
+ """
1162
+ N, K = x.shape[0], xp.shape[1]
1163
+ all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
1164
+ sorted_all_x, x_indices = torch.sort(all_x, dim=2)
1165
+ x_idx = torch.argmin(x_indices, dim=2)
1166
+ cand_start_idx = x_idx - 1
1167
+ start_idx = torch.where(
1168
+ torch.eq(x_idx, 0),
1169
+ torch.tensor(1, device=x.device),
1170
+ torch.where(
1171
+ torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
1172
+ ),
1173
+ )
1174
+ end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
1175
+ start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
1176
+ end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
1177
+ start_idx2 = torch.where(
1178
+ torch.eq(x_idx, 0),
1179
+ torch.tensor(0, device=x.device),
1180
+ torch.where(
1181
+ torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
1182
+ ),
1183
+ )
1184
+ y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
1185
+ start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
1186
+ end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
1187
+ cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
1188
+ return cand
1189
+
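A small sanity check of `interpolate_fn` (defined just above; the keypoints are made up):

import torch

xp = torch.tensor([[0.0, 1.0]])    # [C=1, K=2] keypoints: f(0) = 0, f(1) = 2
yp = torch.tensor([[0.0, 2.0]])
x = torch.tensor([[0.5], [2.0]])   # [N=2, C=1]; 2.0 lies outside the keypoint range
print(interpolate_fn(x, xp, yp))   # tensor([[1.], [4.]]): the outer segment extrapolates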
1190
+
1191
+ def expand_dims(v, dims):
1192
+ """
1193
+ Expand the tensor `v` to the dim `dims`.
1194
+
1195
+ Args:
1196
+ `v`: a PyTorch tensor with shape [N].
1197
+ `dim`: a `int`.
1198
+ Returns:
1199
+ a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
1200
+ """
1201
+ return v[(...,) + (None,) * (dims - 1)]
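And `expand_dims` simply appends trailing singleton axes for broadcasting, e.g.:

import torch

v = torch.tensor([1.0, 2.0, 3.0])  # shape [3]
print(expand_dims(v, 4).shape)     # torch.Size([3, 1, 1, 1])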
diffusion/how to export onnx.md ADDED
@@ -0,0 +1,4 @@
1
+ - Open [onnx_export](onnx_export.py)
2
+ - Change `project_name = "dddsp"` to your project name
3
+ - Change `model_path = f'{project_name}/model_500000.pt'` to your model path
4
+ - Run the script
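Concretely, the two lines to edit at the bottom of onnx_export.py look like this ("my_project" is a placeholder):

project_name = "my_project"                     # your project name
model_path = f'{project_name}/model_500000.pt'  # your checkpoint path

Then run the script, e.g. `python onnx_export.py`; depending on how the flat imports resolve, you may need to run it from inside the diffusion/ folder.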
diffusion/infer_gt_mel.py ADDED
@@ -0,0 +1,74 @@
1
+ import numpy as np
2
+ import torch
3
+ import torch.nn.functional as F
4
+ from diffusion.unit2mel import load_model_vocoder
5
+
6
+
7
+ class DiffGtMel:
8
+ def __init__(self, project_path=None, device=None):
9
+ self.project_path = project_path
10
+ if device is not None:
11
+ self.device = device
12
+ else:
13
+ self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
14
+ self.model = None
15
+ self.vocoder = None
16
+ self.args = None
17
+
18
+ def flush_model(self, project_path, ddsp_config=None):
19
+ if (self.model is None) or (project_path != self.project_path):
20
+ model, vocoder, args = load_model_vocoder(project_path, device=self.device)
21
+ if self.check_args(ddsp_config, args):
22
+ self.model = model
23
+ self.vocoder = vocoder
24
+ self.args = args
25
+
26
+ def check_args(self, args1, args2):
27
+ if args1.data.block_size != args2.data.block_size:
28
+ raise ValueError("DDSP与DIFF模型的block_size不一致")
29
+ if args1.data.sampling_rate != args2.data.sampling_rate:
30
+ raise ValueError("DDSP与DIFF模型的sampling_rate不一致")
31
+ if args1.data.encoder != args2.data.encoder:
32
+ raise ValueError("DDSP与DIFF模型的encoder不一致")
33
+ return True
34
+
35
+ def __call__(self, audio, f0, hubert, volume, acc=1, spk_id=1, k_step=0, method='pndm',
36
+ spk_mix_dict=None, start_frame=0):
37
+ input_mel = self.vocoder.extract(audio, self.args.data.sampling_rate)
38
+ out_mel = self.model(
39
+ hubert,
40
+ f0,
41
+ volume,
42
+ spk_id=spk_id,
43
+ spk_mix_dict=spk_mix_dict,
44
+ gt_spec=input_mel,
45
+ infer=True,
46
+ infer_speedup=acc,
47
+ method=method,
48
+ k_step=k_step,
49
+ use_tqdm=False)
50
+ if start_frame > 0:
51
+ out_mel = out_mel[:, start_frame:, :]
52
+ f0 = f0[:, start_frame:, :]
53
+ output = self.vocoder.infer(out_mel, f0)
54
+ if start_frame > 0:
55
+ output = F.pad(output, (start_frame * self.vocoder.vocoder_hop_size, 0))
56
+ return output
57
+
58
+ def infer(self, audio, f0, hubert, volume, acc=1, spk_id=1, k_step=0, method='pndm', silence_front=0,
59
+ use_silence=False, spk_mix_dict=None):
60
+ start_frame = int(silence_front * self.vocoder.vocoder_sample_rate / self.vocoder.vocoder_hop_size)
61
+ if use_silence:
62
+ audio = audio[:, start_frame * self.vocoder.vocoder_hop_size:]
63
+ f0 = f0[:, start_frame:, :]
64
+ hubert = hubert[:, start_frame:, :]
65
+ volume = volume[:, start_frame:, :]
66
+ _start_frame = 0
67
+ else:
68
+ _start_frame = start_frame
69
+ audio = self.__call__(audio, f0, hubert, volume, acc=acc, spk_id=spk_id, k_step=k_step,
70
+ method=method, spk_mix_dict=spk_mix_dict, start_frame=_start_frame)
71
+ if use_silence:
72
+ if start_frame > 0:
73
+ audio = F.pad(audio, (start_frame * self.vocoder.vocoder_hop_size, 0))
74
+ return audio
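A hedged usage sketch of DiffGtMel (tensor shapes follow the methods above; the project path and hop size are assumptions, and the calls that need a real checkpoint are left commented):

import torch
from diffusion.infer_gt_mel import DiffGtMel

diff = DiffGtMel(device='cpu')
# diff.flush_model('logs/44k/diffusion', ddsp_config=ddsp_args)  # ddsp_args: your DDSP config object

B, frames, hop, n_units = 1, 100, 512, 768
audio  = torch.randn(B, frames * hop)       # [B, T_samples]
f0     = torch.full((B, frames, 1), 220.0)  # [B, T_frames, 1]
hubert = torch.randn(B, frames, n_units)    # [B, T_frames, n_units]
volume = torch.rand(B, frames, 1)           # [B, T_frames, 1]
# out = diff.infer(audio, f0, hubert, volume, acc=20, k_step=100, method='pndm')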
diffusion/logger/__init__.py ADDED
File without changes
diffusion/logger/saver.py ADDED
@@ -0,0 +1,150 @@
1
+ '''
2
+ author: wayn391@mastertones
3
+ '''
4
+
5
+ import os
6
+ import json
7
+ import time
8
+ import yaml
9
+ import datetime
10
+ import torch
11
+ import matplotlib.pyplot as plt
12
+ from . import utils
13
+ from torch.utils.tensorboard import SummaryWriter
14
+
15
+ class Saver(object):
16
+ def __init__(
17
+ self,
18
+ args,
19
+ initial_global_step=-1):
20
+
21
+ self.expdir = args.env.expdir
22
+ self.sample_rate = args.data.sampling_rate
23
+
24
+ # cold start
25
+ self.global_step = initial_global_step
26
+ self.init_time = time.time()
27
+ self.last_time = time.time()
28
+
29
+ # makedirs
30
+ os.makedirs(self.expdir, exist_ok=True)
31
+
32
+ # path
33
+ self.path_log_info = os.path.join(self.expdir, 'log_info.txt')
34
+
35
+ # ckpt
36
+ os.makedirs(self.expdir, exist_ok=True)
37
+
38
+ # writer
39
+ self.writer = SummaryWriter(os.path.join(self.expdir, 'logs'))
40
+
41
+ # save config
42
+ path_config = os.path.join(self.expdir, 'config.yaml')
43
+ with open(path_config, "w") as out_config:
44
+ yaml.dump(dict(args), out_config)
45
+
46
+
47
+ def log_info(self, msg):
48
+ '''log method'''
49
+ if isinstance(msg, dict):
50
+ msg_list = []
51
+ for k, v in msg.items():
52
+ tmp_str = ''
53
+ if isinstance(v, int):
54
+ tmp_str = '{}: {:,}'.format(k, v)
55
+ else:
56
+ tmp_str = '{}: {}'.format(k, v)
57
+
58
+ msg_list.append(tmp_str)
59
+ msg_str = '\n'.join(msg_list)
60
+ else:
61
+ msg_str = msg
62
+
63
+ # display
64
+ print(msg_str)
65
+
66
+ # save
67
+ with open(self.path_log_info, 'a') as fp:
68
+ fp.write(msg_str+'\n')
69
+
70
+ def log_value(self, dict):
71
+ for k, v in dict.items():
72
+ self.writer.add_scalar(k, v, self.global_step)
73
+
74
+ def log_spec(self, name, spec, spec_out, vmin=-14, vmax=3.5):
75
+ spec_cat = torch.cat([(spec_out - spec).abs() + vmin, spec, spec_out], -1)
76
+ spec = spec_cat[0]
77
+ if isinstance(spec, torch.Tensor):
78
+ spec = spec.cpu().numpy()
79
+ fig = plt.figure(figsize=(12, 9))
80
+ plt.pcolor(spec.T, vmin=vmin, vmax=vmax)
81
+ plt.tight_layout()
82
+ self.writer.add_figure(name, fig, self.global_step)
83
+
84
+ def log_audio(self, dict):
85
+ for k, v in dict.items():
86
+ self.writer.add_audio(k, v, global_step=self.global_step, sample_rate=self.sample_rate)
87
+
88
+ def get_interval_time(self, update=True):
89
+ cur_time = time.time()
90
+ time_interval = cur_time - self.last_time
91
+ if update:
92
+ self.last_time = cur_time
93
+ return time_interval
94
+
95
+ def get_total_time(self, to_str=True):
96
+ total_time = time.time() - self.init_time
97
+ if to_str:
98
+ total_time = str(datetime.timedelta(
99
+ seconds=total_time))[:-5]
100
+ return total_time
101
+
102
+ def save_model(
103
+ self,
104
+ model,
105
+ optimizer,
106
+ name='model',
107
+ postfix='',
108
+ to_json=False):
109
+ # path
110
+ if postfix:
111
+ postfix = '_' + postfix
112
+ path_pt = os.path.join(
113
+ self.expdir , name+postfix+'.pt')
114
+
115
+ # check
116
+ print(' [*] model checkpoint saved: {}'.format(path_pt))
117
+
118
+ # save
119
+ if optimizer is not None:
120
+ torch.save({
121
+ 'global_step': self.global_step,
122
+ 'model': model.state_dict(),
123
+ 'optimizer': optimizer.state_dict()}, path_pt)
124
+ else:
125
+ torch.save({
126
+ 'global_step': self.global_step,
127
+ 'model': model.state_dict()}, path_pt)
128
+
129
+ # to json
130
+ if to_json:
131
+ path_json = os.path.join(
132
+ self.expdir , name+'.json')
133
+ utils.to_json(path_pt, path_json)
134
+
135
+ def delete_model(self, name='model', postfix=''):
136
+ # path
137
+ if postfix:
138
+ postfix = '_' + postfix
139
+ path_pt = os.path.join(
140
+ self.expdir , name+postfix+'.pt')
141
+
142
+ # delete
143
+ if os.path.exists(path_pt):
144
+ os.remove(path_pt)
145
+ print(' [*] model checkpoint deleted: {}'.format(path_pt))
146
+
147
+ def global_step_increment(self):
148
+ self.global_step += 1
149
+
150
+
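A minimal sketch of driving this Saver (needs tensorboard and matplotlib installed; the args fields shown are the only ones the constructor reads, and DotDict comes from the sibling utils module added below):

from diffusion.logger.utils import DotDict
from diffusion.logger.saver import Saver

args = DotDict({'env': {'expdir': 'exp/demo'},
                'data': {'sampling_rate': 44100}})
saver = Saver(args, initial_global_step=0)
saver.log_info({'expdir': args.env.expdir, 'interval': 1000})
saver.log_value({'train/loss': 0.123})
saver.global_step_increment()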
diffusion/logger/utils.py ADDED
@@ -0,0 +1,126 @@
1
+ import os
2
+ import yaml
3
+ import json
4
+ import pickle
5
+ import torch
6
+
7
+ def traverse_dir(
8
+ root_dir,
9
+ extensions,
10
+ amount=None,
11
+ str_include=None,
12
+ str_exclude=None,
13
+ is_pure=False,
14
+ is_sort=False,
15
+ is_ext=True):
16
+
17
+ file_list = []
18
+ cnt = 0
19
+ for root, _, files in os.walk(root_dir):
20
+ for file in files:
21
+ if any([file.endswith(f".{ext}") for ext in extensions]):
22
+ # path
23
+ mix_path = os.path.join(root, file)
24
+ pure_path = mix_path[len(root_dir)+1:] if is_pure else mix_path
25
+
26
+ # amount
27
+ if (amount is not None) and (cnt == amount):
28
+ if is_sort:
29
+ file_list.sort()
30
+ return file_list
31
+
32
+ # check string
33
+ if (str_include is not None) and (str_include not in pure_path):
34
+ continue
35
+ if (str_exclude is not None) and (str_exclude in pure_path):
36
+ continue
37
+
38
+ if not is_ext:
39
+ ext = pure_path.split('.')[-1]
40
+ pure_path = pure_path[:-(len(ext)+1)]
41
+ file_list.append(pure_path)
42
+ cnt += 1
43
+ if is_sort:
44
+ file_list.sort()
45
+ return file_list
46
+
47
+
48
+
49
+ class DotDict(dict):
50
+ def __getattr__(*args):
51
+ val = dict.get(*args)
52
+ return DotDict(val) if type(val) is dict else val
53
+
54
+ __setattr__ = dict.__setitem__
55
+ __delattr__ = dict.__delitem__
56
+
57
+
58
+ def get_network_paras_amount(model_dict):
59
+ info = dict()
60
+ for model_name, model in model_dict.items():
61
+ # all_params = sum(p.numel() for p in model.parameters())
62
+ trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
63
+
64
+ info[model_name] = trainable_params
65
+ return info
66
+
67
+
68
+ def load_config(path_config):
69
+ with open(path_config, "r") as config:
70
+ args = yaml.safe_load(config)
71
+ args = DotDict(args)
72
+ # print(args)
73
+ return args
74
+
75
+ def save_config(path_config,config):
76
+ config = dict(config)
77
+ with open(path_config, "w") as f:
78
+ yaml.dump(config, f)
79
+
80
+ def to_json(path_params, path_json):
81
+ params = torch.load(path_params, map_location=torch.device('cpu'))
82
+ raw_state_dict = {}
83
+ for k, v in params.items():
84
+ val = v.flatten().numpy().tolist()
85
+ raw_state_dict[k] = val
86
+
87
+ with open(path_json, 'w') as outfile:
88
+ json.dump(raw_state_dict, outfile,indent= "\t")
89
+
90
+
91
+ def convert_tensor_to_numpy(tensor, is_squeeze=True):
92
+ if is_squeeze:
93
+ tensor = tensor.squeeze()
94
+ if tensor.requires_grad:
95
+ tensor = tensor.detach()
96
+ if tensor.is_cuda:
97
+ tensor = tensor.cpu()
98
+ return tensor.numpy()
99
+
100
+
101
+ def load_model(
102
+ expdir,
103
+ model,
104
+ optimizer,
105
+ name='model',
106
+ postfix='',
107
+ device='cpu'):
108
+ if postfix == '': # default to the '<name>_<step>.pt' naming written by Saver.save_model
109
+ postfix = '_' + postfix
110
+ path = os.path.join(expdir, name+postfix)
111
+ path_pt = traverse_dir(expdir, ['pt'], is_ext=False)
112
+ global_step = 0
113
+ if len(path_pt) > 0:
114
+ steps = [s[len(path):] for s in path_pt]
115
+ maxstep = max([int(s) if s.isdigit() else 0 for s in steps])
116
+ if maxstep >= 0:
117
+ path_pt = path+str(maxstep)+'.pt'
118
+ else:
119
+ path_pt = path+'best.pt'
120
+ print(' [*] restoring model from', path_pt)
121
+ ckpt = torch.load(path_pt, map_location=torch.device(device))
122
+ global_step = ckpt['global_step']
123
+ model.load_state_dict(ckpt['model'], strict=False)
124
+ if ckpt.get('optimizer') is not None:
125
+ optimizer.load_state_dict(ckpt['optimizer'])
126
+ return global_step, model, optimizer
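The DotDict defined above gives recursive attribute access over plain YAML dicts, which is how the output of load_config is consumed elsewhere; a quick demonstration (made-up values):

from diffusion.logger.utils import DotDict

cfg = DotDict({'model': {'n_layers': 20}, 'device': 'cpu'})
print(cfg.model.n_layers)  # 20 (nested dicts are wrapped on access)
print(cfg.missing)         # None (dict.get semantics, no KeyError)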
diffusion/onnx_export.py ADDED
@@ -0,0 +1,226 @@
1
+ from diffusion_onnx import GaussianDiffusion
2
+ import os
3
+ import yaml
4
+ import torch
5
+ import torch.nn as nn
6
+ import numpy as np
7
+ from wavenet import WaveNet
8
+ import torch.nn.functional as F
9
+ import diffusion
10
+
11
+ class DotDict(dict):
12
+ def __getattr__(*args):
13
+ val = dict.get(*args)
14
+ return DotDict(val) if type(val) is dict else val
15
+
16
+ __setattr__ = dict.__setitem__
17
+ __delattr__ = dict.__delitem__
18
+
19
+
20
+ def load_model_vocoder(
21
+ model_path,
22
+ device='cpu'):
23
+ config_file = os.path.join(os.path.split(model_path)[0], 'config.yaml')
24
+ with open(config_file, "r") as config:
25
+ args = yaml.safe_load(config)
26
+ args = DotDict(args)
27
+
28
+ # load model
29
+ model = Unit2Mel(
30
+ args.data.encoder_out_channels,
31
+ args.model.n_spk,
32
+ args.model.use_pitch_aug,
33
+ 128,
34
+ args.model.n_layers,
35
+ args.model.n_chans,
36
+ args.model.n_hidden)
37
+
38
+ print(' [Loading] ' + model_path)
39
+ ckpt = torch.load(model_path, map_location=torch.device(device))
40
+ model.to(device)
41
+ model.load_state_dict(ckpt['model'])
42
+ model.eval()
43
+ return model, args
44
+
45
+
46
+ class Unit2Mel(nn.Module):
47
+ def __init__(
48
+ self,
49
+ input_channel,
50
+ n_spk,
51
+ use_pitch_aug=False,
52
+ out_dims=128,
53
+ n_layers=20,
54
+ n_chans=384,
55
+ n_hidden=256):
56
+ super().__init__()
57
+ self.unit_embed = nn.Linear(input_channel, n_hidden)
58
+ self.f0_embed = nn.Linear(1, n_hidden)
59
+ self.volume_embed = nn.Linear(1, n_hidden)
60
+ if use_pitch_aug:
61
+ self.aug_shift_embed = nn.Linear(1, n_hidden, bias=False)
62
+ else:
63
+ self.aug_shift_embed = None
64
+ self.n_spk = n_spk
65
+ if n_spk is not None and n_spk > 1:
66
+ self.spk_embed = nn.Embedding(n_spk, n_hidden)
67
+
68
+ # diffusion
69
+ self.decoder = GaussianDiffusion(out_dims, n_layers, n_chans, n_hidden)
70
+ self.hidden_size = n_hidden
71
+ self.speaker_map = torch.zeros((self.n_spk,1,1,n_hidden))
72
+
73
+
74
+
75
+ def forward(self, units, mel2ph, f0, volume, g = None):
76
+
77
+ '''
78
+ input:
79
+ B x n_frames x n_unit
80
+ return:
81
+ dict of B x n_frames x feat
82
+ '''
83
+
84
+ decoder_inp = F.pad(units, [0, 0, 1, 0])
85
+ mel2ph_ = mel2ph.unsqueeze(2).repeat([1, 1, units.shape[-1]])
86
+ units = torch.gather(decoder_inp, 1, mel2ph_) # [B, T, H]
87
+
88
+ x = self.unit_embed(units) + self.f0_embed((1 + f0.unsqueeze(-1) / 700).log()) + self.volume_embed(volume.unsqueeze(-1))
89
+
90
+ if self.n_spk is not None and self.n_spk > 1: # [N, S] * [S, B, 1, H]
91
+ g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1]
92
+ g = g * self.speaker_map # [N, S, B, 1, H]
93
+ g = torch.sum(g, dim=1) # [N, 1, B, 1, H]
94
+ g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N]
95
+ x = x.transpose(1, 2) + g
96
+ return x
97
+ else:
98
+ return x.transpose(1, 2)
99
+
100
+
101
+ def init_spkembed(self, units, f0, volume, spk_id = None, spk_mix_dict = None, aug_shift = None,
102
+ gt_spec=None, infer=True, infer_speedup=10, method='dpm-solver', k_step=300, use_tqdm=True):
103
+
104
+ '''
105
+ input:
106
+ B x n_frames x n_unit
107
+ return:
108
+ dict of B x n_frames x feat
109
+ '''
110
+ x = self.unit_embed(units) + self.f0_embed((1+ f0 / 700).log()) + self.volume_embed(volume)
111
+ if self.n_spk is not None and self.n_spk > 1:
112
+ if spk_mix_dict is not None:
113
+ spk_embed_mix = torch.zeros((1,1,self.hidden_size))
114
+ for k, v in spk_mix_dict.items():
115
+ spk_id_torch = torch.LongTensor(np.array([[k]])).to(units.device)
116
+ spk_embeddd = self.spk_embed(spk_id_torch)
117
+ self.speaker_map[k] = spk_embeddd
118
+ spk_embed_mix = spk_embed_mix + v * spk_embeddd
119
+ x = x + spk_embed_mix
120
+ else:
121
+ x = x + self.spk_embed(spk_id - 1)
122
+ self.speaker_map = self.speaker_map.unsqueeze(0)
123
+ self.speaker_map = self.speaker_map.detach()
124
+ return x.transpose(1, 2)
125
+
126
+ def OnnxExport(self, project_name=None, init_noise=None, export_encoder=True, export_denoise=True, export_pred=True, export_after=True):
127
+ hubert_hidden_size = 768
128
+ n_frames = 100
129
+ hubert = torch.randn((1, n_frames, hubert_hidden_size))
130
+ mel2ph = torch.arange(end=n_frames).unsqueeze(0).long()
131
+ f0 = torch.randn((1, n_frames))
132
+ volume = torch.randn((1, n_frames))
133
+ spk_mix = []
134
+ spks = {}
135
+ if self.n_spk is not None and self.n_spk > 1:
136
+ for i in range(self.n_spk):
137
+ spk_mix.append(1.0/float(self.n_spk))
138
+ spks.update({i:1.0/float(self.n_spk)})
139
+ spk_mix = torch.tensor(spk_mix)
140
+ spk_mix = spk_mix.repeat(n_frames, 1)
141
+ orgouttt = self.init_spkembed(hubert, f0.unsqueeze(-1), volume.unsqueeze(-1), spk_mix_dict=spks)
142
+ outtt = self.forward(hubert, mel2ph, f0, volume, spk_mix)
143
+ if export_encoder:
144
+ torch.onnx.export(
145
+ self,
146
+ (hubert, mel2ph, f0, volume, spk_mix),
147
+ f"{project_name}_encoder.onnx",
148
+ input_names=["hubert", "mel2ph", "f0", "volume", "spk_mix"],
149
+ output_names=["mel_pred"],
150
+ dynamic_axes={
151
+ "hubert": [1],
152
+ "f0": [1],
153
+ "volume": [1],
154
+ "mel2ph": [1],
155
+ "spk_mix": [0],
156
+ },
157
+ opset_version=16
158
+ )
159
+
160
+ self.decoder.OnnxExport(project_name, init_noise=init_noise, export_denoise=export_denoise, export_pred=export_pred, export_after=export_after)
161
+
162
+ def ExportOnnx(self, project_name=None):
163
+ hubert_hidden_size = 768
164
+ n_frames = 100
165
+ hubert = torch.randn((1, n_frames, hubert_hidden_size))
166
+ mel2ph = torch.arange(end=n_frames).unsqueeze(0).long()
167
+ f0 = torch.randn((1, n_frames))
168
+ volume = torch.randn((1, n_frames))
169
+ spk_mix = []
170
+ spks = {}
171
+ if self.n_spk is not None and self.n_spk > 1:
172
+ for i in range(self.n_spk):
173
+ spk_mix.append(1.0/float(self.n_spk))
174
+ spks.update({i:1.0/float(self.n_spk)})
175
+ spk_mix = torch.tensor(spk_mix)
176
+ orgouttt = self.orgforward(hubert, f0.unsqueeze(-1), volume.unsqueeze(-1), spk_mix_dict=spks)
177
+ outtt = self.forward(hubert, mel2ph, f0, volume, spk_mix)
178
+
179
+ torch.onnx.export(
180
+ self,
181
+ (hubert, mel2ph, f0, volume, spk_mix),
182
+ f"{project_name}_encoder.onnx",
183
+ input_names=["hubert", "mel2ph", "f0", "volume", "spk_mix"],
184
+ output_names=["mel_pred"],
185
+ dynamic_axes={
186
+ "hubert": [1],
187
+ "f0": [1],
188
+ "volume": [1],
189
+ "mel2ph": [1]
190
+ },
191
+ opset_version=16
192
+ )
193
+
194
+ condition = torch.randn(1,self.decoder.n_hidden,n_frames)
195
+ noise = torch.randn((1, 1, self.decoder.mel_bins, condition.shape[2]), dtype=torch.float32)
196
+ pndm_speedup = torch.LongTensor([100])
197
+ K_steps = torch.LongTensor([1000])
198
+ self.decoder = torch.jit.script(self.decoder)
199
+ self.decoder(condition, noise, pndm_speedup, K_steps)
200
+
201
+ torch.onnx.export(
202
+ self.decoder,
203
+ (condition, noise, pndm_speedup, K_steps),
204
+ f"{project_name}_diffusion.onnx",
205
+ input_names=["condition", "noise", "pndm_speedup", "K_steps"],
206
+ output_names=["mel"],
207
+ dynamic_axes={
208
+ "condition": [2],
209
+ "noise": [3],
210
+ },
211
+ opset_version=16
212
+ )
213
+
214
+
215
+ if __name__ == "__main__":
216
+ project_name = "dddsp"
217
+ model_path = f'{project_name}/model_500000.pt'
218
+
219
+ model, _ = load_model_vocoder(model_path)
220
+
221
+ # Export Diffusion separately (requires MoeSS/MoeVoiceStudio, or writing your own PNDM/DPM sampling)
222
+ model.OnnxExport(project_name, export_encoder=True, export_denoise=True, export_pred=True, export_after=True)
223
+
224
+ # Merged Diffusion export (Encoder and Diffusion stay separate; feed the Encoder output and the initial noise directly into Diffusion)
225
+ # model.ExportOnnx(project_name)
226
+
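After a successful export, a quick shape check on the encoder graph (assumes onnxruntime is installed, that "dddsp_encoder.onnx" was produced by OnnxExport above, and that n_spk == 2):

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("dddsp_encoder.onnx")
n = 100
feed = {
    "hubert":  np.random.randn(1, n, 768).astype(np.float32),
    "mel2ph":  np.arange(n, dtype=np.int64)[None, :],
    "f0":      np.abs(np.random.randn(1, n)).astype(np.float32) * 200 + 100,
    "volume":  np.random.rand(1, n).astype(np.float32),
    "spk_mix": np.full((n, 2), 0.5, dtype=np.float32),   # assumes n_spk == 2
}
out = sess.run(["mel_pred"], feed)[0]
print(out.shape)  # expect (1, 256, n): conditioning features, despite the "mel_pred" name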
diffusion/solver.py ADDED
@@ -0,0 +1,195 @@
1
+ import os
2
+ import time
3
+ import numpy as np
4
+ import torch
5
+ import librosa
6
+ from diffusion.logger.saver import Saver
7
+ from diffusion.logger import utils
8
+ from torch import autocast
9
+ from torch.cuda.amp import GradScaler
10
+
11
+ def test(args, model, vocoder, loader_test, saver):
12
+ print(' [*] testing...')
13
+ model.eval()
14
+
15
+ # losses
16
+ test_loss = 0.
17
+
18
+ # initialization
19
+ num_batches = len(loader_test)
20
+ rtf_all = []
21
+
22
+ # run
23
+ with torch.no_grad():
24
+ for bidx, data in enumerate(loader_test):
25
+ fn = data['name'][0].split("/")[-1]
26
+ speaker = data['name'][0].split("/")[-2]
27
+ print('--------')
28
+ print('{}/{} - {}'.format(bidx, num_batches, fn))
29
+
30
+ # unpack data
31
+ for k in data.keys():
32
+ if not k.startswith('name'):
33
+ data[k] = data[k].to(args.device)
34
+ print('>>', data['name'][0])
35
+
36
+ # forward
37
+ st_time = time.time()
38
+ mel = model(
39
+ data['units'],
40
+ data['f0'],
41
+ data['volume'],
42
+ data['spk_id'],
43
+ gt_spec=None,
44
+ infer=True,
45
+ infer_speedup=args.infer.speedup,
46
+ method=args.infer.method)
47
+ signal = vocoder.infer(mel, data['f0'])
48
+ ed_time = time.time()
49
+
50
+ # RTF
51
+ run_time = ed_time - st_time
52
+ song_time = signal.shape[-1] / args.data.sampling_rate
53
+ rtf = run_time / song_time
54
+ print('RTF: {} | {} / {}'.format(rtf, run_time, song_time))
55
+ rtf_all.append(rtf)
56
+
57
+ # loss
58
+ for i in range(args.train.batch_size):
59
+ loss = model(
60
+ data['units'],
61
+ data['f0'],
62
+ data['volume'],
63
+ data['spk_id'],
64
+ gt_spec=data['mel'],
65
+ infer=False)
66
+ test_loss += loss.item()
67
+
68
+ # log mel
69
+ saver.log_spec(f"{speaker}_{fn}.wav", data['mel'], mel)
70
+
71
+ # log audio
72
+ path_audio = data['name_ext'][0]
73
+ audio, sr = librosa.load(path_audio, sr=args.data.sampling_rate)
74
+ if len(audio.shape) > 1:
75
+ audio = librosa.to_mono(audio)
76
+ audio = torch.from_numpy(audio).unsqueeze(0).to(signal)
77
+ saver.log_audio({f"{speaker}_{fn}_gt.wav": audio,f"{speaker}_{fn}_pred.wav": signal})
78
+ # report
79
+ test_loss /= args.train.batch_size
80
+ test_loss /= num_batches
81
+
82
+ # check
83
+ print(' [test_loss] test_loss:', test_loss)
84
+ print(' Real Time Factor', np.mean(rtf_all))
85
+ return test_loss
86
+
87
+
88
+ def train(args, initial_global_step, model, optimizer, scheduler, vocoder, loader_train, loader_test):
89
+ # saver
90
+ saver = Saver(args, initial_global_step=initial_global_step)
91
+
92
+ # model size
93
+ params_count = utils.get_network_paras_amount({'model': model})
94
+ saver.log_info('--- model size ---')
95
+ saver.log_info(params_count)
96
+
97
+ # run
98
+ num_batches = len(loader_train)
99
+ model.train()
100
+ saver.log_info('======= start training =======')
101
+ scaler = GradScaler()
102
+ if args.train.amp_dtype == 'fp32':
103
+ dtype = torch.float32
104
+ elif args.train.amp_dtype == 'fp16':
105
+ dtype = torch.float16
106
+ elif args.train.amp_dtype == 'bf16':
107
+ dtype = torch.bfloat16
108
+ else:
109
+ raise ValueError(' [x] Unknown amp_dtype: ' + args.train.amp_dtype)
110
+ saver.log_info("epoch|batch_idx/num_batches|output_dir|batch/s|lr|time|step")
111
+ for epoch in range(args.train.epochs):
112
+ for batch_idx, data in enumerate(loader_train):
113
+ saver.global_step_increment()
114
+ optimizer.zero_grad()
115
+
116
+ # unpack data
117
+ for k in data.keys():
118
+ if not k.startswith('name'):
119
+ data[k] = data[k].to(args.device)
120
+
121
+ # forward
122
+ if dtype == torch.float32:
123
+ loss = model(data['units'].float(), data['f0'], data['volume'], data['spk_id'],
124
+ aug_shift = data['aug_shift'], gt_spec=data['mel'].float(), infer=False)
125
+ else:
126
+ with autocast(device_type=args.device, dtype=dtype):
127
+ loss = model(data['units'], data['f0'], data['volume'], data['spk_id'],
128
+ aug_shift = data['aug_shift'], gt_spec=data['mel'], infer=False)
129
+
130
+ # handle nan loss
131
+ if torch.isnan(loss):
132
+ raise ValueError(' [x] nan loss ')
133
+ else:
134
+ # backpropagate
135
+ if dtype == torch.float32:
136
+ loss.backward()
137
+ optimizer.step()
138
+ else:
139
+ scaler.scale(loss).backward()
140
+ scaler.step(optimizer)
141
+ scaler.update()
142
+ scheduler.step()
143
+
144
+ # log loss
145
+ if saver.global_step % args.train.interval_log == 0:
146
+ current_lr = optimizer.param_groups[0]['lr']
147
+ saver.log_info(
148
+ 'epoch: {} | {:3d}/{:3d} | {} | batch/s: {:.2f} | lr: {:.6} | loss: {:.3f} | time: {} | step: {}'.format(
149
+ epoch,
150
+ batch_idx,
151
+ num_batches,
152
+ args.env.expdir,
153
+ args.train.interval_log/saver.get_interval_time(),
154
+ current_lr,
155
+ loss.item(),
156
+ saver.get_total_time(),
157
+ saver.global_step
158
+ )
159
+ )
160
+
161
+ saver.log_value({
162
+ 'train/loss': loss.item()
163
+ })
164
+
165
+ saver.log_value({
166
+ 'train/lr': current_lr
167
+ })
168
+
169
+ # validation
170
+ if saver.global_step % args.train.interval_val == 0:
171
+ optimizer_save = optimizer if args.train.save_opt else None
172
+
173
+ # save latest
174
+ saver.save_model(model, optimizer_save, postfix=f'{saver.global_step}')
175
+ last_val_step = saver.global_step - args.train.interval_val
176
+ if last_val_step % args.train.interval_force_save != 0:
177
+ saver.delete_model(postfix=f'{last_val_step}')
178
+
179
+ # run testing set
180
+ test_loss = test(args, model, vocoder, loader_test, saver)
181
+
182
+ # log loss
183
+ saver.log_info(
184
+ ' --- <validation> --- \nloss: {:.3f}. '.format(
185
+ test_loss,
186
+ )
187
+ )
188
+
189
+ saver.log_value({
190
+ 'validation/loss': test_loss
191
+ })
192
+
193
+ model.train()
194
+
195
+
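The mixed-precision branch in train() follows the standard GradScaler recipe; isolated here for reference (dummy model, requires a CUDA device):

import torch
from torch import autocast
from torch.cuda.amp import GradScaler

model = torch.nn.Linear(4, 1).cuda()
opt = torch.optim.AdamW(model.parameters(), lr=1e-4)
scaler = GradScaler()

x = torch.randn(8, 4, device='cuda')
opt.zero_grad()
with autocast(device_type='cuda', dtype=torch.float16):
    loss = model(x).square().mean()
scaler.scale(loss).backward()   # scale to keep fp16 gradients from underflowing
scaler.step(opt)                # unscales, and skips the step if inf/nan appeared
scaler.update()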
diffusion/unit2mel.py ADDED
@@ -0,0 +1,147 @@
1
+ import os
2
+ import yaml
3
+ import torch
4
+ import torch.nn as nn
5
+ import numpy as np
6
+ from .diffusion import GaussianDiffusion
7
+ from .wavenet import WaveNet
8
+ from .vocoder import Vocoder
9
+
10
+ class DotDict(dict):
11
+ def __getattr__(*args):
12
+ val = dict.get(*args)
13
+ return DotDict(val) if type(val) is dict else val
14
+
15
+ __setattr__ = dict.__setitem__
16
+ __delattr__ = dict.__delitem__
17
+
18
+
19
+ def load_model_vocoder(
20
+ model_path,
21
+ device='cpu',
22
+ config_path = None
23
+ ):
24
+ if config_path is None: config_file = os.path.join(os.path.split(model_path)[0], 'config.yaml')
25
+ else: config_file = config_path
26
+
27
+ with open(config_file, "r") as config:
28
+ args = yaml.safe_load(config)
29
+ args = DotDict(args)
30
+
31
+ # load vocoder
32
+ vocoder = Vocoder(args.vocoder.type, args.vocoder.ckpt, device=device)
33
+
34
+ # load model
35
+ model = Unit2Mel(
36
+ args.data.encoder_out_channels,
37
+ args.model.n_spk,
38
+ args.model.use_pitch_aug,
39
+ vocoder.dimension,
40
+ args.model.n_layers,
41
+ args.model.n_chans,
42
+ args.model.n_hidden)
43
+
44
+ print(' [Loading] ' + model_path)
45
+ ckpt = torch.load(model_path, map_location=torch.device(device))
46
+ model.to(device)
47
+ model.load_state_dict(ckpt['model'])
48
+ model.eval()
49
+ return model, vocoder, args
50
+
51
+
52
+ class Unit2Mel(nn.Module):
53
+ def __init__(
54
+ self,
55
+ input_channel,
56
+ n_spk,
57
+ use_pitch_aug=False,
58
+ out_dims=128,
59
+ n_layers=20,
60
+ n_chans=384,
61
+ n_hidden=256):
62
+ super().__init__()
63
+ self.unit_embed = nn.Linear(input_channel, n_hidden)
64
+ self.f0_embed = nn.Linear(1, n_hidden)
65
+ self.volume_embed = nn.Linear(1, n_hidden)
66
+ if use_pitch_aug:
67
+ self.aug_shift_embed = nn.Linear(1, n_hidden, bias=False)
68
+ else:
69
+ self.aug_shift_embed = None
70
+ self.n_spk = n_spk
71
+ if n_spk is not None and n_spk > 1:
72
+ self.spk_embed = nn.Embedding(n_spk, n_hidden)
73
+
74
+ self.n_hidden = n_hidden
75
+ # diffusion
76
+ self.decoder = GaussianDiffusion(WaveNet(out_dims, n_layers, n_chans, n_hidden), out_dims=out_dims)
77
+ self.input_channel = input_channel
78
+
79
+ def init_spkembed(self, units, f0, volume, spk_id = None, spk_mix_dict = None, aug_shift = None,
80
+ gt_spec=None, infer=True, infer_speedup=10, method='dpm-solver', k_step=300, use_tqdm=True):
81
+
82
+ '''
83
+ input:
84
+ B x n_frames x n_unit
85
+ return:
86
+ dict of B x n_frames x feat
87
+ '''
88
+ x = self.unit_embed(units) + self.f0_embed((1+ f0 / 700).log()) + self.volume_embed(volume)
89
+ if self.n_spk is not None and self.n_spk > 1:
90
+ if spk_mix_dict is not None:
91
+ spk_embed_mix = torch.zeros((1,1,self.n_hidden))
92
+ for k, v in spk_mix_dict.items():
93
+ spk_id_torch = torch.LongTensor(np.array([[k]])).to(units.device)
94
+ spk_embeddd = self.spk_embed(spk_id_torch)
95
+ self.speaker_map[k] = spk_embeddd
96
+ spk_embed_mix = spk_embed_mix + v * spk_embeddd
97
+ x = x + spk_embed_mix
98
+ else:
99
+ x = x + self.spk_embed(spk_id - 1)
100
+ self.speaker_map = self.speaker_map.unsqueeze(0)
101
+ self.speaker_map = self.speaker_map.detach()
102
+ return x.transpose(1, 2)
103
+
104
+ def init_spkmix(self, n_spk):
105
+ self.speaker_map = torch.zeros((n_spk,1,1,self.n_hidden))
106
+ hubert_hidden_size = self.input_channel
107
+ n_frames = 10
108
+ hubert = torch.randn((1, n_frames, hubert_hidden_size))
109
+ mel2ph = torch.arange(end=n_frames).unsqueeze(0).long()
110
+ f0 = torch.randn((1, n_frames))
111
+ volume = torch.randn((1, n_frames))
112
+ spks = {}
113
+ for i in range(n_spk):
114
+ spks.update({i: 1.0/float(n_spk)})
115
+ orgouttt = self.init_spkembed(hubert, f0.unsqueeze(-1), volume.unsqueeze(-1), spk_mix_dict=spks)
116
+
117
+ def forward(self, units, f0, volume, spk_id = None, spk_mix_dict = None, aug_shift = None,
118
+ gt_spec=None, infer=True, infer_speedup=10, method='dpm-solver', k_step=300, use_tqdm=True):
119
+
120
+ '''
121
+ input:
122
+ B x n_frames x n_unit
123
+ return:
124
+ dict of B x n_frames x feat
125
+ '''
126
+
127
+ x = self.unit_embed(units) + self.f0_embed((1+ f0 / 700).log()) + self.volume_embed(volume)
128
+ if self.n_spk is not None and self.n_spk > 1:
129
+ if spk_mix_dict is not None:
130
+ for k, v in spk_mix_dict.items():
131
+ spk_id_torch = torch.LongTensor(np.array([[k]])).to(units.device)
132
+ x = x + v * self.spk_embed(spk_id_torch)
133
+ else:
134
+ if spk_id.shape[1] > 1:
135
+ g = spk_id.reshape((spk_id.shape[0], spk_id.shape[1], 1, 1, 1)) # [N, S, B, 1, 1]
136
+ g = g * self.speaker_map # [N, S, B, 1, H]
137
+ g = torch.sum(g, dim=1) # [N, 1, B, 1, H]
138
+ g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N]
139
+ x = x + g
140
+ else:
141
+ x = x + self.spk_embed(spk_id)
142
+ if self.aug_shift_embed is not None and aug_shift is not None:
143
+ x = x + self.aug_shift_embed(aug_shift / 5)
144
+ x = self.decoder(x, gt_spec=gt_spec, infer=infer, infer_speedup=infer_speedup, method=method, k_step=k_step, use_tqdm=use_tqdm)
145
+
146
+ return x
147
+
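A forward-shape sketch of Unit2Mel without any checkpoint (768-d units are an assumption matching the HuBERT-style encoders used in this repo; with infer=True this runs a full reverse diffusion, so it is slow on CPU):

import torch
from diffusion.unit2mel import Unit2Mel

model = Unit2Mel(input_channel=768, n_spk=1, out_dims=128,
                 n_layers=20, n_chans=384, n_hidden=256)
B, frames = 1, 50
units  = torch.randn(B, frames, 768)
f0     = torch.full((B, frames, 1), 220.0)
volume = torch.rand(B, frames, 1)
with torch.no_grad():
    mel = model(units, f0, volume, infer=True, infer_speedup=10,
                method='pndm', k_step=300)
print(mel.shape)  # expected [B, frames, 128]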
diffusion/vocoder.py ADDED
@@ -0,0 +1,94 @@
1
+ import torch
2
+ from vdecoder.nsf_hifigan.nvSTFT import STFT
3
+ from vdecoder.nsf_hifigan.models import load_model,load_config
4
+ from torchaudio.transforms import Resample
5
+
6
+
7
+ class Vocoder:
8
+ def __init__(self, vocoder_type, vocoder_ckpt, device = None):
9
+ if device is None:
10
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
11
+ self.device = device
12
+
13
+ if vocoder_type == 'nsf-hifigan':
14
+ self.vocoder = NsfHifiGAN(vocoder_ckpt, device = device)
15
+ elif vocoder_type == 'nsf-hifigan-log10':
16
+ self.vocoder = NsfHifiGANLog10(vocoder_ckpt, device = device)
17
+ else:
18
+ raise ValueError(f" [x] Unknown vocoder: {vocoder_type}")
19
+
20
+ self.resample_kernel = {}
21
+ self.vocoder_sample_rate = self.vocoder.sample_rate()
22
+ self.vocoder_hop_size = self.vocoder.hop_size()
23
+ self.dimension = self.vocoder.dimension()
24
+
25
+ def extract(self, audio, sample_rate, keyshift=0):
26
+
27
+ # resample
28
+ if sample_rate == self.vocoder_sample_rate:
29
+ audio_res = audio
30
+ else:
31
+ key_str = str(sample_rate)
32
+ if key_str not in self.resample_kernel:
33
+ self.resample_kernel[key_str] = Resample(sample_rate, self.vocoder_sample_rate, lowpass_filter_width = 128).to(self.device)
34
+ audio_res = self.resample_kernel[key_str](audio)
35
+
36
+ # extract
37
+ mel = self.vocoder.extract(audio_res, keyshift=keyshift) # B, n_frames, bins
38
+ return mel
39
+
40
+ def infer(self, mel, f0):
41
+ f0 = f0[:,:mel.size(1),0] # B, n_frames
42
+ audio = self.vocoder(mel, f0)
43
+ return audio
44
+
45
+
46
+ class NsfHifiGAN(torch.nn.Module):
47
+ def __init__(self, model_path, device=None):
48
+ super().__init__()
49
+ if device is None:
50
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
51
+ self.device = device
52
+ self.model_path = model_path
53
+ self.model = None
54
+ self.h = load_config(model_path)
55
+ self.stft = STFT(
56
+ self.h.sampling_rate,
57
+ self.h.num_mels,
58
+ self.h.n_fft,
59
+ self.h.win_size,
60
+ self.h.hop_size,
61
+ self.h.fmin,
62
+ self.h.fmax)
63
+
64
+ def sample_rate(self):
65
+ return self.h.sampling_rate
66
+
67
+ def hop_size(self):
68
+ return self.h.hop_size
69
+
70
+ def dimension(self):
71
+ return self.h.num_mels
72
+
73
+ def extract(self, audio, keyshift=0):
74
+ mel = self.stft.get_mel(audio, keyshift=keyshift).transpose(1, 2) # B, n_frames, bins
75
+ return mel
76
+
77
+ def forward(self, mel, f0):
78
+ if self.model is None:
79
+ print('| Load HifiGAN: ', self.model_path)
80
+ self.model, self.h = load_model(self.model_path, device=self.device)
81
+ with torch.no_grad():
82
+ c = mel.transpose(1, 2)
83
+ audio = self.model(c, f0)
84
+ return audio
85
+
86
+ class NsfHifiGANLog10(NsfHifiGAN):
87
+ def forward(self, mel, f0):
88
+ if self.model is None:
89
+ print('| Load HifiGAN: ', self.model_path)
90
+ self.model, self.h = load_model(self.model_path, device=self.device)
91
+ with torch.no_grad():
92
+ c = 0.434294 * mel.transpose(1, 2) # 0.434294 ≈ 1/ln(10): convert natural-log mel to log10
93
+ audio = self.model(c, f0)
94
+ return audio
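A hedged round-trip sketch of the Vocoder wrapper (requires an NSF-HiFiGAN checkpoint; 'pretrain/nsf_hifigan/model' is the path used elsewhere in this repo):

import torch
from diffusion.vocoder import Vocoder

vocoder = Vocoder('nsf-hifigan', 'pretrain/nsf_hifigan/model', device='cpu')
audio = torch.randn(1, vocoder.vocoder_sample_rate)        # one second of audio
mel = vocoder.extract(audio, vocoder.vocoder_sample_rate)  # [B, n_frames, n_mels]
f0 = torch.full((1, mel.size(1), 1), 220.0)
wav = vocoder.infer(mel, f0)                               # waveform, roughly [B, 1, T]
print(mel.shape, wav.shape)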
diffusion/wavenet.py ADDED
@@ -0,0 +1,108 @@
1
+ import math
2
+ from math import sqrt
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ from torch.nn import Mish
8
+
9
+
10
+ class Conv1d(torch.nn.Conv1d):
11
+ def __init__(self, *args, **kwargs):
12
+ super().__init__(*args, **kwargs)
13
+ nn.init.kaiming_normal_(self.weight)
14
+
15
+
16
+ class SinusoidalPosEmb(nn.Module):
17
+ def __init__(self, dim):
18
+ super().__init__()
19
+ self.dim = dim
20
+
21
+ def forward(self, x):
22
+ device = x.device
23
+ half_dim = self.dim // 2
24
+ emb = math.log(10000) / (half_dim - 1)
25
+ emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
26
+ emb = x[:, None] * emb[None, :]
27
+ emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
28
+ return emb
29
+
30
+
31
+ class ResidualBlock(nn.Module):
32
+ def __init__(self, encoder_hidden, residual_channels, dilation):
33
+ super().__init__()
34
+ self.residual_channels = residual_channels
35
+ self.dilated_conv = nn.Conv1d(
36
+ residual_channels,
37
+ 2 * residual_channels,
38
+ kernel_size=3,
39
+ padding=dilation,
40
+ dilation=dilation
41
+ )
42
+ self.diffusion_projection = nn.Linear(residual_channels, residual_channels)
43
+ self.conditioner_projection = nn.Conv1d(encoder_hidden, 2 * residual_channels, 1)
44
+ self.output_projection = nn.Conv1d(residual_channels, 2 * residual_channels, 1)
45
+
46
+ def forward(self, x, conditioner, diffusion_step):
47
+ diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1)
48
+ conditioner = self.conditioner_projection(conditioner)
49
+ y = x + diffusion_step
50
+
51
+ y = self.dilated_conv(y) + conditioner
52
+
53
+ # Using torch.split instead of torch.chunk to avoid using onnx::Slice
54
+ gate, filter = torch.split(y, [self.residual_channels, self.residual_channels], dim=1)
55
+ y = torch.sigmoid(gate) * torch.tanh(filter)
56
+
57
+ y = self.output_projection(y)
58
+
59
+ # Using torch.split instead of torch.chunk to avoid using onnx::Slice
60
+ residual, skip = torch.split(y, [self.residual_channels, self.residual_channels], dim=1)
61
+ return (x + residual) / math.sqrt(2.0), skip
62
+
63
+
64
+ class WaveNet(nn.Module):
65
+ def __init__(self, in_dims=128, n_layers=20, n_chans=384, n_hidden=256):
66
+ super().__init__()
67
+ self.input_projection = Conv1d(in_dims, n_chans, 1)
68
+ self.diffusion_embedding = SinusoidalPosEmb(n_chans)
69
+ self.mlp = nn.Sequential(
70
+ nn.Linear(n_chans, n_chans * 4),
71
+ Mish(),
72
+ nn.Linear(n_chans * 4, n_chans)
73
+ )
74
+ self.residual_layers = nn.ModuleList([
75
+ ResidualBlock(
76
+ encoder_hidden=n_hidden,
77
+ residual_channels=n_chans,
78
+ dilation=1
79
+ )
80
+ for i in range(n_layers)
81
+ ])
82
+ self.skip_projection = Conv1d(n_chans, n_chans, 1)
83
+ self.output_projection = Conv1d(n_chans, in_dims, 1)
84
+ nn.init.zeros_(self.output_projection.weight)
85
+
86
+ def forward(self, spec, diffusion_step, cond):
87
+ """
88
+ :param spec: [B, 1, M, T]
89
+ :param diffusion_step: [B, 1]
90
+ :param cond: [B, M, T]
91
+ :return:
92
+ """
93
+ x = spec.squeeze(1)
94
+ x = self.input_projection(x) # [B, residual_channel, T]
95
+
96
+ x = F.relu(x)
97
+ diffusion_step = self.diffusion_embedding(diffusion_step)
98
+ diffusion_step = self.mlp(diffusion_step)
99
+ skip = []
100
+ for layer in self.residual_layers:
101
+ x, skip_connection = layer(x, cond, diffusion_step)
102
+ skip.append(skip_connection)
103
+
104
+ x = torch.sum(torch.stack(skip), dim=0) / sqrt(len(self.residual_layers))
105
+ x = self.skip_projection(x)
106
+ x = F.relu(x)
107
+ x = self.output_projection(x) # [B, mel_bins, T]
108
+ return x[:, None, :, :]
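The denoiser is fully standalone, so its shapes can be checked without any checkpoint:

import torch
from diffusion.wavenet import WaveNet

net = WaveNet(in_dims=128, n_layers=20, n_chans=384, n_hidden=256)
spec = torch.randn(2, 1, 128, 100)   # [B, 1, M, T] noisy mel
step = torch.randint(0, 1000, (2,))  # one diffusion step index per batch item
cond = torch.randn(2, 256, 100)      # [B, n_hidden, T] conditioning
out = net(spec, step, cond)
print(out.shape)                     # torch.Size([2, 1, 128, 100])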
inference/infer_tool.py CHANGED
@@ -6,19 +6,22 @@ import os
6
  import time
7
  from pathlib import Path
8
  from inference import slicer
 
9
 
10
  import librosa
11
  import numpy as np
12
  # import onnxruntime
13
- import parselmouth
14
  import soundfile
15
  import torch
16
  import torchaudio
17
 
18
  import cluster
19
- from hubert import hubert_model
20
  import utils
21
  from models import SynthesizerTrn
22
 
23
  logging.getLogger('matplotlib').setLevel(logging.WARNING)
24
 
@@ -114,25 +117,80 @@ class F0FilterException(Exception):
114
  class Svc(object):
115
  def __init__(self, net_g_path, config_path,
116
  device=None,
117
- cluster_model_path="logs/44k/kmeans_10000.pt"):
118
  self.net_g_path = net_g_path
119
  if device is None:
120
  self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
121
  else:
122
  self.dev = torch.device(device)
123
  self.net_g_ms = None
124
- self.hps_ms = utils.get_hparams_from_file(config_path)
125
- self.target_sample = self.hps_ms.data.sampling_rate
126
- self.hop_size = self.hps_ms.data.hop_length
127
- self.spk2id = self.hps_ms.spk
128
- # load hubert
129
- self.hubert_model = utils.get_hubert_model().to(self.dev)
130
- self.load_model()
131
  if os.path.exists(cluster_model_path):
132
- self.cluster_model = cluster.get_cluster_model(cluster_model_path)
133
-
134
- def load_model(self):
135
- # get model configuration
136
  self.net_g_ms = SynthesizerTrn(
137
  self.hps_ms.data.filter_length // 2 + 1,
138
  self.hps_ms.train.segment_size // self.hps_ms.data.hop_length,
@@ -142,40 +200,53 @@ class Svc(object):
142
  _ = self.net_g_ms.half().eval().to(self.dev)
143
  else:
144
  _ = self.net_g_ms.eval().to(self.dev)
 
 
145
 
 
146
 
147
-
148
- def get_unit_f0(self, in_path, tran, cluster_infer_ratio, speaker, f0_filter ,F0_mean_pooling):
149
-
150
- wav, sr = librosa.load(in_path, sr=self.target_sample)
151
-
152
- if F0_mean_pooling == True:
153
- f0, uv = utils.compute_f0_uv_torchcrepe(torch.FloatTensor(wav), sampling_rate=self.target_sample, hop_length=self.hop_size,device=self.dev)
154
- if f0_filter and sum(f0) == 0:
155
- raise F0FilterException("No voice detected")
156
- f0 = torch.FloatTensor(list(f0))
157
- uv = torch.FloatTensor(list(uv))
158
- if F0_mean_pooling == False:
159
- f0 = utils.compute_f0_parselmouth(wav, sampling_rate=self.target_sample, hop_length=self.hop_size)
160
- if f0_filter and sum(f0) == 0:
161
- raise F0FilterException("No voice detected")
162
- f0, uv = utils.interpolate_f0(f0)
163
- f0 = torch.FloatTensor(f0)
164
- uv = torch.FloatTensor(uv)
165
 
166
  f0 = f0 * 2 ** (tran / 12)
167
- f0 = f0.unsqueeze(0).to(self.dev)
168
- uv = uv.unsqueeze(0).to(self.dev)
169
 
170
  wav16k = librosa.resample(wav, orig_sr=self.target_sample, target_sr=16000)
171
  wav16k = torch.from_numpy(wav16k).to(self.dev)
172
- c = utils.get_hubert_content(self.hubert_model, wav_16k_tensor=wav16k)
173
  c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[1])
174
 
175
  if cluster_infer_ratio !=0:
176
- cluster_c = cluster.get_cluster_center_result(self.cluster_model, c.cpu().numpy().T, speaker).T
177
- cluster_c = torch.FloatTensor(cluster_c).to(self.dev)
178
- c = cluster_infer_ratio * cluster_c + (1 - cluster_infer_ratio) * c
179
 
180
  c = c.unsqueeze(0)
181
  return c, f0, uv
@@ -185,28 +256,91 @@ class Svc(object):
185
  auto_predict_f0=False,
186
  noice_scale=0.4,
187
  f0_filter=False,
188
- F0_mean_pooling=False
189
  ):
190
-
191
- speaker_id = self.spk2id.__dict__.get(speaker)
192
- if not speaker_id and type(speaker) is int:
193
- if len(self.spk2id.__dict__) >= speaker:
194
- speaker_id = speaker
195
- sid = torch.LongTensor([int(speaker_id)]).to(self.dev).unsqueeze(0)
196
- c, f0, uv = self.get_unit_f0(raw_path, tran, cluster_infer_ratio, speaker, f0_filter,F0_mean_pooling)
197
  if "half" in self.net_g_path and torch.cuda.is_available():
198
  c = c.half()
199
  with torch.no_grad():
200
  start = time.time()
201
- audio = self.net_g_ms.infer(c, f0=f0, g=sid, uv=uv, predict_f0=auto_predict_f0, noice_scale=noice_scale)[0,0].data.float()
202
  use_time = time.time() - start
203
  print("vits use time:{}".format(use_time))
204
- return audio, audio.shape[-1]
205
 
206
  def clear_empty(self):
207
- # clear GPU memory
208
  torch.cuda.empty_cache()
209
 
210
  def slice_inference(self,
211
  raw_audio_path,
212
  spk,
@@ -219,9 +353,19 @@ class Svc(object):
219
  clip_seconds=0,
220
  lg_num=0,
221
  lgr_num =0.75,
222
- F0_mean_pooling = False
223
  ):
224
- wav_path = raw_audio_path
225
  chunks = slicer.cut(wav_path, db_thresh=slice_db)
226
  audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
227
  per_size = int(clip_seconds*audio_sr)
@@ -230,7 +374,62 @@ class Svc(object):
230
  lg_size_c_l = (lg_size-lg_size_r)//2
231
  lg_size_c_r = lg_size-lg_size_r-lg_size_c_l
232
  lg = np.linspace(0,1,lg_size_r) if lg_size!=0 else 0
233
-
234
  audio = []
235
  for (slice_tag, data) in audio_data:
236
  print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
@@ -240,6 +439,7 @@ class Svc(object):
240
  print('jump empty segment')
241
  _audio = np.zeros(length)
242
  audio.extend(list(pad_array(_audio, length)))
 
243
  continue
244
  if per_size != 0:
245
  datas = split_list_by_n(data, per_size,lg_size)
@@ -254,12 +454,20 @@ class Svc(object):
254
  raw_path = io.BytesIO()
255
  soundfile.write(raw_path, dat, audio_sr, format="wav")
256
  raw_path.seek(0)
257
- out_audio, out_sr = self.infer(spk, tran, raw_path,
258
  cluster_infer_ratio=cluster_infer_ratio,
259
  auto_predict_f0=auto_predict_f0,
260
  noice_scale=noice_scale,
261
- F0_mean_pooling = F0_mean_pooling
262
  )
 
263
  _audio = out_audio.cpu().numpy()
264
  pad_len = int(self.target_sample * pad_seconds)
265
  _audio = _audio[pad_len:-pad_len]
@@ -278,10 +486,10 @@ class RealTimeVC:
278
  def __init__(self):
279
  self.last_chunk = None
280
  self.last_o = None
281
- self.chunk_len = 16000 # chunk length
282
- self.pre_len = 3840 # crossfade length, a multiple of 640
283
 
284
- """输入输出都是1numpy 音频波形数组"""
285
 
286
  def process(self, svc_model, speaker_id, f_pitch_change, input_wav_path,
287
  cluster_infer_ratio=0,
@@ -301,7 +509,7 @@ class RealTimeVC:
301
  auto_predict_f0=auto_predict_f0,
302
  noice_scale=noice_scale,
303
  f0_filter=f0_filter)
304
-
305
  audio = audio.cpu().numpy()
306
  self.last_chunk = audio[-self.pre_len:]
307
  self.last_o = audio
@@ -322,3 +530,4 @@ class RealTimeVC:
322
  self.last_chunk = audio[-self.pre_len:]
323
  self.last_o = audio
324
  return ret[self.chunk_len:2 * self.chunk_len]
 
 
  import time
  from pathlib import Path
  from inference import slicer
+ import gc
 
  import librosa
  import numpy as np
  # import onnxruntime
  import soundfile
  import torch
  import torchaudio
 
  import cluster
  import utils
  from models import SynthesizerTrn
+ import pickle
+
+ from diffusion.unit2mel import load_model_vocoder
+ import yaml
 
  logging.getLogger('matplotlib').setLevel(logging.WARNING)
 
  class Svc(object):
      def __init__(self, net_g_path, config_path,
                   device=None,
+                  cluster_model_path="logs/44k/kmeans_10000.pt",
+                  nsf_hifigan_enhance=False,
+                  diffusion_model_path="logs/44k/diffusion/model_0.pt",
+                  diffusion_config_path="configs/diffusion.yaml",
+                  shallow_diffusion=False,
+                  only_diffusion=False,
+                  spk_mix_enable=False,
+                  feature_retrieval=False
+                  ):
          self.net_g_path = net_g_path
+         self.only_diffusion = only_diffusion
+         self.shallow_diffusion = shallow_diffusion
+         self.feature_retrieval = feature_retrieval
          if device is None:
              self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
          else:
              self.dev = torch.device(device)
          self.net_g_ms = None
+         if not self.only_diffusion:
+             self.hps_ms = utils.get_hparams_from_file(config_path)
+             self.target_sample = self.hps_ms.data.sampling_rate
+             self.hop_size = self.hps_ms.data.hop_length
+             self.spk2id = self.hps_ms.spk
+             try:
+                 self.vol_embedding = self.hps_ms.model.vol_embedding
+             except Exception:
+                 self.vol_embedding = False
+             try:
+                 self.speech_encoder = self.hps_ms.model.speech_encoder
+             except Exception:
+                 self.speech_encoder = 'vec768l12'
+
+         self.nsf_hifigan_enhance = nsf_hifigan_enhance
+         if self.shallow_diffusion or self.only_diffusion:
+             if os.path.exists(diffusion_model_path) and os.path.exists(diffusion_config_path):
+                 self.diffusion_model, self.vocoder, self.diffusion_args = load_model_vocoder(diffusion_model_path, self.dev, config_path=diffusion_config_path)
+                 if self.only_diffusion:
+                     self.target_sample = self.diffusion_args.data.sampling_rate
+                     self.hop_size = self.diffusion_args.data.block_size
+                     self.spk2id = self.diffusion_args.spk
+                     self.speech_encoder = self.diffusion_args.data.encoder
+                 if spk_mix_enable:
+                     self.diffusion_model.init_spkmix(len(self.spk2id))
+             else:
+                 print("No diffusion model or config found. Shallow diffusion mode will be disabled.")
+                 self.shallow_diffusion = self.only_diffusion = False
+
+         # load hubert and model
+         if not self.only_diffusion:
+             self.load_model(spk_mix_enable)
+             self.hubert_model = utils.get_speech_encoder(self.speech_encoder, device=self.dev)
+             self.volume_extractor = utils.Volume_Extractor(self.hop_size)
+         else:
+             self.hubert_model = utils.get_speech_encoder(self.diffusion_args.data.encoder, device=self.dev)
+             self.volume_extractor = utils.Volume_Extractor(self.diffusion_args.data.block_size)
+
          if os.path.exists(cluster_model_path):
+             if self.feature_retrieval:
+                 with open(cluster_model_path, "rb") as f:
+                     self.cluster_model = pickle.load(f)
+                 self.big_npy = None
+                 self.now_spk_id = -1
+             else:
+                 self.cluster_model = cluster.get_cluster_model(cluster_model_path)
+         else:
+             self.feature_retrieval = False
+
+         if self.shallow_diffusion:
+             self.nsf_hifigan_enhance = False
+         if self.nsf_hifigan_enhance:
+             from modules.enhancer import Enhancer
+             self.enhancer = Enhancer('nsf-hifigan', 'pretrain/nsf_hifigan/model', device=self.dev)
+
+     def load_model(self, spk_mix_enable=False):
+         # get model configuration
          self.net_g_ms = SynthesizerTrn(
              self.hps_ms.data.filter_length // 2 + 1,
              self.hps_ms.train.segment_size // self.hps_ms.data.hop_length,
 
              _ = self.net_g_ms.half().eval().to(self.dev)
          else:
              _ = self.net_g_ms.eval().to(self.dev)
+         if spk_mix_enable:
+             self.net_g_ms.EnableCharacterMix(len(self.spk2id), self.dev)
 
+     def get_unit_f0(self, wav, tran, cluster_infer_ratio, speaker, f0_filter, f0_predictor, cr_threshold=0.05):
+         f0_predictor_object = utils.get_f0_predictor(f0_predictor, hop_length=self.hop_size, sampling_rate=self.target_sample, device=self.dev, threshold=cr_threshold)
+         f0, uv = f0_predictor_object.compute_f0_uv(wav)
+         if f0_filter and sum(f0) == 0:
+             raise F0FilterException("No voice detected")
+         f0 = torch.FloatTensor(f0).to(self.dev)
+         uv = torch.FloatTensor(uv).to(self.dev)
 
          f0 = f0 * 2 ** (tran / 12)
+         f0 = f0.unsqueeze(0)
+         uv = uv.unsqueeze(0)
 
          wav16k = librosa.resample(wav, orig_sr=self.target_sample, target_sr=16000)
          wav16k = torch.from_numpy(wav16k).to(self.dev)
+         c = self.hubert_model.encoder(wav16k)
          c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[1])
 
          if cluster_infer_ratio != 0:
+             if self.feature_retrieval:
+                 speaker_id = self.spk2id.get(speaker)
+                 if not speaker_id and type(speaker) is int:
+                     if len(self.spk2id.__dict__) >= speaker:
+                         speaker_id = speaker
+                 if speaker_id is None:
+                     raise RuntimeError("The name you entered is not in the speaker list!")
+                 feature_index = self.cluster_model[speaker_id]
+                 feat_np = c.transpose(0, 1).cpu().numpy()
+                 if self.big_npy is None or self.now_spk_id != speaker_id:
+                     self.big_npy = feature_index.reconstruct_n(0, feature_index.ntotal)
+                     self.now_spk_id = speaker_id
+                 print("starting feature retrieval...")
+                 score, ix = feature_index.search(feat_np, k=8)
+                 weight = np.square(1 / score)
+                 weight /= weight.sum(axis=1, keepdims=True)
+                 npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
+                 c = cluster_infer_ratio * npy + (1 - cluster_infer_ratio) * feat_np
+                 c = torch.FloatTensor(c).to(self.dev).transpose(0, 1)
+                 print("end feature retrieval...")
+             else:
+                 cluster_c = cluster.get_cluster_center_result(self.cluster_model, c.cpu().numpy().T, speaker).T
+                 cluster_c = torch.FloatTensor(cluster_c).to(self.dev)
+                 c = cluster_infer_ratio * cluster_c + (1 - cluster_infer_ratio) * c
 
          c = c.unsqueeze(0)
          return c, f0, uv
 
              auto_predict_f0=False,
              noice_scale=0.4,
              f0_filter=False,
+             f0_predictor='pm',
+             enhancer_adaptive_key=0,
+             cr_threshold=0.05,
+             k_step=100,
+             frame=0,
+             spk_mix=False,
+             second_encoding=False,
+             loudness_envelope_adjustment=1
              ):
+         wav, sr = librosa.load(raw_path, sr=self.target_sample)
+         if spk_mix:
+             c, f0, uv = self.get_unit_f0(wav, tran, 0, None, f0_filter, f0_predictor, cr_threshold=cr_threshold)
+             n_frames = f0.size(1)
+             sid = speaker[:, frame:frame + n_frames].transpose(0, 1)
+         else:
+             speaker_id = self.spk2id.get(speaker)
+             if not speaker_id and type(speaker) is int:
+                 if len(self.spk2id.__dict__) >= speaker:
+                     speaker_id = speaker
+             if speaker_id is None:
+                 raise RuntimeError("The name you entered is not in the speaker list!")
+             sid = torch.LongTensor([int(speaker_id)]).to(self.dev).unsqueeze(0)
+             c, f0, uv = self.get_unit_f0(wav, tran, cluster_infer_ratio, speaker, f0_filter, f0_predictor, cr_threshold=cr_threshold)
+             n_frames = f0.size(1)
          if "half" in self.net_g_path and torch.cuda.is_available():
              c = c.half()
          with torch.no_grad():
              start = time.time()
+             vol = None
+             if not self.only_diffusion:
+                 vol = self.volume_extractor.extract(torch.FloatTensor(wav).to(self.dev)[None, :])[None, :].to(self.dev) if self.vol_embedding else None
+                 audio, f0 = self.net_g_ms.infer(c, f0=f0, g=sid, uv=uv, predict_f0=auto_predict_f0, noice_scale=noice_scale, vol=vol)
+                 audio = audio[0, 0].data.float()
+                 audio_mel = self.vocoder.extract(audio[None, :], self.target_sample) if self.shallow_diffusion else None
+             else:
+                 audio = torch.FloatTensor(wav).to(self.dev)
+                 audio_mel = None
+             if self.only_diffusion or self.shallow_diffusion:
+                 vol = self.volume_extractor.extract(audio[None, :])[None, :, None].to(self.dev) if vol is None else vol[:, :, None]
+                 if self.shallow_diffusion and second_encoding:
+                     audio16k = librosa.resample(audio.detach().cpu().numpy(), orig_sr=self.target_sample, target_sr=16000)
+                     audio16k = torch.from_numpy(audio16k).to(self.dev)
+                     c = self.hubert_model.encoder(audio16k)
+                     c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[1])
+                 f0 = f0[:, :, None]
+                 c = c.transpose(-1, -2)
+                 audio_mel = self.diffusion_model(
+                     c,
+                     f0,
+                     vol,
+                     spk_id=sid,
+                     spk_mix_dict=None,
+                     gt_spec=audio_mel,
+                     infer=True,
+                     infer_speedup=self.diffusion_args.infer.speedup,
+                     method=self.diffusion_args.infer.method,
+                     k_step=k_step)
+                 audio = self.vocoder.infer(audio_mel, f0).squeeze()
+             if self.nsf_hifigan_enhance:
+                 audio, _ = self.enhancer.enhance(
+                     audio[None, :],
+                     self.target_sample,
+                     f0[:, :, None],
+                     self.hps_ms.data.hop_length,
+                     adaptive_key=enhancer_adaptive_key)
+             if loudness_envelope_adjustment != 1:
+                 audio = utils.change_rms(wav, self.target_sample, audio, self.target_sample, loudness_envelope_adjustment)
          use_time = time.time() - start
          print("vits use time:{}".format(use_time))
+         return audio, audio.shape[-1], n_frames
 
      def clear_empty(self):
+         # clean up vram
          torch.cuda.empty_cache()
 
+     def unload_model(self):
+         # unload model
+         self.net_g_ms = self.net_g_ms.to("cpu")
+         del self.net_g_ms
+         if hasattr(self, "enhancer"):
+             self.enhancer.enhancer = self.enhancer.enhancer.to("cpu")
+             del self.enhancer.enhancer
+             del self.enhancer
+         gc.collect()
+
      def slice_inference(self,
                          raw_audio_path,
                          spk,
 
                          clip_seconds=0,
                          lg_num=0,
                          lgr_num=0.75,
+                         f0_predictor='pm',
+                         enhancer_adaptive_key=0,
+                         cr_threshold=0.05,
+                         k_step=100,
+                         use_spk_mix=False,
+                         second_encoding=False,
+                         loudness_envelope_adjustment=1
                          ):
+         if use_spk_mix:
+             if len(self.spk2id) == 1:
+                 spk = list(self.spk2id.keys())[0]
+                 use_spk_mix = False
+         wav_path = Path(raw_audio_path).with_suffix('.wav')
          chunks = slicer.cut(wav_path, db_thresh=slice_db)
          audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
          per_size = int(clip_seconds * audio_sr)
 
          lg_size_c_l = (lg_size - lg_size_r) // 2
          lg_size_c_r = lg_size - lg_size_r - lg_size_c_l
          lg = np.linspace(0, 1, lg_size_r) if lg_size != 0 else 0
-
+
+         if use_spk_mix:
+             assert len(self.spk2id) == len(spk)
+             audio_length = 0
+             for (slice_tag, data) in audio_data:
+                 aud_length = int(np.ceil(len(data) / audio_sr * self.target_sample))
+                 if slice_tag:
+                     audio_length += aud_length // self.hop_size
+                     continue
+                 if per_size != 0:
+                     datas = split_list_by_n(data, per_size, lg_size)
+                 else:
+                     datas = [data]
+                 for k, dat in enumerate(datas):
+                     pad_len = int(audio_sr * pad_seconds)
+                     per_length = int(np.ceil(len(dat) / audio_sr * self.target_sample))
+                     a_length = per_length + 2 * pad_len
+                     audio_length += a_length // self.hop_size
+             audio_length += len(audio_data)
+             spk_mix_tensor = torch.zeros(size=(len(spk), audio_length)).to(self.dev)
+             for i in range(len(spk)):
+                 last_end = None
+                 for mix in spk[i]:
+                     if mix[3] < 0. or mix[2] < 0.:
+                         raise RuntimeError("mix value must be higher than zero!")
+                     begin = int(audio_length * mix[0])
+                     end = int(audio_length * mix[1])
+                     length = end - begin
+                     if length <= 0:
+                         raise RuntimeError("begin must be lower than end!")
+                     step = (mix[3] - mix[2]) / length
+                     if last_end is not None:
+                         if last_end != begin:
+                             raise RuntimeError("the end time of segment i must equal the begin time of segment i+1!")
+                     last_end = end
+                     if step == 0.:
+                         spk_mix_data = torch.zeros(length).to(self.dev) + mix[2]
+                     else:
+                         spk_mix_data = torch.arange(mix[2], mix[3], step).to(self.dev)
+                     if (len(spk_mix_data) < length):
+                         num_pad = length - len(spk_mix_data)
+                         spk_mix_data = torch.nn.functional.pad(spk_mix_data, [0, num_pad], mode="reflect").to(self.dev)
+                     spk_mix_tensor[i][begin:end] = spk_mix_data[:length]
+
+             spk_mix_ten = torch.sum(spk_mix_tensor, dim=0).unsqueeze(0).to(self.dev)
+             # spk_mix_tensor[0][spk_mix_ten<0.001] = 1.0
+             for i, x in enumerate(spk_mix_ten[0]):
+                 if x == 0.0:
+                     spk_mix_ten[0][i] = 1.0
+                     spk_mix_tensor[:, i] = 1.0 / len(spk)
+             spk_mix_tensor = spk_mix_tensor / spk_mix_ten
+             if not ((torch.sum(spk_mix_tensor, dim=0) - 1.) < 0.0001).all():
+                 raise RuntimeError("sum(spk_mix_tensor) does not equal 1")
+             spk = spk_mix_tensor
+
+         global_frame = 0
          audio = []
          for (slice_tag, data) in audio_data:
              print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
 
@@ -240,6 +439,7 @@ class Svc(object):
                  print('jump empty segment')
                  _audio = np.zeros(length)
                  audio.extend(list(pad_array(_audio, length)))
+                 global_frame += length // self.hop_size
                  continue
              if per_size != 0:
                  datas = split_list_by_n(data, per_size, lg_size)
 
@@ -254,12 +454,20 @@ class Svc(object):
                  raw_path = io.BytesIO()
                  soundfile.write(raw_path, dat, audio_sr, format="wav")
                  raw_path.seek(0)
-                 out_audio, out_sr = self.infer(spk, tran, raw_path,
+                 out_audio, out_sr, out_frame = self.infer(spk, tran, raw_path,
                                                 cluster_infer_ratio=cluster_infer_ratio,
                                                 auto_predict_f0=auto_predict_f0,
                                                 noice_scale=noice_scale,
-                                                F0_mean_pooling=F0_mean_pooling
+                                                f0_predictor=f0_predictor,
+                                                enhancer_adaptive_key=enhancer_adaptive_key,
+                                                cr_threshold=cr_threshold,
+                                                k_step=k_step,
+                                                frame=global_frame,
+                                                spk_mix=use_spk_mix,
+                                                second_encoding=second_encoding,
+                                                loudness_envelope_adjustment=loudness_envelope_adjustment
                                                 )
+                 global_frame += out_frame
                  _audio = out_audio.cpu().numpy()
                  pad_len = int(self.target_sample * pad_seconds)
                  _audio = _audio[pad_len:-pad_len]
 
@@ -278,10 +486,10 @@ class RealTimeVC:
      def __init__(self):
          self.last_chunk = None
          self.last_o = None
-         self.chunk_len = 16000  # 区块长度
-         self.pre_len = 3840  # 交叉淡化长度,640的倍数
+         self.chunk_len = 16000  # chunk length
+         self.pre_len = 3840  # cross fade length, multiples of 640
 
-         """输入输出都是1numpy 音频波形数组"""
+         # Input and output are 1-dimensional numpy waveform arrays
 
      def process(self, svc_model, speaker_id, f_pitch_change, input_wav_path,
                  cluster_infer_ratio=0,
 
@@ -301,7 +509,7 @@ class RealTimeVC:
                                        auto_predict_f0=auto_predict_f0,
                                        noice_scale=noice_scale,
                                        f0_filter=f0_filter)
-
+
              audio = audio.cpu().numpy()
              self.last_chunk = audio[-self.pre_len:]
              self.last_o = audio
 
@@ -322,3 +530,4 @@ class RealTimeVC:
              self.last_chunk = audio[-self.pre_len:]
              self.last_o = audio
              return ret[self.chunk_len:2 * self.chunk_len]
+
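The feature-retrieval branch in `get_unit_f0` blends each speech-encoder frame toward a distance-weighted average of its k=8 nearest neighbours in the per-speaker index. A standalone sketch of that blend (plain numpy; `index` and `big_npy` stand in for the objects loaded from `cluster_model_path`, and the function name here is illustrative, not part of the repo):

```python
import numpy as np

def retrieval_blend(feat, big_npy, index, ratio, k=8):
    # feat: (T, D) speech-encoder features; big_npy: (N, D) reconstructed index vectors
    score, ix = index.search(feat, k=k)          # (T, k) distances and neighbour ids
    weight = np.square(1 / score)                # inverse-square-distance weights
    weight /= weight.sum(axis=1, keepdims=True)  # normalize per frame
    nearest = np.sum(big_npy[ix] * weight[..., None], axis=1)
    return ratio * nearest + (1 - ratio) * feat  # cluster_infer_ratio controls the mix
```

Note that an exact match (distance 0) would make a weight infinite; the in-tree code shares that edge case.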
inference/infer_tool_grad.py CHANGED
@@ -131,7 +131,7 @@ class VitsSvc(object):
          with torch.no_grad():
              x_tst = stn_tst.unsqueeze(0).to(self.device)
              x_tst = torch.repeat_interleave(x_tst, repeats=2, dim=1).transpose(1, 2)
-             audio = self.SVCVITS.infer(x_tst, f0=f0, g=sid)[0,0].data.float()
+             audio = self.SVCVITS.infer(x_tst, f0=f0, g=sid)[0][0,0].data.float()
          return audio, audio.shape[-1]
 
      def inference(self, srcaudio, chara, tran, slice_db):
modules/F0Predictor/CrepeF0Predictor.py ADDED
@@ -0,0 +1,31 @@
+ from modules.F0Predictor.F0Predictor import F0Predictor
+ from modules.F0Predictor.crepe import CrepePitchExtractor
+ import torch
+
+ class CrepeF0Predictor(F0Predictor):
+     def __init__(self, hop_length=512, f0_min=50, f0_max=1100, device=None, sampling_rate=44100, threshold=0.05, model="full"):
+         self.F0Creper = CrepePitchExtractor(hop_length=hop_length, f0_min=f0_min, f0_max=f0_max, device=device, threshold=threshold, model=model)
+         self.hop_length = hop_length
+         self.f0_min = f0_min
+         self.f0_max = f0_max
+         self.device = device
+         self.threshold = threshold
+         self.sampling_rate = sampling_rate
+
+     def compute_f0(self, wav, p_len=None):
+         x = torch.FloatTensor(wav).to(self.device)
+         if p_len is None:
+             p_len = x.shape[0] // self.hop_length
+         else:
+             assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
+         f0, uv = self.F0Creper(x[None, :].float(), self.sampling_rate, pad_to=p_len)
+         return f0
+
+     def compute_f0_uv(self, wav, p_len=None):
+         x = torch.FloatTensor(wav).to(self.device)
+         if p_len is None:
+             p_len = x.shape[0] // self.hop_length
+         else:
+             assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
+         f0, uv = self.F0Creper(x[None, :].float(), self.sampling_rate, pad_to=p_len)
+         return f0, uv
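For orientation, a minimal usage sketch for the predictor above (assumes `torchcrepe` and its model weights are available; the audio path is illustrative):

```python
import torch
import librosa
from modules.F0Predictor.CrepeF0Predictor import CrepeF0Predictor

wav, sr = librosa.load("raw/example.wav", sr=44100)           # illustrative path
device = "cuda" if torch.cuda.is_available() else "cpu"
predictor = CrepeF0Predictor(hop_length=512, sampling_rate=sr, device=device)
f0, uv = predictor.compute_f0_uv(wav)                         # one value per hop
print(f0.shape, uv.shape)
```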
modules/F0Predictor/DioF0Predictor.py ADDED
@@ -0,0 +1,85 @@
+ from modules.F0Predictor.F0Predictor import F0Predictor
+ import pyworld
+ import numpy as np
+
+ class DioF0Predictor(F0Predictor):
+     def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+         self.hop_length = hop_length
+         self.f0_min = f0_min
+         self.f0_max = f0_max
+         self.sampling_rate = sampling_rate
+
+     def interpolate_f0(self, f0):
+         '''
+         Interpolate the F0 sequence, filling unvoiced (zero) frames.
+         '''
+         data = np.reshape(f0, (f0.size, 1))
+
+         vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+         vuv_vector[data > 0.0] = 1.0
+         vuv_vector[data <= 0.0] = 0.0
+
+         ip_data = data
+
+         frame_number = data.size
+         last_value = 0.0
+         for i in range(frame_number):
+             if data[i] <= 0.0:
+                 j = i + 1
+                 for j in range(i + 1, frame_number):
+                     if data[j] > 0.0:
+                         break
+                 if j < frame_number - 1:
+                     if last_value > 0.0:
+                         step = (data[j] - data[i - 1]) / float(j - i)
+                         for k in range(i, j):
+                             ip_data[k] = data[i - 1] + step * (k - i + 1)
+                     else:
+                         for k in range(i, j):
+                             ip_data[k] = data[j]
+                 else:
+                     for k in range(i, frame_number):
+                         ip_data[k] = last_value
+             else:
+                 ip_data[i] = data[i]  # this may be an unnecessary copy
+                 last_value = data[i]
+
+         return ip_data[:, 0], vuv_vector[:, 0]
+
+     def resize_f0(self, x, target_len):
+         source = np.array(x)
+         source[source < 0.001] = np.nan
+         target = np.interp(np.arange(0, len(source) * target_len, len(source)) / target_len, np.arange(0, len(source)), source)
+         res = np.nan_to_num(target)
+         return res
+
+     def compute_f0(self, wav, p_len=None):
+         if p_len is None:
+             p_len = wav.shape[0] // self.hop_length
+         f0, t = pyworld.dio(
+             wav.astype(np.double),
+             fs=self.sampling_rate,
+             f0_floor=self.f0_min,
+             f0_ceil=self.f0_max,
+             frame_period=1000 * self.hop_length / self.sampling_rate,
+         )
+         f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+         for index, pitch in enumerate(f0):
+             f0[index] = round(pitch, 1)
+         return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
+
+     def compute_f0_uv(self, wav, p_len=None):
+         if p_len is None:
+             p_len = wav.shape[0] // self.hop_length
+         f0, t = pyworld.dio(
+             wav.astype(np.double),
+             fs=self.sampling_rate,
+             f0_floor=self.f0_min,
+             f0_ceil=self.f0_max,
+             frame_period=1000 * self.hop_length / self.sampling_rate,
+         )
+         f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+         for index, pitch in enumerate(f0):
+             f0[index] = round(pitch, 1)
+         return self.interpolate_f0(self.resize_f0(f0, p_len))
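To make the gap filling in `interpolate_f0` concrete, a small worked sketch (values chosen for illustration): leading zeros copy the first voiced value, interior zeros ramp linearly between the surrounding voiced frames, trailing zeros hold the last voiced value, and `vuv_vector` records which frames were voiced to begin with.

```python
import numpy as np
from modules.F0Predictor.DioF0Predictor import DioF0Predictor

f0 = np.array([0.0, 220.0, 0.0, 0.0, 440.0, 0.0], dtype=np.float32)
ip, vuv = DioF0Predictor().interpolate_f0(f0)
print(ip)   # [220. 220. 330. 440. 440. 440.]
print(vuv)  # [0. 1. 0. 0. 1. 0.]
```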
modules/F0Predictor/F0Predictor.py ADDED
@@ -0,0 +1,16 @@
+ class F0Predictor(object):
+     def compute_f0(self, wav, p_len):
+         '''
+         input: wav: [signal_length]
+                p_len: int
+         output: f0: [signal_length//hop_length]
+         '''
+         pass
+
+     def compute_f0_uv(self, wav, p_len):
+         '''
+         input: wav: [signal_length]
+                p_len: int
+         output: f0: [signal_length//hop_length], uv: [signal_length//hop_length]
+         '''
+         pass
modules/F0Predictor/HarvestF0Predictor.py ADDED
@@ -0,0 +1,81 @@
+ from modules.F0Predictor.F0Predictor import F0Predictor
+ import pyworld
+ import numpy as np
+
+ class HarvestF0Predictor(F0Predictor):
+     def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+         self.hop_length = hop_length
+         self.f0_min = f0_min
+         self.f0_max = f0_max
+         self.sampling_rate = sampling_rate
+
+     def interpolate_f0(self, f0):
+         '''
+         Interpolate the F0 sequence, filling unvoiced (zero) frames.
+         '''
+         data = np.reshape(f0, (f0.size, 1))
+
+         vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+         vuv_vector[data > 0.0] = 1.0
+         vuv_vector[data <= 0.0] = 0.0
+
+         ip_data = data
+
+         frame_number = data.size
+         last_value = 0.0
+         for i in range(frame_number):
+             if data[i] <= 0.0:
+                 j = i + 1
+                 for j in range(i + 1, frame_number):
+                     if data[j] > 0.0:
+                         break
+                 if j < frame_number - 1:
+                     if last_value > 0.0:
+                         step = (data[j] - data[i - 1]) / float(j - i)
+                         for k in range(i, j):
+                             ip_data[k] = data[i - 1] + step * (k - i + 1)
+                     else:
+                         for k in range(i, j):
+                             ip_data[k] = data[j]
+                 else:
+                     for k in range(i, frame_number):
+                         ip_data[k] = last_value
+             else:
+                 ip_data[i] = data[i]  # this may be an unnecessary copy
+                 last_value = data[i]
+
+         return ip_data[:, 0], vuv_vector[:, 0]
+
+     def resize_f0(self, x, target_len):
+         source = np.array(x)
+         source[source < 0.001] = np.nan
+         target = np.interp(np.arange(0, len(source) * target_len, len(source)) / target_len, np.arange(0, len(source)), source)
+         res = np.nan_to_num(target)
+         return res
+
+     def compute_f0(self, wav, p_len=None):
+         if p_len is None:
+             p_len = wav.shape[0] // self.hop_length
+         f0, t = pyworld.harvest(
+             wav.astype(np.double),
+             fs=self.sampling_rate,
+             f0_ceil=self.f0_max,
+             f0_floor=self.f0_min,
+             frame_period=1000 * self.hop_length / self.sampling_rate,
+         )
+         f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+         return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
+
+     def compute_f0_uv(self, wav, p_len=None):
+         if p_len is None:
+             p_len = wav.shape[0] // self.hop_length
+         f0, t = pyworld.harvest(
+             wav.astype(np.double),
+             fs=self.sampling_rate,
+             f0_floor=self.f0_min,
+             f0_ceil=self.f0_max,
+             frame_period=1000 * self.hop_length / self.sampling_rate,
+         )
+         f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+         return self.interpolate_f0(self.resize_f0(f0, p_len))
modules/F0Predictor/PMF0Predictor.py ADDED
@@ -0,0 +1,83 @@
+ from modules.F0Predictor.F0Predictor import F0Predictor
+ import parselmouth
+ import numpy as np
+
+ class PMF0Predictor(F0Predictor):
+     def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+         self.hop_length = hop_length
+         self.f0_min = f0_min
+         self.f0_max = f0_max
+         self.sampling_rate = sampling_rate
+
+     def interpolate_f0(self, f0):
+         '''
+         Interpolate the F0 sequence, filling unvoiced (zero) frames.
+         '''
+         data = np.reshape(f0, (f0.size, 1))
+
+         vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+         vuv_vector[data > 0.0] = 1.0
+         vuv_vector[data <= 0.0] = 0.0
+
+         ip_data = data
+
+         frame_number = data.size
+         last_value = 0.0
+         for i in range(frame_number):
+             if data[i] <= 0.0:
+                 j = i + 1
+                 for j in range(i + 1, frame_number):
+                     if data[j] > 0.0:
+                         break
+                 if j < frame_number - 1:
+                     if last_value > 0.0:
+                         step = (data[j] - data[i - 1]) / float(j - i)
+                         for k in range(i, j):
+                             ip_data[k] = data[i - 1] + step * (k - i + 1)
+                     else:
+                         for k in range(i, j):
+                             ip_data[k] = data[j]
+                 else:
+                     for k in range(i, frame_number):
+                         ip_data[k] = last_value
+             else:
+                 ip_data[i] = data[i]  # this may be an unnecessary copy
+                 last_value = data[i]
+
+         return ip_data[:, 0], vuv_vector[:, 0]
+
+     def compute_f0(self, wav, p_len=None):
+         x = wav
+         if p_len is None:
+             p_len = x.shape[0] // self.hop_length
+         else:
+             assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
+         time_step = self.hop_length / self.sampling_rate * 1000
+         f0 = parselmouth.Sound(x, self.sampling_rate).to_pitch_ac(
+             time_step=time_step / 1000, voicing_threshold=0.6,
+             pitch_floor=self.f0_min, pitch_ceiling=self.f0_max).selected_array['frequency']
+
+         pad_size = (p_len - len(f0) + 1) // 2
+         if (pad_size > 0 or p_len - len(f0) - pad_size > 0):
+             f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode='constant')
+         f0, uv = self.interpolate_f0(f0)
+         return f0
+
+     def compute_f0_uv(self, wav, p_len=None):
+         x = wav
+         if p_len is None:
+             p_len = x.shape[0] // self.hop_length
+         else:
+             assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
+         time_step = self.hop_length / self.sampling_rate * 1000
+         f0 = parselmouth.Sound(x, self.sampling_rate).to_pitch_ac(
+             time_step=time_step / 1000, voicing_threshold=0.6,
+             pitch_floor=self.f0_min, pitch_ceiling=self.f0_max).selected_array['frequency']
+
+         pad_size = (p_len - len(f0) + 1) // 2
+         if (pad_size > 0 or p_len - len(f0) - pad_size > 0):
+             f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode='constant')
+         f0, uv = self.interpolate_f0(f0)
+         return f0, uv
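All four predictors implement the same `F0Predictor` contract of one value per hop. A quick shape check with the parselmouth-based predictor (noise input purely to exercise the shapes; requires `praat-parselmouth`):

```python
import numpy as np
from modules.F0Predictor.PMF0Predictor import PMF0Predictor

sr, hop = 44100, 512
wav = np.random.randn(sr).astype(np.float32)      # 1 s of audio
f0, uv = PMF0Predictor(hop_length=hop, sampling_rate=sr).compute_f0_uv(wav)
print(len(wav) // hop, len(f0), len(uv))          # typically 86 86 86
```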
modules/F0Predictor/__init__.py ADDED
File without changes
modules/F0Predictor/crepe.py ADDED
@@ -0,0 +1,340 @@
+ from typing import Optional, Union
+ try:
+     from typing import Literal
+ except Exception:
+     from typing_extensions import Literal
+ import numpy as np
+ import torch
+ import torchcrepe
+ from torch import nn
+ from torch.nn import functional as F
+ import scipy
+
+ # from: https://github.com/fishaudio/fish-diffusion
+
+ def repeat_expand(
+     content: Union[torch.Tensor, np.ndarray], target_len: int, mode: str = "nearest"
+ ):
+     """Repeat content to target length.
+     This is a wrapper of torch.nn.functional.interpolate.
+
+     Args:
+         content (torch.Tensor): tensor
+         target_len (int): target length
+         mode (str, optional): interpolation mode. Defaults to "nearest".
+
+     Returns:
+         torch.Tensor: tensor
+     """
+
+     ndim = content.ndim
+
+     if content.ndim == 1:
+         content = content[None, None]
+     elif content.ndim == 2:
+         content = content[None]
+
+     assert content.ndim == 3
+
+     is_np = isinstance(content, np.ndarray)
+     if is_np:
+         content = torch.from_numpy(content)
+
+     results = torch.nn.functional.interpolate(content, size=target_len, mode=mode)
+
+     if is_np:
+         results = results.numpy()
+
+     if ndim == 1:
+         return results[0, 0]
+     elif ndim == 2:
+         return results[0]
+
+
+ class BasePitchExtractor:
+     def __init__(
+         self,
+         hop_length: int = 512,
+         f0_min: float = 50.0,
+         f0_max: float = 1100.0,
+         keep_zeros: bool = True,
+     ):
+         """Base pitch extractor.
+
+         Args:
+             hop_length (int, optional): Hop length. Defaults to 512.
+             f0_min (float, optional): Minimum f0. Defaults to 50.0.
+             f0_max (float, optional): Maximum f0. Defaults to 1100.0.
+             keep_zeros (bool, optional): Whether to keep zeros in pitch. Defaults to True.
+         """
+
+         self.hop_length = hop_length
+         self.f0_min = f0_min
+         self.f0_max = f0_max
+         self.keep_zeros = keep_zeros
+
+     def __call__(self, x, sampling_rate=44100, pad_to=None):
+         raise NotImplementedError("BasePitchExtractor is not callable.")
+
+     def post_process(self, x, sampling_rate, f0, pad_to):
+         if isinstance(f0, np.ndarray):
+             f0 = torch.from_numpy(f0).float().to(x.device)
+
+         if pad_to is None:
+             return f0
+
+         f0 = repeat_expand(f0, pad_to)
+
+         if self.keep_zeros:
+             return f0
+
+         vuv_vector = torch.zeros_like(f0)
+         vuv_vector[f0 > 0.0] = 1.0
+         vuv_vector[f0 <= 0.0] = 0.0
+
+         # drop zero frequencies and interpolate linearly
+         nzindex = torch.nonzero(f0).squeeze()
+         f0 = torch.index_select(f0, dim=0, index=nzindex).cpu().numpy()
+         time_org = self.hop_length / sampling_rate * nzindex.cpu().numpy()
+         time_frame = np.arange(pad_to) * self.hop_length / sampling_rate
+
+         if f0.shape[0] <= 0:
+             return torch.zeros(pad_to, dtype=torch.float, device=x.device), torch.zeros(pad_to, dtype=torch.float, device=x.device)
+
+         if f0.shape[0] == 1:
+             return torch.ones(pad_to, dtype=torch.float, device=x.device) * f0[0], torch.ones(pad_to, dtype=torch.float, device=x.device)
+
+         # could probably be rewritten with torch?
+         f0 = np.interp(time_frame, time_org, f0, left=f0[0], right=f0[-1])
+         vuv_vector = vuv_vector.cpu().numpy()
+         vuv_vector = np.ceil(scipy.ndimage.zoom(vuv_vector, pad_to / len(vuv_vector), order=0))
+
+         return f0, vuv_vector
+
+
+ class MaskedAvgPool1d(nn.Module):
+     def __init__(
+         self, kernel_size: int, stride: Optional[int] = None, padding: Optional[int] = 0
+     ):
+         """An implementation of mean pooling that supports masked values.
+
+         Args:
+             kernel_size (int): The size of the mean pooling window.
+             stride (int, optional): The stride of the mean pooling window. Defaults to None.
+             padding (int, optional): The padding of the mean pooling window. Defaults to 0.
+         """
+
+         super(MaskedAvgPool1d, self).__init__()
+         self.kernel_size = kernel_size
+         self.stride = stride or kernel_size
+         self.padding = padding
+
+     def forward(self, x, mask=None):
+         ndim = x.dim()
+         if ndim == 2:
+             x = x.unsqueeze(1)
+
+         assert (
+             x.dim() == 3
+         ), "Input tensor must have 2 or 3 dimensions (batch_size, channels, width)"
+
+         # Apply the mask by setting masked elements to zero, or make NaNs zero
+         if mask is None:
+             mask = ~torch.isnan(x)
+
+         # Ensure mask has the same shape as the input tensor
+         assert x.shape == mask.shape, "Input tensor and mask must have the same shape"
+
+         masked_x = torch.where(mask, x, torch.zeros_like(x))
+         # Create a ones kernel with the same number of channels as the input tensor
+         ones_kernel = torch.ones(x.size(1), 1, self.kernel_size, device=x.device)
+
+         # Perform sum pooling
+         sum_pooled = nn.functional.conv1d(
+             masked_x,
+             ones_kernel,
+             stride=self.stride,
+             padding=self.padding,
+             groups=x.size(1),
+         )
+
+         # Count the non-masked (valid) elements in each pooling window
+         valid_count = nn.functional.conv1d(
+             mask.float(),
+             ones_kernel,
+             stride=self.stride,
+             padding=self.padding,
+             groups=x.size(1),
+         )
+         valid_count = valid_count.clamp(min=1)  # Avoid division by zero
+
+         # Perform masked average pooling
+         avg_pooled = sum_pooled / valid_count
+
+         # Fill zero values with NaNs
+         avg_pooled[avg_pooled == 0] = float("nan")
+
+         if ndim == 2:
+             return avg_pooled.squeeze(1)
+
+         return avg_pooled
+
+
+ class MaskedMedianPool1d(nn.Module):
+     def __init__(
+         self, kernel_size: int, stride: Optional[int] = None, padding: Optional[int] = 0
+     ):
+         """An implementation of median pooling that supports masked values.
+
+         This implementation is inspired by the median pooling implementation in
+         https://gist.github.com/rwightman/f2d3849281624be7c0f11c85c87c1598
+
+         Args:
+             kernel_size (int): The size of the median pooling window.
+             stride (int, optional): The stride of the median pooling window. Defaults to None.
+             padding (int, optional): The padding of the median pooling window. Defaults to 0.
+         """
+
+         super(MaskedMedianPool1d, self).__init__()
+         self.kernel_size = kernel_size
+         self.stride = stride or kernel_size
+         self.padding = padding
+
+     def forward(self, x, mask=None):
+         ndim = x.dim()
+         if ndim == 2:
+             x = x.unsqueeze(1)
+
+         assert (
+             x.dim() == 3
+         ), "Input tensor must have 2 or 3 dimensions (batch_size, channels, width)"
+
+         if mask is None:
+             mask = ~torch.isnan(x)
+
+         assert x.shape == mask.shape, "Input tensor and mask must have the same shape"
+
+         masked_x = torch.where(mask, x, torch.zeros_like(x))
+
+         x = F.pad(masked_x, (self.padding, self.padding), mode="reflect")
+         mask = F.pad(
+             mask.float(), (self.padding, self.padding), mode="constant", value=0
+         )
+
+         x = x.unfold(2, self.kernel_size, self.stride)
+         mask = mask.unfold(2, self.kernel_size, self.stride)
+
+         x = x.contiguous().view(x.size()[:3] + (-1,))
+         mask = mask.contiguous().view(mask.size()[:3] + (-1,)).to(x.device)
+
+         # Combine the mask with the input tensor
+         # x_masked = torch.where(mask.bool(), x, torch.fill_(torch.zeros_like(x), float("inf")))
+         x_masked = torch.where(mask.bool(), x, torch.FloatTensor([float("inf")]).to(x.device))
+
+         # Sort the masked tensor along the last dimension
+         x_sorted, _ = torch.sort(x_masked, dim=-1)
+
+         # Compute the count of non-masked (valid) values
+         valid_count = mask.sum(dim=-1)
+
+         # Calculate the index of the median value for each pooling window
+         median_idx = (torch.div((valid_count - 1), 2, rounding_mode='trunc')).clamp(min=0)
+
+         # Gather the median values using the calculated indices
+         median_pooled = x_sorted.gather(-1, median_idx.unsqueeze(-1).long()).squeeze(-1)
+
+         # Fill infinite values with NaNs
+         median_pooled[torch.isinf(median_pooled)] = float("nan")
+
+         if ndim == 2:
+             return median_pooled.squeeze(1)
+
+         return median_pooled
+
+
+ class CrepePitchExtractor(BasePitchExtractor):
+     def __init__(
+         self,
+         hop_length: int = 512,
+         f0_min: float = 50.0,
+         f0_max: float = 1100.0,
+         threshold: float = 0.05,
+         keep_zeros: bool = False,
+         device=None,
+         model: Literal["full", "tiny"] = "full",
+         use_fast_filters: bool = True,
+         decoder="viterbi"
+     ):
+         super().__init__(hop_length, f0_min, f0_max, keep_zeros)
+         if decoder == "viterbi":
+             self.decoder = torchcrepe.decode.viterbi
+         elif decoder == "argmax":
+             self.decoder = torchcrepe.decode.argmax
+         elif decoder == "weighted_argmax":
+             self.decoder = torchcrepe.decode.weighted_argmax
+         else:
+             raise ValueError("Unknown decoder")
+         self.threshold = threshold
+         self.model = model
+         self.use_fast_filters = use_fast_filters
+         self.hop_length = hop_length
+         if device is None:
+             self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         else:
+             self.dev = torch.device(device)
+         if self.use_fast_filters:
+             self.median_filter = MaskedMedianPool1d(3, 1, 1).to(self.dev)
+             self.mean_filter = MaskedAvgPool1d(3, 1, 1).to(self.dev)
+
+     def __call__(self, x, sampling_rate=44100, pad_to=None):
+         """Extract pitch using crepe.
+
+         Args:
+             x (torch.Tensor): Audio signal, shape (1, T).
+             sampling_rate (int, optional): Sampling rate. Defaults to 44100.
+             pad_to (int, optional): Pad to length. Defaults to None.
+
+         Returns:
+             torch.Tensor: Pitch, shape (T // hop_length,).
+         """
+
+         assert x.ndim == 2, f"Expected 2D tensor, got {x.ndim}D tensor."
+         assert x.shape[0] == 1, f"Expected 1 channel, got {x.shape[0]} channels."
+
+         x = x.to(self.dev)
+         f0, pd = torchcrepe.predict(
+             x,
+             sampling_rate,
+             self.hop_length,
+             self.f0_min,
+             self.f0_max,
+             pad=True,
+             model=self.model,
+             batch_size=1024,
+             device=x.device,
+             return_periodicity=True,
+             decoder=self.decoder
+         )
+
+         # Filter, remove silence, and set the uv threshold; see the original repo's README
+         if self.use_fast_filters:
+             pd = self.median_filter(pd)
+         else:
+             pd = torchcrepe.filter.median(pd, 3)
+
+         pd = torchcrepe.threshold.Silence(-60.0)(pd, x, sampling_rate, 512)
+         f0 = torchcrepe.threshold.At(self.threshold)(f0, pd)
+
+         if self.use_fast_filters:
+             f0 = self.mean_filter(f0)
+         else:
+             f0 = torchcrepe.filter.mean(f0, 3)
+
+         f0 = torch.where(torch.isnan(f0), torch.full_like(f0, 0), f0)[0]
+
+         if torch.all(f0 == 0):
+             rtn = f0.cpu().numpy() if pad_to is None else np.zeros(pad_to)
+             return rtn, rtn
+
+         return self.post_process(x, sampling_rate, f0, pad_to)
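A quick sketch of what `repeat_expand` does when frame counts disagree (nearest-neighbour stretch along the time axis):

```python
import torch
from modules.F0Predictor.crepe import repeat_expand

f0 = torch.tensor([100.0, 200.0, 300.0])   # 3 frames
print(repeat_expand(f0, 7))
# tensor([100., 100., 100., 200., 200., 300., 300.])
```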
modules/enhancer.py ADDED
@@ -0,0 +1,105 @@
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+ from vdecoder.nsf_hifigan.nvSTFT import STFT
+ from vdecoder.nsf_hifigan.models import load_model
+ from torchaudio.transforms import Resample
+
+ class Enhancer:
+     def __init__(self, enhancer_type, enhancer_ckpt, device=None):
+         if device is None:
+             device = 'cuda' if torch.cuda.is_available() else 'cpu'
+         self.device = device
+
+         if enhancer_type == 'nsf-hifigan':
+             self.enhancer = NsfHifiGAN(enhancer_ckpt, device=self.device)
+         else:
+             raise ValueError(f" [x] Unknown enhancer: {enhancer_type}")
+
+         self.resample_kernel = {}
+         self.enhancer_sample_rate = self.enhancer.sample_rate()
+         self.enhancer_hop_size = self.enhancer.hop_size()
+
+     def enhance(self,
+                 audio,  # 1, T
+                 sample_rate,
+                 f0,  # 1, n_frames, 1
+                 hop_size,
+                 adaptive_key=0,
+                 silence_front=0
+                 ):
+         # enhancer start time
+         start_frame = int(silence_front * sample_rate / hop_size)
+         real_silence_front = start_frame * hop_size / sample_rate
+         audio = audio[:, int(np.round(real_silence_front * sample_rate)):]
+         f0 = f0[:, start_frame:, :]
+
+         # adaptive parameters
+         adaptive_factor = 2 ** (-adaptive_key / 12)
+         adaptive_sample_rate = 100 * int(np.round(self.enhancer_sample_rate / adaptive_factor / 100))
+         real_factor = self.enhancer_sample_rate / adaptive_sample_rate
+
+         # resample the ddsp output
+         if sample_rate == adaptive_sample_rate:
+             audio_res = audio
+         else:
+             key_str = str(sample_rate) + str(adaptive_sample_rate)
+             if key_str not in self.resample_kernel:
+                 self.resample_kernel[key_str] = Resample(sample_rate, adaptive_sample_rate, lowpass_filter_width=128).to(self.device)
+             audio_res = self.resample_kernel[key_str](audio)
+
+         n_frames = int(audio_res.size(-1) // self.enhancer_hop_size + 1)
+
+         # resample f0
+         f0_np = f0.squeeze(0).squeeze(-1).cpu().numpy()
+         f0_np *= real_factor
+         time_org = (hop_size / sample_rate) * np.arange(len(f0_np)) / real_factor
+         time_frame = (self.enhancer_hop_size / self.enhancer_sample_rate) * np.arange(n_frames)
+         f0_res = np.interp(time_frame, time_org, f0_np, left=f0_np[0], right=f0_np[-1])
+         f0_res = torch.from_numpy(f0_res).unsqueeze(0).float().to(self.device)  # 1, n_frames
+
+         # enhance
+         enhanced_audio, enhancer_sample_rate = self.enhancer(audio_res, f0_res)
+
+         # resample the enhanced output
+         if adaptive_factor != 0:
+             key_str = str(adaptive_sample_rate) + str(enhancer_sample_rate)
+             if key_str not in self.resample_kernel:
+                 self.resample_kernel[key_str] = Resample(adaptive_sample_rate, enhancer_sample_rate, lowpass_filter_width=128).to(self.device)
+             enhanced_audio = self.resample_kernel[key_str](enhanced_audio)
+
+         # pad the silence frames
+         if start_frame > 0:
+             enhanced_audio = F.pad(enhanced_audio, (int(np.round(enhancer_sample_rate * real_silence_front)), 0))
+
+         return enhanced_audio, enhancer_sample_rate
+
+
+ class NsfHifiGAN(torch.nn.Module):
+     def __init__(self, model_path, device=None):
+         super().__init__()
+         if device is None:
+             device = 'cuda' if torch.cuda.is_available() else 'cpu'
+         self.device = device
+         print('| Load HifiGAN: ', model_path)
+         self.model, self.h = load_model(model_path, device=self.device)
+
+     def sample_rate(self):
+         return self.h.sampling_rate
+
+     def hop_size(self):
+         return self.h.hop_size
+
+     def forward(self, audio, f0):
+         stft = STFT(
+             self.h.sampling_rate,
+             self.h.num_mels,
+             self.h.n_fft,
+             self.h.win_size,
+             self.h.hop_size,
+             self.h.fmin,
+             self.h.fmax)
+         with torch.no_grad():
+             mel = stft.get_mel(audio)
+             enhanced_audio = self.model(mel, f0[:, :mel.size(-1)]).view(-1)
+         return enhanced_audio, self.h.sampling_rate
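The `adaptive_key` arithmetic in `Enhancer.enhance` is easiest to see with concrete numbers (pure Python; 44100 Hz is an illustrative checkpoint rate):

```python
import numpy as np

enhancer_sample_rate = 44100
for adaptive_key in (0, 6, 12):
    adaptive_factor = 2 ** (-adaptive_key / 12)
    adaptive_sample_rate = 100 * int(np.round(enhancer_sample_rate / adaptive_factor / 100))
    print(adaptive_key, round(adaptive_factor, 3), adaptive_sample_rate)
# 0  -> 1.0   -> 44100 (no headroom)
# 6  -> 0.707 -> 62400 (half an octave of pitch headroom)
# 12 -> 0.5   -> 88200 (a full octave)
```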
vdecoder/hifiganwithsnake/alias/__init__.py ADDED
@@ -0,0 +1,6 @@
+ # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
+ # LICENSE is in incl_licenses directory.
+
+ from .filter import *
+ from .resample import *
+ from .act import *
vdecoder/hifiganwithsnake/alias/act.py ADDED
@@ -0,0 +1,129 @@
+ # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
+ # LICENSE is in incl_licenses directory.
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ from torch import sin, pow
+ from torch.nn import Parameter
+ from .resample import UpSample1d, DownSample1d
+
+
+ class Activation1d(nn.Module):
+     def __init__(self,
+                  activation,
+                  up_ratio: int = 2,
+                  down_ratio: int = 2,
+                  up_kernel_size: int = 12,
+                  down_kernel_size: int = 12):
+         super().__init__()
+         self.up_ratio = up_ratio
+         self.down_ratio = down_ratio
+         self.act = activation
+         self.upsample = UpSample1d(up_ratio, up_kernel_size)
+         self.downsample = DownSample1d(down_ratio, down_kernel_size)
+
+     # x: [B, C, T]
+     def forward(self, x):
+         x = self.upsample(x)
+         x = self.act(x)
+         x = self.downsample(x)
+
+         return x
+
+
+ class SnakeBeta(nn.Module):
+     '''
+     A modified Snake function which uses separate parameters for the magnitude of the periodic components
+     Shape:
+         - Input: (B, C, T)
+         - Output: (B, C, T), same shape as the input
+     Parameters:
+         - alpha - trainable parameter that controls frequency
+         - beta - trainable parameter that controls magnitude
+     References:
+         - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
+           https://arxiv.org/abs/2006.08195
+     Examples:
+         >>> a1 = SnakeBeta(256)
+         >>> x = torch.randn(256)
+         >>> x = a1(x)
+     '''
+
+     def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
+         '''
+         Initialization.
+         INPUT:
+             - in_features: shape of the input
+             - alpha - trainable parameter that controls frequency
+             - beta - trainable parameter that controls magnitude
+             alpha is initialized to 1 by default; higher values = higher frequency.
+             beta is initialized to 1 by default; higher values = higher magnitude.
+             alpha will be trained along with the rest of your model.
+         '''
+         super(SnakeBeta, self).__init__()
+         self.in_features = in_features
+         # initialize alpha
+         self.alpha_logscale = alpha_logscale
+         if self.alpha_logscale:  # log scale alphas initialized to zeros
+             self.alpha = Parameter(torch.zeros(in_features) * alpha)
+             self.beta = Parameter(torch.zeros(in_features) * alpha)
+         else:  # linear scale alphas initialized to ones
+             self.alpha = Parameter(torch.ones(in_features) * alpha)
+             self.beta = Parameter(torch.ones(in_features) * alpha)
+         self.alpha.requires_grad = alpha_trainable
+         self.beta.requires_grad = alpha_trainable
+         self.no_div_by_zero = 0.000000001
+
+     def forward(self, x):
+         '''
+         Forward pass of the function.
+         Applies the function to the input elementwise.
+         SnakeBeta = x + 1/b * sin^2 (xa)
+         '''
+         alpha = self.alpha.unsqueeze(0).unsqueeze(-1)  # line up with x to [B, C, T]
+         beta = self.beta.unsqueeze(0).unsqueeze(-1)
+         if self.alpha_logscale:
+             alpha = torch.exp(alpha)
+             beta = torch.exp(beta)
+         x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
+         return x
+
+
+ class Mish(nn.Module):
+     """
+     Mish activation function is proposed in "Mish: A Self
+     Regularized Non-Monotonic Neural Activation Function"
+     paper, https://arxiv.org/abs/1908.08681.
+     """
+
+     def __init__(self):
+         super().__init__()
+
+     def forward(self, x):
+         return x * torch.tanh(F.softplus(x))
+
+
+ class SnakeAlias(nn.Module):
+     def __init__(self,
+                  channels,
+                  up_ratio: int = 2,
+                  down_ratio: int = 2,
+                  up_kernel_size: int = 12,
+                  down_kernel_size: int = 12):
+         super().__init__()
+         self.up_ratio = up_ratio
+         self.down_ratio = down_ratio
+         self.act = SnakeBeta(channels, alpha_logscale=True)
+         self.upsample = UpSample1d(up_ratio, up_kernel_size)
+         self.downsample = DownSample1d(down_ratio, down_kernel_size)
+
+     # x: [B, C, T]
+     def forward(self, x):
+         x = self.upsample(x)
+         x = self.act(x)
+         x = self.downsample(x)
+
+         return x
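SnakeBeta computes x + (1/β)·sin²(αx). A minimal sanity check with the log-scale parameters at their zero init, where α = β = e⁰ = 1:

```python
import torch
from vdecoder.hifiganwithsnake.alias.act import SnakeBeta

act = SnakeBeta(in_features=4, alpha_logscale=True)
x = torch.randn(2, 4, 16)                    # (B, C, T)
# with alpha = beta = 1 the activation reduces to x + sin(x)**2
assert torch.allclose(act(x), x + torch.sin(x) ** 2, atol=1e-6)
```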
vdecoder/hifiganwithsnake/alias/filter.py ADDED
@@ -0,0 +1,95 @@
+ # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
+ # LICENSE is in incl_licenses directory.
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import math
+
+ if 'sinc' in dir(torch):
+     sinc = torch.sinc
+ else:
+     # This code is adapted from adefossez's julius.core.sinc under the MIT License
+     # https://adefossez.github.io/julius/julius/core.html
+     # LICENSE is in incl_licenses directory.
+     def sinc(x: torch.Tensor):
+         """
+         Implementation of sinc, i.e. sin(pi * x) / (pi * x)
+         __Warning__: Different to julius.sinc, the input is multiplied by `pi`!
+         """
+         return torch.where(x == 0,
+                            torch.tensor(1., device=x.device, dtype=x.dtype),
+                            torch.sin(math.pi * x) / math.pi / x)
+
+
+ # This code is adapted from adefossez's julius.lowpass.LowPassFilters under the MIT License
+ # https://adefossez.github.io/julius/julius/lowpass.html
+ # LICENSE is in incl_licenses directory.
+ def kaiser_sinc_filter1d(cutoff, half_width, kernel_size):  # return filter [1,1,kernel_size]
+     even = (kernel_size % 2 == 0)
+     half_size = kernel_size // 2
+
+     # For the Kaiser window
+     delta_f = 4 * half_width
+     A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95
+     if A > 50.:
+         beta = 0.1102 * (A - 8.7)
+     elif A >= 21.:
+         beta = 0.5842 * (A - 21)**0.4 + 0.07886 * (A - 21.)
+     else:
+         beta = 0.
+     window = torch.kaiser_window(kernel_size, beta=beta, periodic=False)
+
+     # ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio
+     if even:
+         time = (torch.arange(-half_size, half_size) + 0.5)
+     else:
+         time = torch.arange(kernel_size) - half_size
+     if cutoff == 0:
+         filter_ = torch.zeros_like(time)
+     else:
+         filter_ = 2 * cutoff * window * sinc(2 * cutoff * time)
+         # Normalize filter to have sum = 1, otherwise we will have a small leakage
+         # of the constant component in the input signal.
+         filter_ /= filter_.sum()
+     filter = filter_.view(1, 1, kernel_size)
+
+     return filter
+
+
+ class LowPassFilter1d(nn.Module):
+     def __init__(self,
+                  cutoff=0.5,
+                  half_width=0.6,
+                  stride: int = 1,
+                  padding: bool = True,
+                  padding_mode: str = 'replicate',
+                  kernel_size: int = 12):
+         # kernel_size should be an even number for the stylegan3 setup;
+         # in this implementation, an odd number is also possible.
+         super().__init__()
+         if cutoff < -0.:
+             raise ValueError("Minimum cutoff must be larger than zero.")
+         if cutoff > 0.5:
+             raise ValueError("A cutoff above 0.5 does not make sense.")
+         self.kernel_size = kernel_size
+         self.even = (kernel_size % 2 == 0)
+         self.pad_left = kernel_size // 2 - int(self.even)
+         self.pad_right = kernel_size // 2
+         self.stride = stride
+         self.padding = padding
+         self.padding_mode = padding_mode
+         filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size)
+         self.register_buffer("filter", filter)
+
+     # input [B, C, T]
+     def forward(self, x):
+         _, C, _ = x.shape
+
+         if self.padding:
+             x = F.pad(x, (self.pad_left, self.pad_right),
+                       mode=self.padding_mode)
+         out = F.conv1d(x, self.filter.expand(C, -1, -1),
+                        stride=self.stride, groups=C)
+
+         return out
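For reference, the Kaiser attenuation formula above worked through for the defaults used by `UpSample1d(ratio=2)` (kernel_size 12, half_width 0.3):

```python
import math

kernel_size, half_width = 12, 0.6 / 2
half_size = kernel_size // 2
delta_f = 4 * half_width                                   # 1.2
A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95     # ~51.0
beta = 0.1102 * (A - 8.7) if A > 50. else 0.5842 * (A - 21) ** 0.4 + 0.07886 * (A - 21.)
print(round(A, 1), round(beta, 2))                         # 51.0 4.66
```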
vdecoder/hifiganwithsnake/alias/resample.py ADDED
@@ -0,0 +1,49 @@
+ # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
+ # LICENSE is in incl_licenses directory.
+
+ import torch.nn as nn
+ from torch.nn import functional as F
+ from .filter import LowPassFilter1d
+ from .filter import kaiser_sinc_filter1d
+
+
+ class UpSample1d(nn.Module):
+     def __init__(self, ratio=2, kernel_size=None):
+         super().__init__()
+         self.ratio = ratio
+         self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
+         self.stride = ratio
+         self.pad = self.kernel_size // ratio - 1
+         self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2
+         self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2
+         filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio,
+                                       half_width=0.6 / ratio,
+                                       kernel_size=self.kernel_size)
+         self.register_buffer("filter", filter)
+
+     # x: [B, C, T]
+     def forward(self, x):
+         _, C, _ = x.shape
+
+         x = F.pad(x, (self.pad, self.pad), mode='replicate')
+         x = self.ratio * F.conv_transpose1d(
+             x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C)
+         x = x[..., self.pad_left:-self.pad_right]
+
+         return x
+
+
+ class DownSample1d(nn.Module):
+     def __init__(self, ratio=2, kernel_size=None):
+         super().__init__()
+         self.ratio = ratio
+         self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
+         self.lowpass = LowPassFilter1d(cutoff=0.5 / ratio,
+                                        half_width=0.6 / ratio,
+                                        stride=ratio,
+                                        kernel_size=self.kernel_size)
+
+     def forward(self, x):
+         xx = self.lowpass(x)
+
+         return xx
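Shape sketch for the 2× anti-aliased up/down pair (with the defaults above):

```python
import torch
from vdecoder.hifiganwithsnake.alias.resample import UpSample1d, DownSample1d

x = torch.randn(1, 8, 100)        # (B, C, T)
up = UpSample1d(ratio=2)          # kernel_size defaults to 12
down = DownSample1d(ratio=2)
print(up(x).shape)                # torch.Size([1, 8, 200])
print(down(up(x)).shape)          # torch.Size([1, 8, 100])
```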
vdecoder/hifiganwithsnake/env.py ADDED
@@ -0,0 +1,15 @@
+ import os
+ import shutil
+
+
+ class AttrDict(dict):
+     def __init__(self, *args, **kwargs):
+         super(AttrDict, self).__init__(*args, **kwargs)
+         self.__dict__ = self
+
+
+ def build_env(config, config_name, path):
+     t_path = os.path.join(path, config_name)
+     if config != t_path:
+         os.makedirs(path, exist_ok=True)
+         shutil.copyfile(config, os.path.join(path, config_name))
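`AttrDict` points the dict's `__dict__` back at itself, so config keys double as attributes:

```python
from vdecoder.hifiganwithsnake.env import AttrDict

h = AttrDict({"sampling_rate": 44100, "hop_size": 512})
print(h.sampling_rate, h["hop_size"])  # 44100 512
```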
vdecoder/hifiganwithsnake/models.py ADDED
@@ -0,0 +1,518 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ from .env import AttrDict
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn.functional as F
7
+ import torch.nn as nn
8
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
9
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
10
+ from .utils import init_weights, get_padding
11
+ from vdecoder.hifiganwithsnake.alias.act import SnakeAlias
12
+
13
+ LRELU_SLOPE = 0.1
14
+
15
+
16
+ def load_model(model_path, device='cuda'):
17
+ config_file = os.path.join(os.path.split(model_path)[0], 'config.json')
18
+ with open(config_file) as f:
19
+ data = f.read()
20
+
21
+ global h
22
+ json_config = json.loads(data)
23
+ h = AttrDict(json_config)
24
+
25
+ generator = Generator(h).to(device)
26
+
27
+ cp_dict = torch.load(model_path)
28
+ generator.load_state_dict(cp_dict['generator'])
29
+ generator.eval()
30
+ generator.remove_weight_norm()
31
+ del cp_dict
32
+ return generator, h
33
+
34
+
35
+ class ResBlock1(torch.nn.Module):
36
+ def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
37
+ super(ResBlock1, self).__init__()
38
+ self.h = h
39
+ self.convs1 = nn.ModuleList([
40
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
41
+ padding=get_padding(kernel_size, dilation[0]))),
42
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
43
+ padding=get_padding(kernel_size, dilation[1]))),
44
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
45
+ padding=get_padding(kernel_size, dilation[2])))
46
+ ])
47
+ self.convs1.apply(init_weights)
48
+
49
+ self.convs2 = nn.ModuleList([
50
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
51
+ padding=get_padding(kernel_size, 1))),
52
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
53
+ padding=get_padding(kernel_size, 1))),
54
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
55
+ padding=get_padding(kernel_size, 1)))
56
+ ])
57
+ self.convs2.apply(init_weights)
58
+
59
+ self.num_layers = len(self.convs1) + len(self.convs2)
60
+ self.activations = nn.ModuleList([
61
+ SnakeAlias(channels) for _ in range(self.num_layers)
62
+ ])
63
+
64
+ def forward(self, x):
65
+ acts1, acts2 = self.activations[::2], self.activations[1::2]
66
+ for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
67
+ xt = a1(x)
68
+ xt = c1(xt)
69
+ xt = a2(xt)
70
+ xt = c2(xt)
71
+ x = xt + x
72
+ return x
73
+
74
+ def remove_weight_norm(self):
75
+ for l in self.convs1:
76
+ remove_weight_norm(l)
77
+ for l in self.convs2:
78
+ remove_weight_norm(l)
79
+
80
+
81
+ class ResBlock2(torch.nn.Module):
82
+ def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
83
+ super(ResBlock2, self).__init__()
84
+ self.h = h
85
+ self.convs = nn.ModuleList([
86
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
87
+ padding=get_padding(kernel_size, dilation[0]))),
88
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
89
+ padding=get_padding(kernel_size, dilation[1])))
90
+ ])
91
+ self.convs.apply(init_weights)
92
+
93
+ self.num_layers = len(self.convs)
94
+ self.activations = nn.ModuleList([
95
+ SnakeAlias(channels) for _ in range(self.num_layers)
96
+ ])
97
+
98
+ def forward(self, x):
99
+ for c,a in zip(self.convs, self.activations):
100
+ xt = a(x)
101
+ xt = c(xt)
102
+ x = xt + x
103
+ return x
104
+
105
+ def remove_weight_norm(self):
106
+ for l in self.convs:
107
+ remove_weight_norm(l)
108
+
109
+
110
+ def padDiff(x):
111
+ return F.pad(F.pad(x, (0,0,-1,1), 'constant', 0) - x, (0,0,0,-1), 'constant', 0)
112
+
113
+ class SineGen(torch.nn.Module):
114
+ """ Definition of sine generator
115
+ SineGen(samp_rate, harmonic_num = 0,
116
+ sine_amp = 0.1, noise_std = 0.003,
117
+ voiced_threshold = 0,
118
+ flag_for_pulse=False)
119
+ samp_rate: sampling rate in Hz
120
+ harmonic_num: number of harmonic overtones (default 0)
121
+ sine_amp: amplitude of sine-wavefrom (default 0.1)
122
+ noise_std: std of Gaussian noise (default 0.003)
123
+ voiced_thoreshold: F0 threshold for U/V classification (default 0)
124
+ flag_for_pulse: this SinGen is used inside PulseGen (default False)
125
+ Note: when flag_for_pulse is True, the first time step of a voiced
126
+ segment is always sin(np.pi) or cos(0)
127
+ """
128
+
129
+ def __init__(self, samp_rate, harmonic_num=0,
130
+ sine_amp=0.1, noise_std=0.003,
131
+ voiced_threshold=0,
132
+ flag_for_pulse=False):
133
+ super(SineGen, self).__init__()
134
+ self.sine_amp = sine_amp
135
+ self.noise_std = noise_std
136
+ self.harmonic_num = harmonic_num
137
+ self.dim = self.harmonic_num + 1
138
+ self.sampling_rate = samp_rate
139
+ self.voiced_threshold = voiced_threshold
140
+ self.flag_for_pulse = flag_for_pulse
141
+
142
+ def _f02uv(self, f0):
143
+ # generate uv signal
144
+ uv = (f0 > self.voiced_threshold).type(torch.float32)
145
+ return uv
146
+
147
+ def _f02sine(self, f0_values):
148
+ """ f0_values: (batchsize, length, dim)
149
+ where dim indicates fundamental tone and overtones
150
+ """
151
+ # convert to F0 in rad. The integer part n can be ignored
152
+ # because 2 * np.pi * n doesn't affect phase
153
+ rad_values = (f0_values / self.sampling_rate) % 1
154
+
155
+ # initial phase noise (no noise for fundamental component)
156
+ rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \
157
+ device=f0_values.device)
158
+ rand_ini[:, 0] = 0
159
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
160
+
161
+ # instantaneous phase: sine[t] = sin(2*pi * \sum_{i=1}^{t} rad_i)
162
+ if not self.flag_for_pulse:
163
+ # for normal case
164
+
165
+ # To prevent torch.cumsum numerical overflow,
166
+ # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
167
+ # Buffer tmp_over_one_idx indicates the time step to add -1.
168
+ # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi
169
+ tmp_over_one = torch.cumsum(rad_values, 1) % 1
170
+ tmp_over_one_idx = (padDiff(tmp_over_one)) < 0
171
+ cumsum_shift = torch.zeros_like(rad_values)
172
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
173
+
174
+ sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1)
175
+ * 2 * np.pi)
176
+ else:
177
+ # If necessary, make sure that the first time step of every
178
+ # voiced segment is sin(pi) or cos(0)
179
+ # This is used for pulse-train generation
180
+
181
+ # identify the last time step in unvoiced segments
182
+ uv = self._f02uv(f0_values)
183
+ uv_1 = torch.roll(uv, shifts=-1, dims=1)
184
+ uv_1[:, -1, :] = 1
185
+ u_loc = (uv < 1) * (uv_1 > 0)
186
+
187
+ # get the instantaneous phase
188
+ tmp_cumsum = torch.cumsum(rad_values, dim=1)
189
+ # different batch needs to be processed differently
190
+ for idx in range(f0_values.shape[0]):
191
+ temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
192
+ temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
193
+ # stores the accumulation of i.phase within
194
+ # each voiced segment
195
+ tmp_cumsum[idx, :, :] = 0
196
+ tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
197
+
198
+ # rad_values - tmp_cumsum: remove the accumulation of i.phase
199
+ # within the previous voiced segment.
200
+ i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
201
+
202
+ # get the sines
203
+ sines = torch.cos(i_phase * 2 * np.pi)
204
+ return sines
205
+
206
+ def forward(self, f0):
207
+ """ sine_tensor, uv = forward(f0)
208
+ input F0: tensor(batchsize=1, length, dim=1)
209
+ f0 for unvoiced steps should be 0
210
+ output sine_tensor: tensor(batchsize=1, length, dim)
211
+ output uv: tensor(batchsize=1, length, 1)
212
+ """
213
+ with torch.no_grad():
214
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim,
215
+ device=f0.device)
216
+ # fundamental component
217
+ fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device))
218
+
219
+ # generate sine waveforms
220
+ sine_waves = self._f02sine(fn) * self.sine_amp
221
+
222
+ # generate uv signal
223
+ # uv = torch.ones(f0.shape)
224
+ # uv = uv * (f0 > self.voiced_threshold)
225
+ uv = self._f02uv(f0)
226
+
227
+ # noise: for unvoiced should be similar to sine_amp
228
+ # std = self.sine_amp/3 -> max value ~ self.sine_amp
229
+ # . for voiced regions is self.noise_std
230
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
231
+ noise = noise_amp * torch.randn_like(sine_waves)
232
+
233
+ # first: set the unvoiced part to 0 by uv
234
+ # then: additive noise
235
+ sine_waves = sine_waves * uv + noise
236
+ return sine_waves, uv, noise
237
+
238
+
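
A minimal usage sketch for the class above (illustrative values, assumed to run in this module's namespace): an F0 of 0 Hz marks unvoiced frames, and harmonic track k rides at k·F0.

```python
# Illustrative sketch, not repo code: drive SineGen with a synthetic F0 contour.
import torch

sine_gen = SineGen(samp_rate=16000, harmonic_num=8)   # dim = 8 + 1 harmonic tracks
f0 = torch.zeros(1, 100, 1)                           # (batch, frames, 1); 0 Hz = unvoiced
f0[:, 20:80, :] = 220.0                               # one voiced segment at 220 Hz
sine, uv, noise = sine_gen(f0)
print(sine.shape, uv.shape)                           # (1, 100, 9) and (1, 100, 1)
```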
239
+ class SourceModuleHnNSF(torch.nn.Module):
240
+ """ SourceModule for hn-nsf
241
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
242
+ add_noise_std=0.003, voiced_threshod=0)
243
+ sampling_rate: sampling_rate in Hz
244
+ harmonic_num: number of harmonic above F0 (default: 0)
245
+ sine_amp: amplitude of sine source signal (default: 0.1)
246
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
247
+ note that amplitude of noise in unvoiced is decided
248
+ by sine_amp
249
+ voiced_threshold: threshold to set U/V given F0 (default: 0)
250
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
251
+ F0_sampled (batchsize, length, 1)
252
+ Sine_source (batchsize, length, 1)
253
+ noise_source (batchsize, length 1)
254
+ uv (batchsize, length, 1)
255
+ """
256
+
257
+ def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
258
+ add_noise_std=0.003, voiced_threshod=0):
259
+ super(SourceModuleHnNSF, self).__init__()
260
+
261
+ self.sine_amp = sine_amp
262
+ self.noise_std = add_noise_std
263
+
264
+ # to produce sine waveforms
265
+ self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
266
+ sine_amp, add_noise_std, voiced_threshod)
267
+
268
+ # to merge source harmonics into a single excitation
269
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
270
+ self.l_tanh = torch.nn.Tanh()
271
+
272
+ def forward(self, x):
273
+ """
274
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
275
+ F0_sampled (batchsize, length, 1)
276
+ Sine_source (batchsize, length, 1)
277
+ noise_source (batchsize, length 1)
278
+ """
279
+ # source for harmonic branch
280
+ sine_wavs, uv, _ = self.l_sin_gen(x)
281
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
282
+
283
+ # source for noise branch, in the same shape as uv
284
+ noise = torch.randn_like(uv) * self.sine_amp / 3
285
+ return sine_merge, noise, uv
286
+
287
+
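
To make the harmonic source concrete, a small sketch (assumed to run in this module's namespace): the nine sine tracks from SineGen are mixed down to a single excitation channel by the learned linear layer plus tanh.

```python
# Sketch: SourceModuleHnNSF collapses the (harmonic_num + 1) sine tracks
# into one excitation signal via a learned 9 -> 1 linear projection.
import torch

source = SourceModuleHnNSF(sampling_rate=16000, harmonic_num=8)
f0 = torch.full((1, 100, 1), 220.0)              # constant 220 Hz, fully voiced
sine_merge, noise, uv = source(f0)
print(sine_merge.shape, noise.shape, uv.shape)   # each (1, 100, 1)
```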
288
+ class Generator(torch.nn.Module):
289
+ def __init__(self, h):
290
+ super(Generator, self).__init__()
291
+ self.h = h
292
+
293
+ self.num_kernels = len(h["resblock_kernel_sizes"])
294
+ self.num_upsamples = len(h["upsample_rates"])
295
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h["upsample_rates"]))
296
+ self.m_source = SourceModuleHnNSF(
297
+ sampling_rate=h["sampling_rate"],
298
+ harmonic_num=8)
299
+ self.noise_convs = nn.ModuleList()
300
+ self.conv_pre = weight_norm(Conv1d(h["inter_channels"], h["upsample_initial_channel"], 7, 1, padding=3))
301
+ resblock = ResBlock1 if h["resblock"] == '1' else ResBlock2
302
+ self.ups = nn.ModuleList()
303
+ for i, (u, k) in enumerate(zip(h["upsample_rates"], h["upsample_kernel_sizes"])):
304
+ c_cur = h["upsample_initial_channel"] // (2 ** (i + 1))
305
+ self.ups.append(weight_norm(
306
+ ConvTranspose1d(h["upsample_initial_channel"] // (2 ** i), h["upsample_initial_channel"] // (2 ** (i + 1)),
307
+ k, u, padding=(k - u) // 2)))
308
+ if i + 1 < len(h["upsample_rates"]): #
309
+ stride_f0 = np.prod(h["upsample_rates"][i + 1:])
310
+ self.noise_convs.append(Conv1d(
311
+ 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2))
312
+ else:
313
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
314
+ self.resblocks = nn.ModuleList()
315
+ self.snakes = nn.ModuleList()
316
+ for i in range(len(self.ups)):
317
+ ch = h["upsample_initial_channel"] // (2 ** (i + 1))
318
+ self.snakes.append(SnakeAlias(h["upsample_initial_channel"] // (2 ** (i))))
319
+ for j, (k, d) in enumerate(zip(h["resblock_kernel_sizes"], h["resblock_dilation_sizes"])):
320
+ self.resblocks.append(resblock(h, ch, k, d))
321
+
322
+ self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
323
+ self.ups.apply(init_weights)
324
+ self.conv_post.apply(init_weights)
325
+ self.snake_post = SnakeAlias(ch)
326
+ self.cond = nn.Conv1d(h['gin_channels'], h['upsample_initial_channel'], 1)
327
+
328
+ def forward(self, x, f0, g=None):
329
+ # print(1,x.shape,f0.shape,f0[:, None].shape)
330
+ f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t
331
+ # print(2,f0.shape)
332
+ har_source, noi_source, uv = self.m_source(f0)
333
+ har_source = har_source.transpose(1, 2)
334
+ x = self.conv_pre(x)
335
+ x = x + self.cond(g)
336
+ # print(124,x.shape,har_source.shape)
337
+ for i in range(self.num_upsamples):
338
+ x = self.snakes[i](x)
339
+ # print(3,x.shape)
340
+ x = self.ups[i](x)
341
+ x_source = self.noise_convs[i](har_source)
342
+ # print(4,x_source.shape,har_source.shape,x.shape)
343
+ x = x + x_source
344
+ xs = None
345
+ for j in range(self.num_kernels):
346
+ if xs is None:
347
+ xs = self.resblocks[i * self.num_kernels + j](x)
348
+ else:
349
+ xs += self.resblocks[i * self.num_kernels + j](x)
350
+ x = xs / self.num_kernels
351
+ x = self.snake_post(x)
352
+ x = self.conv_post(x)
353
+ x = torch.tanh(x)
354
+
355
+ return x
356
+
357
+ def remove_weight_norm(self):
358
+ print('Removing weight norm...')
359
+ for l in self.ups:
360
+ remove_weight_norm(l)
361
+ for l in self.resblocks:
362
+ l.remove_weight_norm()
363
+ remove_weight_norm(self.conv_pre)
364
+ remove_weight_norm(self.conv_post)
365
+
366
+
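
The Generator reads its hyperparameters from a dict `h`. The config below is a hypothetical minimal example (real values come from the model's config.json), showing the expected tensor shapes end to end.

```python
# Hypothetical minimal config; real models use the values in config.json.
import torch

h = {
    "resblock": "1",
    "resblock_kernel_sizes": [3],
    "resblock_dilation_sizes": [[1, 3, 5]],
    "upsample_rates": [8, 8],              # total upsampling factor = 64
    "upsample_kernel_sizes": [16, 16],
    "upsample_initial_channel": 128,
    "inter_channels": 64,
    "gin_channels": 32,
    "sampling_rate": 16000,
}
gen = Generator(h)
x = torch.randn(1, 64, 50)                 # (batch, inter_channels, frames) content features
f0 = torch.full((1, 50), 220.0)            # frame-level pitch
g = torch.randn(1, 32, 1)                  # speaker embedding for the cond 1x1 conv
wav = gen(x, f0, g)
print(wav.shape)                           # (1, 1, 3200) = 50 frames * 64x upsampling
```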
367
+ class DiscriminatorP(torch.nn.Module):
368
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
369
+ super(DiscriminatorP, self).__init__()
370
+ self.period = period
371
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
372
+ self.convs = nn.ModuleList([
373
+ norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
374
+ norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
375
+ norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
376
+ norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
377
+ norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
378
+ ])
379
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
380
+
381
+ def forward(self, x):
382
+ fmap = []
383
+
384
+ # 1d to 2d
385
+ b, c, t = x.shape
386
+ if t % self.period != 0: # pad first
387
+ n_pad = self.period - (t % self.period)
388
+ x = F.pad(x, (0, n_pad), "reflect")
389
+ t = t + n_pad
390
+ x = x.view(b, c, t // self.period, self.period)
391
+
392
+ for l in self.convs:
393
+ x = l(x)
394
+ x = F.leaky_relu(x, LRELU_SLOPE)
395
+ fmap.append(x)
396
+ x = self.conv_post(x)
397
+ fmap.append(x)
398
+ x = torch.flatten(x, 1, -1)
399
+
400
+ return x, fmap
401
+
402
+
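
The key move in DiscriminatorP is the 1-D to 2-D fold: the waveform is padded to a multiple of the period and reshaped so that samples one period apart line up in the same column. A standalone sketch of just that step:

```python
# Sketch of the period fold used in DiscriminatorP.forward above.
import torch
import torch.nn.functional as F

period = 5
x = torch.randn(1, 1, 13)                  # t = 13 is not a multiple of 5
n_pad = period - (x.shape[-1] % period)    # -> 2
x = F.pad(x, (0, n_pad), "reflect")
x = x.view(1, 1, x.shape[-1] // period, period)
print(x.shape)                             # torch.Size([1, 1, 3, 5])
```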
403
+ class MultiPeriodDiscriminator(torch.nn.Module):
404
+ def __init__(self, periods=None):
405
+ super(MultiPeriodDiscriminator, self).__init__()
406
+ self.periods = periods if periods is not None else [2, 3, 5, 7, 11]
407
+ self.discriminators = nn.ModuleList()
408
+ for period in self.periods:
409
+ self.discriminators.append(DiscriminatorP(period))
410
+
411
+ def forward(self, y, y_hat):
412
+ y_d_rs = []
413
+ y_d_gs = []
414
+ fmap_rs = []
415
+ fmap_gs = []
416
+ for i, d in enumerate(self.discriminators):
417
+ y_d_r, fmap_r = d(y)
418
+ y_d_g, fmap_g = d(y_hat)
419
+ y_d_rs.append(y_d_r)
420
+ fmap_rs.append(fmap_r)
421
+ y_d_gs.append(y_d_g)
422
+ fmap_gs.append(fmap_g)
423
+
424
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
425
+
426
+
427
+ class DiscriminatorS(torch.nn.Module):
428
+ def __init__(self, use_spectral_norm=False):
429
+ super(DiscriminatorS, self).__init__()
430
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
431
+ self.convs = nn.ModuleList([
432
+ norm_f(Conv1d(1, 128, 15, 1, padding=7)),
433
+ norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
434
+ norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
435
+ norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
436
+ norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
437
+ norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
438
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
439
+ ])
440
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
441
+
442
+ def forward(self, x):
443
+ fmap = []
444
+ for l in self.convs:
445
+ x = l(x)
446
+ x = F.leaky_relu(x, LRELU_SLOPE)
447
+ fmap.append(x)
448
+ x = self.conv_post(x)
449
+ fmap.append(x)
450
+ x = torch.flatten(x, 1, -1)
451
+
452
+ return x, fmap
453
+
454
+
455
+ class MultiScaleDiscriminator(torch.nn.Module):
456
+ def __init__(self):
457
+ super(MultiScaleDiscriminator, self).__init__()
458
+ self.discriminators = nn.ModuleList([
459
+ DiscriminatorS(use_spectral_norm=True),
460
+ DiscriminatorS(),
461
+ DiscriminatorS(),
462
+ ])
463
+ self.meanpools = nn.ModuleList([
464
+ AvgPool1d(4, 2, padding=2),
465
+ AvgPool1d(4, 2, padding=2)
466
+ ])
467
+
468
+ def forward(self, y, y_hat):
469
+ y_d_rs = []
470
+ y_d_gs = []
471
+ fmap_rs = []
472
+ fmap_gs = []
473
+ for i, d in enumerate(self.discriminators):
474
+ if i != 0:
475
+ y = self.meanpools[i - 1](y)
476
+ y_hat = self.meanpools[i - 1](y_hat)
477
+ y_d_r, fmap_r = d(y)
478
+ y_d_g, fmap_g = d(y_hat)
479
+ y_d_rs.append(y_d_r)
480
+ fmap_rs.append(fmap_r)
481
+ y_d_gs.append(y_d_g)
482
+ fmap_gs.append(fmap_g)
483
+
484
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
485
+
486
+
487
+ def feature_loss(fmap_r, fmap_g):
488
+ loss = 0
489
+ for dr, dg in zip(fmap_r, fmap_g):
490
+ for rl, gl in zip(dr, dg):
491
+ loss += torch.mean(torch.abs(rl - gl))
492
+
493
+ return loss * 2
494
+
495
+
496
+ def discriminator_loss(disc_real_outputs, disc_generated_outputs):
497
+ loss = 0
498
+ r_losses = []
499
+ g_losses = []
500
+ for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
501
+ r_loss = torch.mean((1 - dr) ** 2)
502
+ g_loss = torch.mean(dg ** 2)
503
+ loss += (r_loss + g_loss)
504
+ r_losses.append(r_loss.item())
505
+ g_losses.append(g_loss.item())
506
+
507
+ return loss, r_losses, g_losses
508
+
509
+
510
+ def generator_loss(disc_outputs):
511
+ loss = 0
512
+ gen_losses = []
513
+ for dg in disc_outputs:
514
+ l = torch.mean((1 - dg) ** 2)
515
+ gen_losses.append(l)
516
+ loss += l
517
+
518
+ return loss, gen_losses
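
These are the usual least-squares GAN objectives: the discriminator pushes real outputs toward 1 and generated outputs toward 0, while the generator pushes its outputs toward 1. A quick numeric sketch on dummy outputs:

```python
# Sketch with dummy discriminator outputs (lists, one tensor per sub-discriminator).
import torch

d_real = [torch.full((4, 10), 0.9)]
d_fake = [torch.full((4, 10), 0.1)]

d_loss, r_losses, g_losses = discriminator_loss(d_real, d_fake)
g_loss, gen_losses = generator_loss(d_fake)
print(round(d_loss.item(), 3))   # 0.02 = (1 - 0.9)^2 + 0.1^2
print(round(g_loss.item(), 3))   # 0.81 = (1 - 0.1)^2
```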
vdecoder/hifiganwithsnake/nvSTFT.py ADDED
@@ -0,0 +1,111 @@
1
+ import math
2
+ import os
3
+ os.environ["LRU_CACHE_CAPACITY"] = "3"
4
+ import random
5
+ import torch
6
+ import torch.utils.data
7
+ import numpy as np
8
+ import librosa
9
+ from librosa.util import normalize
10
+ from librosa.filters import mel as librosa_mel_fn
11
+ from scipy.io.wavfile import read
12
+ import soundfile as sf
13
+
14
+ def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
15
+ sampling_rate = None
16
+ try:
17
+ data, sampling_rate = sf.read(full_path, always_2d=True)  # read audio as a 2-D (samples, channels) array
18
+ except Exception as ex:
19
+ print(f"'{full_path}' failed to load.\nException:")
20
+ print(ex)
21
+ if return_empty_on_exception:
22
+ return [], sampling_rate or target_sr or 32000
23
+ else:
24
+ raise Exception(ex)
25
+
26
+ if len(data.shape) > 1:
27
+ data = data[:, 0]
28
+ assert len(data) > 2  # audio must be longer than 2 samples (otherwise the channel slice above hit the wrong dimension)
29
+
30
+ if np.issubdtype(data.dtype, np.integer): # if audio data is type int
31
+ max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX
32
+ else: # if audio data is type fp32
33
+ max_mag = max(np.amax(data), -np.amin(data))
34
+ max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32
35
+
36
+ data = torch.FloatTensor(data.astype(np.float32))/max_mag
37
+
38
+ if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except
39
+ return [], sampling_rate or target_sr or 32000
40
+ if target_sr is not None and sampling_rate != target_sr:
41
+ data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr))
42
+ sampling_rate = target_sr
43
+
44
+ return data, sampling_rate
45
+
46
+ def dynamic_range_compression(x, C=1, clip_val=1e-5):
47
+ return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
48
+
49
+ def dynamic_range_decompression(x, C=1):
50
+ return np.exp(x) / C
51
+
52
+ def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
53
+ return torch.log(torch.clamp(x, min=clip_val) * C)
54
+
55
+ def dynamic_range_decompression_torch(x, C=1):
56
+ return torch.exp(x) / C
57
+
58
+ class STFT():
59
+ def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5):
60
+ self.target_sr = sr
61
+
62
+ self.n_mels = n_mels
63
+ self.n_fft = n_fft
64
+ self.win_size = win_size
65
+ self.hop_length = hop_length
66
+ self.fmin = fmin
67
+ self.fmax = fmax
68
+ self.clip_val = clip_val
69
+ self.mel_basis = {}
70
+ self.hann_window = {}
71
+
72
+ def get_mel(self, y, center=False):
73
+ sampling_rate = self.target_sr
74
+ n_mels = self.n_mels
75
+ n_fft = self.n_fft
76
+ win_size = self.win_size
77
+ hop_length = self.hop_length
78
+ fmin = self.fmin
79
+ fmax = self.fmax
80
+ clip_val = self.clip_val
81
+
82
+ if torch.min(y) < -1.:
83
+ print('min value is ', torch.min(y))
84
+ if torch.max(y) > 1.:
85
+ print('max value is ', torch.max(y))
86
+
87
+ if str(fmax)+'_'+str(y.device) not in self.mel_basis:  # key must match how the basis is stored below
88
+ mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
89
+ self.mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)
90
+ self.hann_window[str(y.device)] = torch.hann_window(self.win_size).to(y.device)
91
+
92
+ y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_length)/2), int((n_fft-hop_length)/2)), mode='reflect')
93
+ y = y.squeeze(1)
94
+
95
+ spec = torch.stft(y, n_fft, hop_length=hop_length, win_length=win_size, window=self.hann_window[str(y.device)],
96
+ center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
97
+ # print(111,spec)
98
+ spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
99
+ # print(222,spec)
100
+ spec = torch.matmul(self.mel_basis[str(fmax)+'_'+str(y.device)], spec)
101
+ # print(333,spec)
102
+ spec = dynamic_range_compression_torch(spec, clip_val=clip_val)
103
+ # print(444,spec)
104
+ return spec
105
+
106
+ def __call__(self, audiopath):
107
+ audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr)
108
+ spect = self.get_mel(audio.unsqueeze(0)).squeeze(0)
109
+ return spect
110
+
111
+ stft = STFT()
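
The module-level `stft` instance uses the 22050 Hz / 80-mel defaults; calling it with a path resamples the audio and returns a log-compressed mel spectrogram. A usage sketch with a placeholder file name:

```python
# Sketch; 'example.wav' is a placeholder path, not a file shipped with the repo.
mel = stft('example.wav')                  # (n_mels, frames) log-mel at 22050 Hz
custom = STFT(sr=44100, n_mels=128, n_fft=2048, win_size=2048,
              hop_length=512, fmin=40, fmax=16000)
mel_44k = custom('example.wav')            # same API at a different sample rate
```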
vdecoder/hifiganwithsnake/utils.py ADDED
@@ -0,0 +1,68 @@
1
+ import glob
2
+ import os
3
+ import matplotlib
4
+ import torch
5
+ from torch.nn.utils import weight_norm
6
+ # matplotlib.use("Agg")
7
+ import matplotlib.pylab as plt
8
+
9
+
10
+ def plot_spectrogram(spectrogram):
11
+ fig, ax = plt.subplots(figsize=(10, 2))
12
+ im = ax.imshow(spectrogram, aspect="auto", origin="lower",
13
+ interpolation='none')
14
+ plt.colorbar(im, ax=ax)
15
+
16
+ fig.canvas.draw()
17
+ plt.close()
18
+
19
+ return fig
20
+
21
+
22
+ def init_weights(m, mean=0.0, std=0.01):
23
+ classname = m.__class__.__name__
24
+ if classname.find("Conv") != -1:
25
+ m.weight.data.normal_(mean, std)
26
+
27
+
28
+ def apply_weight_norm(m):
29
+ classname = m.__class__.__name__
30
+ if classname.find("Conv") != -1:
31
+ weight_norm(m)
32
+
33
+
34
+ def get_padding(kernel_size, dilation=1):
35
+ return int((kernel_size*dilation - dilation)/2)
36
+
37
+
38
+ def load_checkpoint(filepath, device):
39
+ assert os.path.isfile(filepath)
40
+ print("Loading '{}'".format(filepath))
41
+ checkpoint_dict = torch.load(filepath, map_location=device)
42
+ print("Complete.")
43
+ return checkpoint_dict
44
+
45
+
46
+ def save_checkpoint(filepath, obj):
47
+ print("Saving checkpoint to {}".format(filepath))
48
+ torch.save(obj, filepath)
49
+ print("Complete.")
50
+
51
+
52
+ def del_old_checkpoints(cp_dir, prefix, n_models=2):
53
+ pattern = os.path.join(cp_dir, prefix + '????????')
54
+ cp_list = glob.glob(pattern) # get checkpoint paths
55
+ cp_list = sorted(cp_list)# sort by iter
56
+ if len(cp_list) > n_models: # if more than n_models models are found
57
+ for cp in cp_list[:-n_models]:  # delete the oldest models other than the latest n_models
58
+ open(cp, 'w').close()# empty file contents
59
+ os.unlink(cp)# delete file (move to trash when using Colab)
60
+
61
+
62
+ def scan_checkpoint(cp_dir, prefix):
63
+ pattern = os.path.join(cp_dir, prefix + '????????')
64
+ cp_list = glob.glob(pattern)
65
+ if len(cp_list) == 0:
66
+ return None
67
+ return sorted(cp_list)[-1]
68
+
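
The `'????????'` glob in `scan_checkpoint`/`del_old_checkpoints` matches an eight-digit step suffix, so checkpoints are expected to be named like `g_00001000`. A small sketch with a hypothetical log directory:

```python
# Sketch; 'logs/44k' and the 'g_' prefix are illustrative, not fixed by this file.
latest = scan_checkpoint('logs/44k', 'g_')     # e.g. 'logs/44k/g_00025000' or None
if latest is not None:
    state = load_checkpoint(latest, 'cpu')
```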
vdecoder/nsf_hifigan/env.py ADDED
@@ -0,0 +1,15 @@
1
+ import os
2
+ import shutil
3
+
4
+
5
+ class AttrDict(dict):
6
+ def __init__(self, *args, **kwargs):
7
+ super(AttrDict, self).__init__(*args, **kwargs)
8
+ self.__dict__ = self
9
+
10
+
11
+ def build_env(config, config_name, path):
12
+ t_path = os.path.join(path, config_name)
13
+ if config != t_path:
14
+ os.makedirs(path, exist_ok=True)
15
+ shutil.copyfile(config, os.path.join(path, config_name))
vdecoder/nsf_hifigan/models.py ADDED
@@ -0,0 +1,439 @@
1
+ import os
2
+ import json
3
+ from .env import AttrDict
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn.functional as F
7
+ import torch.nn as nn
8
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
9
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
10
+ from .utils import init_weights, get_padding
11
+
12
+ LRELU_SLOPE = 0.1
13
+
14
+
15
+ def load_model(model_path, device='cuda'):
16
+ h = load_config(model_path)
17
+
18
+ generator = Generator(h).to(device)
19
+
20
+ cp_dict = torch.load(model_path, map_location=device)
21
+ generator.load_state_dict(cp_dict['generator'])
22
+ generator.eval()
23
+ generator.remove_weight_norm()
24
+ del cp_dict
25
+ return generator, h
26
+
27
+ def load_config(model_path):
28
+ config_file = os.path.join(os.path.split(model_path)[0], 'config.json')
29
+ with open(config_file) as f:
30
+ data = f.read()
31
+
32
+ json_config = json.loads(data)
33
+ h = AttrDict(json_config)
34
+ return h
35
+
36
+
37
+ class ResBlock1(torch.nn.Module):
38
+ def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
39
+ super(ResBlock1, self).__init__()
40
+ self.h = h
41
+ self.convs1 = nn.ModuleList([
42
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
43
+ padding=get_padding(kernel_size, dilation[0]))),
44
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
45
+ padding=get_padding(kernel_size, dilation[1]))),
46
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
47
+ padding=get_padding(kernel_size, dilation[2])))
48
+ ])
49
+ self.convs1.apply(init_weights)
50
+
51
+ self.convs2 = nn.ModuleList([
52
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
53
+ padding=get_padding(kernel_size, 1))),
54
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
55
+ padding=get_padding(kernel_size, 1))),
56
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
57
+ padding=get_padding(kernel_size, 1)))
58
+ ])
59
+ self.convs2.apply(init_weights)
60
+
61
+ def forward(self, x):
62
+ for c1, c2 in zip(self.convs1, self.convs2):
63
+ xt = F.leaky_relu(x, LRELU_SLOPE)
64
+ xt = c1(xt)
65
+ xt = F.leaky_relu(xt, LRELU_SLOPE)
66
+ xt = c2(xt)
67
+ x = xt + x
68
+ return x
69
+
70
+ def remove_weight_norm(self):
71
+ for l in self.convs1:
72
+ remove_weight_norm(l)
73
+ for l in self.convs2:
74
+ remove_weight_norm(l)
75
+
76
+
77
+ class ResBlock2(torch.nn.Module):
78
+ def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
79
+ super(ResBlock2, self).__init__()
80
+ self.h = h
81
+ self.convs = nn.ModuleList([
82
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
83
+ padding=get_padding(kernel_size, dilation[0]))),
84
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
85
+ padding=get_padding(kernel_size, dilation[1])))
86
+ ])
87
+ self.convs.apply(init_weights)
88
+
89
+ def forward(self, x):
90
+ for c in self.convs:
91
+ xt = F.leaky_relu(x, LRELU_SLOPE)
92
+ xt = c(xt)
93
+ x = xt + x
94
+ return x
95
+
96
+ def remove_weight_norm(self):
97
+ for l in self.convs:
98
+ remove_weight_norm(l)
99
+
100
+
101
+ class SineGen(torch.nn.Module):
102
+ """ Definition of sine generator
103
+ SineGen(samp_rate, harmonic_num = 0,
104
+ sine_amp = 0.1, noise_std = 0.003,
105
+ voiced_threshold = 0,
106
+ flag_for_pulse=False)
107
+ samp_rate: sampling rate in Hz
108
+ harmonic_num: number of harmonic overtones (default 0)
109
+ sine_amp: amplitude of sine-wavefrom (default 0.1)
110
+ noise_std: std of Gaussian noise (default 0.003)
111
+ voiced_thoreshold: F0 threshold for U/V classification (default 0)
112
+ flag_for_pulse: this SinGen is used inside PulseGen (default False)
113
+ Note: when flag_for_pulse is True, the first time step of a voiced
114
+ segment is always sin(np.pi) or cos(0)
115
+ """
116
+
117
+ def __init__(self, samp_rate, harmonic_num=0,
118
+ sine_amp=0.1, noise_std=0.003,
119
+ voiced_threshold=0):
120
+ super(SineGen, self).__init__()
121
+ self.sine_amp = sine_amp
122
+ self.noise_std = noise_std
123
+ self.harmonic_num = harmonic_num
124
+ self.dim = self.harmonic_num + 1
125
+ self.sampling_rate = samp_rate
126
+ self.voiced_threshold = voiced_threshold
127
+
128
+ def _f02uv(self, f0):
129
+ # generate uv signal
130
+ uv = torch.ones_like(f0)
131
+ uv = uv * (f0 > self.voiced_threshold)
132
+ return uv
133
+
134
+ @torch.no_grad()
135
+ def forward(self, f0, upp):
136
+ """ sine_tensor, uv = forward(f0)
137
+ input F0: tensor(batchsize=1, length, dim=1)
138
+ f0 for unvoiced steps should be 0
139
+ output sine_tensor: tensor(batchsize=1, length, dim)
140
+ output uv: tensor(batchsize=1, length, 1)
141
+ """
142
+ f0 = f0.unsqueeze(-1)
143
+ fn = torch.multiply(f0, torch.arange(1, self.dim + 1, device=f0.device).reshape((1, 1, -1)))
144
+ rad_values = (fn / self.sampling_rate) % 1  # the % 1 means the n_har products cannot be optimized away in post-processing
145
+ rand_ini = torch.rand(fn.shape[0], fn.shape[2], device=fn.device)
146
+ rand_ini[:, 0] = 0
147
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
148
+ is_half = rad_values.dtype is not torch.float32
149
+ tmp_over_one = torch.cumsum(rad_values.double(), 1)  # a % 1 here would block further optimization of the cumsum below
150
+ if is_half:
151
+ tmp_over_one = tmp_over_one.half()
152
+ else:
153
+ tmp_over_one = tmp_over_one.float()
154
+ tmp_over_one *= upp
155
+ tmp_over_one = F.interpolate(
156
+ tmp_over_one.transpose(2, 1), scale_factor=upp,
157
+ mode='linear', align_corners=True
158
+ ).transpose(2, 1)
159
+ rad_values = F.interpolate(rad_values.transpose(2, 1), scale_factor=upp, mode='nearest').transpose(2, 1)
160
+ tmp_over_one %= 1
161
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
162
+ cumsum_shift = torch.zeros_like(rad_values)
163
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
164
+ rad_values = rad_values.double()
165
+ cumsum_shift = cumsum_shift.double()
166
+ sine_waves = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi)
167
+ if is_half:
168
+ sine_waves = sine_waves.half()
169
+ else:
170
+ sine_waves = sine_waves.float()
171
+ sine_waves = sine_waves * self.sine_amp
172
+ uv = self._f02uv(f0)
173
+ uv = F.interpolate(uv.transpose(2, 1), scale_factor=upp, mode='nearest').transpose(2, 1)
174
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
175
+ noise = noise_amp * torch.randn_like(sine_waves)
176
+ sine_waves = sine_waves * uv + noise
177
+ return sine_waves, uv, noise
178
+
179
+
180
+ class SourceModuleHnNSF(torch.nn.Module):
181
+ """ SourceModule for hn-nsf
182
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
183
+ add_noise_std=0.003, voiced_threshod=0)
184
+ sampling_rate: sampling_rate in Hz
185
+ harmonic_num: number of harmonic above F0 (default: 0)
186
+ sine_amp: amplitude of sine source signal (default: 0.1)
187
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
188
+ note that amplitude of noise in unvoiced is decided
189
+ by sine_amp
190
+ voiced_threshold: threhold to set U/V given F0 (default: 0)
191
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
192
+ F0_sampled (batchsize, length, 1)
193
+ Sine_source (batchsize, length, 1)
194
+ noise_source (batchsize, length 1)
195
+ uv (batchsize, length, 1)
196
+ """
197
+
198
+ def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
199
+ add_noise_std=0.003, voiced_threshod=0):
200
+ super(SourceModuleHnNSF, self).__init__()
201
+
202
+ self.sine_amp = sine_amp
203
+ self.noise_std = add_noise_std
204
+
205
+ # to produce sine waveforms
206
+ self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
207
+ sine_amp, add_noise_std, voiced_threshod)
208
+
209
+ # to merge source harmonics into a single excitation
210
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
211
+ self.l_tanh = torch.nn.Tanh()
212
+
213
+ def forward(self, x, upp):
214
+ sine_wavs, uv, _ = self.l_sin_gen(x, upp)
215
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
216
+ return sine_merge
217
+
218
+
219
+ class Generator(torch.nn.Module):
220
+ def __init__(self, h):
221
+ super(Generator, self).__init__()
222
+ self.h = h
223
+ self.num_kernels = len(h.resblock_kernel_sizes)
224
+ self.num_upsamples = len(h.upsample_rates)
225
+ self.m_source = SourceModuleHnNSF(
226
+ sampling_rate=h.sampling_rate,
227
+ harmonic_num=8
228
+ )
229
+ self.noise_convs = nn.ModuleList()
230
+ self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3))
231
+ resblock = ResBlock1 if h.resblock == '1' else ResBlock2
232
+
233
+ self.ups = nn.ModuleList()
234
+ for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
235
+ c_cur = h.upsample_initial_channel // (2 ** (i + 1))
236
+ self.ups.append(weight_norm(
237
+ ConvTranspose1d(h.upsample_initial_channel // (2 ** i), h.upsample_initial_channel // (2 ** (i + 1)),
238
+ k, u, padding=(k - u) // 2)))
239
+ if i + 1 < len(h.upsample_rates): #
240
+ stride_f0 = int(np.prod(h.upsample_rates[i + 1:]))
241
+ self.noise_convs.append(Conv1d(
242
+ 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2))
243
+ else:
244
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
245
+ self.resblocks = nn.ModuleList()
246
+ ch = h.upsample_initial_channel
247
+ for i in range(len(self.ups)):
248
+ ch //= 2
249
+ for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
250
+ self.resblocks.append(resblock(h, ch, k, d))
251
+
252
+ self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
253
+ self.ups.apply(init_weights)
254
+ self.conv_post.apply(init_weights)
255
+ self.upp = int(np.prod(h.upsample_rates))
256
+
257
+ def forward(self, x, f0):
258
+ har_source = self.m_source(f0, self.upp).transpose(1, 2)
259
+ x = self.conv_pre(x)
260
+ for i in range(self.num_upsamples):
261
+ x = F.leaky_relu(x, LRELU_SLOPE)
262
+ x = self.ups[i](x)
263
+ x_source = self.noise_convs[i](har_source)
264
+ x = x + x_source
265
+ xs = None
266
+ for j in range(self.num_kernels):
267
+ if xs is None:
268
+ xs = self.resblocks[i * self.num_kernels + j](x)
269
+ else:
270
+ xs += self.resblocks[i * self.num_kernels + j](x)
271
+ x = xs / self.num_kernels
272
+ x = F.leaky_relu(x)
273
+ x = self.conv_post(x)
274
+ x = torch.tanh(x)
275
+
276
+ return x
277
+
278
+ def remove_weight_norm(self):
279
+ print('Removing weight norm...')
280
+ for l in self.ups:
281
+ remove_weight_norm(l)
282
+ for l in self.resblocks:
283
+ l.remove_weight_norm()
284
+ remove_weight_norm(self.conv_pre)
285
+ remove_weight_norm(self.conv_post)
286
+
287
+
288
+ class DiscriminatorP(torch.nn.Module):
289
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
290
+ super(DiscriminatorP, self).__init__()
291
+ self.period = period
292
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
293
+ self.convs = nn.ModuleList([
294
+ norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
295
+ norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
296
+ norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
297
+ norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
298
+ norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
299
+ ])
300
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
301
+
302
+ def forward(self, x):
303
+ fmap = []
304
+
305
+ # 1d to 2d
306
+ b, c, t = x.shape
307
+ if t % self.period != 0: # pad first
308
+ n_pad = self.period - (t % self.period)
309
+ x = F.pad(x, (0, n_pad), "reflect")
310
+ t = t + n_pad
311
+ x = x.view(b, c, t // self.period, self.period)
312
+
313
+ for l in self.convs:
314
+ x = l(x)
315
+ x = F.leaky_relu(x, LRELU_SLOPE)
316
+ fmap.append(x)
317
+ x = self.conv_post(x)
318
+ fmap.append(x)
319
+ x = torch.flatten(x, 1, -1)
320
+
321
+ return x, fmap
322
+
323
+
324
+ class MultiPeriodDiscriminator(torch.nn.Module):
325
+ def __init__(self, periods=None):
326
+ super(MultiPeriodDiscriminator, self).__init__()
327
+ self.periods = periods if periods is not None else [2, 3, 5, 7, 11]
328
+ self.discriminators = nn.ModuleList()
329
+ for period in self.periods:
330
+ self.discriminators.append(DiscriminatorP(period))
331
+
332
+ def forward(self, y, y_hat):
333
+ y_d_rs = []
334
+ y_d_gs = []
335
+ fmap_rs = []
336
+ fmap_gs = []
337
+ for i, d in enumerate(self.discriminators):
338
+ y_d_r, fmap_r = d(y)
339
+ y_d_g, fmap_g = d(y_hat)
340
+ y_d_rs.append(y_d_r)
341
+ fmap_rs.append(fmap_r)
342
+ y_d_gs.append(y_d_g)
343
+ fmap_gs.append(fmap_g)
344
+
345
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
346
+
347
+
348
+ class DiscriminatorS(torch.nn.Module):
349
+ def __init__(self, use_spectral_norm=False):
350
+ super(DiscriminatorS, self).__init__()
351
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
352
+ self.convs = nn.ModuleList([
353
+ norm_f(Conv1d(1, 128, 15, 1, padding=7)),
354
+ norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
355
+ norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
356
+ norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
357
+ norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
358
+ norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
359
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
360
+ ])
361
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
362
+
363
+ def forward(self, x):
364
+ fmap = []
365
+ for l in self.convs:
366
+ x = l(x)
367
+ x = F.leaky_relu(x, LRELU_SLOPE)
368
+ fmap.append(x)
369
+ x = self.conv_post(x)
370
+ fmap.append(x)
371
+ x = torch.flatten(x, 1, -1)
372
+
373
+ return x, fmap
374
+
375
+
376
+ class MultiScaleDiscriminator(torch.nn.Module):
377
+ def __init__(self):
378
+ super(MultiScaleDiscriminator, self).__init__()
379
+ self.discriminators = nn.ModuleList([
380
+ DiscriminatorS(use_spectral_norm=True),
381
+ DiscriminatorS(),
382
+ DiscriminatorS(),
383
+ ])
384
+ self.meanpools = nn.ModuleList([
385
+ AvgPool1d(4, 2, padding=2),
386
+ AvgPool1d(4, 2, padding=2)
387
+ ])
388
+
389
+ def forward(self, y, y_hat):
390
+ y_d_rs = []
391
+ y_d_gs = []
392
+ fmap_rs = []
393
+ fmap_gs = []
394
+ for i, d in enumerate(self.discriminators):
395
+ if i != 0:
396
+ y = self.meanpools[i - 1](y)
397
+ y_hat = self.meanpools[i - 1](y_hat)
398
+ y_d_r, fmap_r = d(y)
399
+ y_d_g, fmap_g = d(y_hat)
400
+ y_d_rs.append(y_d_r)
401
+ fmap_rs.append(fmap_r)
402
+ y_d_gs.append(y_d_g)
403
+ fmap_gs.append(fmap_g)
404
+
405
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
406
+
407
+
408
+ def feature_loss(fmap_r, fmap_g):
409
+ loss = 0
410
+ for dr, dg in zip(fmap_r, fmap_g):
411
+ for rl, gl in zip(dr, dg):
412
+ loss += torch.mean(torch.abs(rl - gl))
413
+
414
+ return loss * 2
415
+
416
+
417
+ def discriminator_loss(disc_real_outputs, disc_generated_outputs):
418
+ loss = 0
419
+ r_losses = []
420
+ g_losses = []
421
+ for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
422
+ r_loss = torch.mean((1 - dr) ** 2)
423
+ g_loss = torch.mean(dg ** 2)
424
+ loss += (r_loss + g_loss)
425
+ r_losses.append(r_loss.item())
426
+ g_losses.append(g_loss.item())
427
+
428
+ return loss, r_losses, g_losses
429
+
430
+
431
+ def generator_loss(disc_outputs):
432
+ loss = 0
433
+ gen_losses = []
434
+ for dg in disc_outputs:
435
+ l = torch.mean((1 - dg) ** 2)
436
+ gen_losses.append(l)
437
+ loss += l
438
+
439
+ return loss, gen_losses
vdecoder/nsf_hifigan/nvSTFT.py ADDED
@@ -0,0 +1,134 @@
1
+ import math
2
+ import os
3
+ os.environ["LRU_CACHE_CAPACITY"] = "3"
4
+ import random
5
+ import torch
6
+ import torch.utils.data
7
+ import numpy as np
8
+ import librosa
9
+ from librosa.util import normalize
10
+ from librosa.filters import mel as librosa_mel_fn
11
+ from scipy.io.wavfile import read
12
+ import soundfile as sf
13
+ import torch.nn.functional as F
14
+
15
+ def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
16
+ sampling_rate = None
17
+ try:
18
+ data, sampling_rate = sf.read(full_path, always_2d=True)  # read audio as a 2-D (samples, channels) array
19
+ except Exception as ex:
20
+ print(f"'{full_path}' failed to load.\nException:")
21
+ print(ex)
22
+ if return_empty_on_exception:
23
+ return [], sampling_rate or target_sr or 48000
24
+ else:
25
+ raise Exception(ex)
26
+
27
+ if len(data.shape) > 1:
28
+ data = data[:, 0]
29
+ assert len(data) > 2  # audio must be longer than 2 samples (otherwise the channel slice above hit the wrong dimension)
30
+
31
+ if np.issubdtype(data.dtype, np.integer): # if audio data is type int
32
+ max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX
33
+ else: # if audio data is type fp32
34
+ max_mag = max(np.amax(data), -np.amin(data))
35
+ max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32
36
+
37
+ data = torch.FloatTensor(data.astype(np.float32))/max_mag
38
+
39
+ if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except
40
+ return [], sampling_rate or target_sr or 48000
41
+ if target_sr is not None and sampling_rate != target_sr:
42
+ data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr))
43
+ sampling_rate = target_sr
44
+
45
+ return data, sampling_rate
46
+
47
+ def dynamic_range_compression(x, C=1, clip_val=1e-5):
48
+ return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
49
+
50
+ def dynamic_range_decompression(x, C=1):
51
+ return np.exp(x) / C
52
+
53
+ def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
54
+ return torch.log(torch.clamp(x, min=clip_val) * C)
55
+
56
+ def dynamic_range_decompression_torch(x, C=1):
57
+ return torch.exp(x) / C
58
+
59
+ class STFT():
60
+ def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5):
61
+ self.target_sr = sr
62
+
63
+ self.n_mels = n_mels
64
+ self.n_fft = n_fft
65
+ self.win_size = win_size
66
+ self.hop_length = hop_length
67
+ self.fmin = fmin
68
+ self.fmax = fmax
69
+ self.clip_val = clip_val
70
+ self.mel_basis = {}
71
+ self.hann_window = {}
72
+
73
+ def get_mel(self, y, keyshift=0, speed=1, center=False):
74
+ sampling_rate = self.target_sr
75
+ n_mels = self.n_mels
76
+ n_fft = self.n_fft
77
+ win_size = self.win_size
78
+ hop_length = self.hop_length
79
+ fmin = self.fmin
80
+ fmax = self.fmax
81
+ clip_val = self.clip_val
82
+
83
+ factor = 2 ** (keyshift / 12)
84
+ n_fft_new = int(np.round(n_fft * factor))
85
+ win_size_new = int(np.round(win_size * factor))
86
+ hop_length_new = int(np.round(hop_length * speed))
87
+
88
+ if torch.min(y) < -1.:
89
+ print('min value is ', torch.min(y))
90
+ if torch.max(y) > 1.:
91
+ print('max value is ', torch.max(y))
92
+
93
+ mel_basis_key = str(fmax)+'_'+str(y.device)
94
+ if mel_basis_key not in self.mel_basis:
95
+ mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
96
+ self.mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device)
97
+
98
+ keyshift_key = str(keyshift)+'_'+str(y.device)
99
+ if keyshift_key not in self.hann_window:
100
+ self.hann_window[keyshift_key] = torch.hann_window(win_size_new).to(y.device)
101
+
102
+ pad_left = (win_size_new - hop_length_new) //2
103
+ pad_right = max((win_size_new- hop_length_new + 1) //2, win_size_new - y.size(-1) - pad_left)
104
+ if pad_right < y.size(-1):
105
+ mode = 'reflect'
106
+ else:
107
+ mode = 'constant'
108
+ y = torch.nn.functional.pad(y.unsqueeze(1), (pad_left, pad_right), mode = mode)
109
+ y = y.squeeze(1)
110
+
111
+ spec = torch.stft(y, n_fft_new, hop_length=hop_length_new, win_length=win_size_new, window=self.hann_window[keyshift_key],
112
+ center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
113
+ # print(111,spec)
114
+ spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
115
+ if keyshift != 0:
116
+ size = n_fft // 2 + 1
117
+ resize = spec.size(1)
118
+ if resize < size:
119
+ spec = F.pad(spec, (0, 0, 0, size-resize))
120
+ spec = spec[:, :size, :] * win_size / win_size_new
121
+
122
+ # print(222,spec)
123
+ spec = torch.matmul(self.mel_basis[mel_basis_key], spec)
124
+ # print(333,spec)
125
+ spec = dynamic_range_compression_torch(spec, clip_val=clip_val)
126
+ # print(444,spec)
127
+ return spec
128
+
129
+ def __call__(self, audiopath):
130
+ audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr)
131
+ spect = self.get_mel(audio.unsqueeze(0)).squeeze(0)
132
+ return spect
133
+
134
+ stft = STFT()
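
The `keyshift` path in `get_mel` above analyses the signal with a window scaled by 2^(k/12) and then resizes the spectrum back to `n_fft // 2 + 1` bins, which transposes the mel without resampling the audio. The window arithmetic, in isolation:

```python
# Sketch of the keyshift window scaling (illustrative n_fft and shift).
import numpy as np

n_fft, keyshift = 2048, 2                  # shift up by 2 semitones
factor = 2 ** (keyshift / 12)              # ~1.1225
n_fft_new = int(np.round(n_fft * factor))
print(n_fft_new)                           # 2299: FFT size used for the shifted STFT
```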
vdecoder/nsf_hifigan/utils.py ADDED
@@ -0,0 +1,68 @@
1
+ import glob
2
+ import os
3
+ import matplotlib
4
+ import torch
5
+ from torch.nn.utils import weight_norm
6
+ matplotlib.use("Agg")
7
+ import matplotlib.pylab as plt
8
+
9
+
10
+ def plot_spectrogram(spectrogram):
11
+ fig, ax = plt.subplots(figsize=(10, 2))
12
+ im = ax.imshow(spectrogram, aspect="auto", origin="lower",
13
+ interpolation='none')
14
+ plt.colorbar(im, ax=ax)
15
+
16
+ fig.canvas.draw()
17
+ plt.close()
18
+
19
+ return fig
20
+
21
+
22
+ def init_weights(m, mean=0.0, std=0.01):
23
+ classname = m.__class__.__name__
24
+ if classname.find("Conv") != -1:
25
+ m.weight.data.normal_(mean, std)
26
+
27
+
28
+ def apply_weight_norm(m):
29
+ classname = m.__class__.__name__
30
+ if classname.find("Conv") != -1:
31
+ weight_norm(m)
32
+
33
+
34
+ def get_padding(kernel_size, dilation=1):
35
+ return int((kernel_size*dilation - dilation)/2)
36
+
37
+
38
+ def load_checkpoint(filepath, device):
39
+ assert os.path.isfile(filepath)
40
+ print("Loading '{}'".format(filepath))
41
+ checkpoint_dict = torch.load(filepath, map_location=device)
42
+ print("Complete.")
43
+ return checkpoint_dict
44
+
45
+
46
+ def save_checkpoint(filepath, obj):
47
+ print("Saving checkpoint to {}".format(filepath))
48
+ torch.save(obj, filepath)
49
+ print("Complete.")
50
+
51
+
52
+ def del_old_checkpoints(cp_dir, prefix, n_models=2):
53
+ pattern = os.path.join(cp_dir, prefix + '????????')
54
+ cp_list = glob.glob(pattern) # get checkpoint paths
55
+ cp_list = sorted(cp_list)# sort by iter
56
+ if len(cp_list) > n_models: # if more than n_models models are found
57
+ for cp in cp_list[:-n_models]:# delete the oldest models other than lastest n_models
58
+ open(cp, 'w').close()# empty file contents
59
+ os.unlink(cp)# delete file (move to trash when using Colab)
60
+
61
+
62
+ def scan_checkpoint(cp_dir, prefix):
63
+ pattern = os.path.join(cp_dir, prefix + '????????')
64
+ cp_list = glob.glob(pattern)
65
+ if len(cp_list) == 0:
66
+ return None
67
+ return sorted(cp_list)[-1]
68
+