Staticaliza committed · verified
Commit 089208d · 1 Parent(s): ce2f474

Delete modules
modules/audio.py DELETED
@@ -1,82 +0,0 @@
1
- import numpy as np
2
- import torch
3
- import torch.utils.data
4
- from librosa.filters import mel as librosa_mel_fn
5
- from scipy.io.wavfile import read
6
-
7
- MAX_WAV_VALUE = 32768.0
8
-
9
-
10
- def load_wav(full_path):
11
- sampling_rate, data = read(full_path)
12
- return data, sampling_rate
13
-
14
-
15
- def dynamic_range_compression(x, C=1, clip_val=1e-5):
16
- return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
17
-
18
-
19
- def dynamic_range_decompression(x, C=1):
20
- return np.exp(x) / C
21
-
22
-
23
- def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
24
- return torch.log(torch.clamp(x, min=clip_val) * C)
25
-
26
-
27
- def dynamic_range_decompression_torch(x, C=1):
28
- return torch.exp(x) / C
29
-
30
-
31
- def spectral_normalize_torch(magnitudes):
32
- output = dynamic_range_compression_torch(magnitudes)
33
- return output
34
-
35
-
36
- def spectral_de_normalize_torch(magnitudes):
37
- output = dynamic_range_decompression_torch(magnitudes)
38
- return output
39
-
40
-
41
- mel_basis = {}
42
- hann_window = {}
43
-
44
-
45
- def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
46
- if torch.min(y) < -1.0:
47
- print("min value is ", torch.min(y))
48
- if torch.max(y) > 1.0:
49
- print("max value is ", torch.max(y))
50
-
51
- global mel_basis, hann_window # pylint: disable=global-statement
52
- if f"{str(sampling_rate)}_{str(fmax)}_{str(y.device)}" not in mel_basis:
53
- mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
54
- mel_basis[str(sampling_rate) + "_" + str(fmax) + "_" + str(y.device)] = torch.from_numpy(mel).float().to(y.device)
55
- hann_window[str(sampling_rate) + "_" + str(y.device)] = torch.hann_window(win_size).to(y.device)
56
-
57
- y = torch.nn.functional.pad(
58
- y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect"
59
- )
60
- y = y.squeeze(1)
61
-
62
- spec = torch.view_as_real(
63
- torch.stft(
64
- y,
65
- n_fft,
66
- hop_length=hop_size,
67
- win_length=win_size,
68
- window=hann_window[str(sampling_rate) + "_" + str(y.device)],
69
- center=center,
70
- pad_mode="reflect",
71
- normalized=False,
72
- onesided=True,
73
- return_complex=True,
74
- )
75
- )
76
-
77
- spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))
78
-
79
- spec = torch.matmul(mel_basis[str(sampling_rate) + "_" + str(fmax) + "_" + str(y.device)], spec)
80
- spec = spectral_normalize_torch(spec)
81
-
82
- return spec
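
A minimal usage sketch of the deleted mel_spectrogram helper, assuming a local copy of this module is still importable; the input file name and the 22.05 kHz parameter set are illustrative (they mirror the config shipped under modules/bigvgan):

import torch
from modules.audio import load_wav, mel_spectrogram, MAX_WAV_VALUE

# Load a mono wav (hypothetical path) and scale int16 samples into [-1, 1].
wav, sr = load_wav("example.wav")
y = torch.from_numpy(wav).float() / MAX_WAV_VALUE
y = y.unsqueeze(0)                                # [B=1, T]

mel = mel_spectrogram(
    y, n_fft=1024, num_mels=80, sampling_rate=22050,
    hop_size=256, win_size=1024, fmin=0, fmax=None,
)
print(mel.shape)                                  # [1, 80, ~T / hop_size] mel frames
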
modules/bigvgan/act.py DELETED
@@ -1,30 +0,0 @@
1
- # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
2
- # LICENSE is in incl_licenses directory.
3
-
4
- import torch.nn as nn
5
- from .resample import UpSample1d, DownSample1d
6
-
7
-
8
- class Activation1d(nn.Module):
9
- def __init__(
10
- self,
11
- activation,
12
- up_ratio: int = 2,
13
- down_ratio: int = 2,
14
- up_kernel_size: int = 12,
15
- down_kernel_size: int = 12,
16
- ):
17
- super().__init__()
18
- self.up_ratio = up_ratio
19
- self.down_ratio = down_ratio
20
- self.act = activation
21
- self.upsample = UpSample1d(up_ratio, up_kernel_size)
22
- self.downsample = DownSample1d(down_ratio, down_kernel_size)
23
-
24
- # x: [B,C,T]
25
- def forward(self, x):
26
- x = self.upsample(x)
27
- x = self.act(x)
28
- x = self.downsample(x)
29
-
30
- return x
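
A quick sketch of how this filtered activation is applied to a [B, C, T] feature map, assuming the sibling activations module from this same deleted tree is importable:

import torch
from modules.bigvgan.act import Activation1d
from modules.bigvgan.activations import Snake

act = Activation1d(activation=Snake(in_features=64))
x = torch.randn(2, 64, 1024)      # [B, C, T]
y = act(x)                        # upsample x2 -> Snake -> downsample x2
print(y.shape)                    # torch.Size([2, 64, 1024]) -- length preserved
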
modules/bigvgan/activations.py DELETED
@@ -1,120 +0,0 @@
1
- # Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
2
- # LICENSE is in incl_licenses directory.
3
-
4
- import torch
5
- from torch import nn, sin, pow
6
- from torch.nn import Parameter
7
-
8
-
9
- class Snake(nn.Module):
10
- '''
11
- Implementation of a sine-based periodic activation function
12
- Shape:
13
- - Input: (B, C, T)
14
- - Output: (B, C, T), same shape as the input
15
- Parameters:
16
- - alpha - trainable parameter
17
- References:
18
- - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
19
- https://arxiv.org/abs/2006.08195
20
- Examples:
21
- >>> a1 = snake(256)
22
- >>> x = torch.randn(256)
23
- >>> x = a1(x)
24
- '''
25
- def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
26
- '''
27
- Initialization.
28
- INPUT:
29
- - in_features: shape of the input
30
- - alpha: trainable parameter
31
- alpha is initialized to 1 by default, higher values = higher-frequency.
32
- alpha will be trained along with the rest of your model.
33
- '''
34
- super(Snake, self).__init__()
35
- self.in_features = in_features
36
-
37
- # initialize alpha
38
- self.alpha_logscale = alpha_logscale
39
- if self.alpha_logscale: # log scale alphas initialized to zeros
40
- self.alpha = Parameter(torch.zeros(in_features) * alpha)
41
- else: # linear scale alphas initialized to ones
42
- self.alpha = Parameter(torch.ones(in_features) * alpha)
43
-
44
- self.alpha.requires_grad = alpha_trainable
45
-
46
- self.no_div_by_zero = 0.000000001
47
-
48
- def forward(self, x):
49
- '''
50
- Forward pass of the function.
51
- Applies the function to the input elementwise.
52
- Snake ∶= x + 1/a * sin^2 (xa)
53
- '''
54
- alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T]
55
- if self.alpha_logscale:
56
- alpha = torch.exp(alpha)
57
- x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
58
-
59
- return x
60
-
61
-
62
- class SnakeBeta(nn.Module):
63
- '''
64
- A modified Snake function which uses separate parameters for the magnitude of the periodic components
65
- Shape:
66
- - Input: (B, C, T)
67
- - Output: (B, C, T), same shape as the input
68
- Parameters:
69
- - alpha - trainable parameter that controls frequency
70
- - beta - trainable parameter that controls magnitude
71
- References:
72
- - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
73
- https://arxiv.org/abs/2006.08195
74
- Examples:
75
- >>> a1 = snakebeta(256)
76
- >>> x = torch.randn(256)
77
- >>> x = a1(x)
78
- '''
79
- def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
80
- '''
81
- Initialization.
82
- INPUT:
83
- - in_features: shape of the input
84
- - alpha - trainable parameter that controls frequency
85
- - beta - trainable parameter that controls magnitude
86
- alpha is initialized to 1 by default, higher values = higher-frequency.
87
- beta is initialized to 1 by default, higher values = higher-magnitude.
88
- alpha will be trained along with the rest of your model.
89
- '''
90
- super(SnakeBeta, self).__init__()
91
- self.in_features = in_features
92
-
93
- # initialize alpha
94
- self.alpha_logscale = alpha_logscale
95
- if self.alpha_logscale: # log scale alphas initialized to zeros
96
- self.alpha = Parameter(torch.zeros(in_features) * alpha)
97
- self.beta = Parameter(torch.zeros(in_features) * alpha)
98
- else: # linear scale alphas initialized to ones
99
- self.alpha = Parameter(torch.ones(in_features) * alpha)
100
- self.beta = Parameter(torch.ones(in_features) * alpha)
101
-
102
- self.alpha.requires_grad = alpha_trainable
103
- self.beta.requires_grad = alpha_trainable
104
-
105
- self.no_div_by_zero = 0.000000001
106
-
107
- def forward(self, x):
108
- '''
109
- Forward pass of the function.
110
- Applies the function to the input elementwise.
111
- SnakeBeta ∶= x + 1/b * sin^2 (xa)
112
- '''
113
- alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T]
114
- beta = self.beta.unsqueeze(0).unsqueeze(-1)
115
- if self.alpha_logscale:
116
- alpha = torch.exp(alpha)
117
- beta = torch.exp(beta)
118
- x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
119
-
120
- return x
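
The docstrings above define Snake(x) = x + (1/alpha) * sin^2(alpha * x); a standalone numeric check of that formula (independent of the classes above), showing that it approaches the identity as alpha shrinks:

import torch

def snake(x: torch.Tensor, alpha: float = 1.0, eps: float = 1e-9) -> torch.Tensor:
    # Snake(x) = x + (1/alpha) * sin(alpha * x)^2
    return x + (1.0 / (alpha + eps)) * torch.sin(alpha * x) ** 2

x = torch.linspace(-3.0, 3.0, 7)
print(snake(x, alpha=1.0))
print(torch.allclose(snake(x, alpha=1e-4), x, atol=1e-3))   # ~identity for tiny alpha
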
modules/bigvgan/bigvgan.py DELETED
@@ -1,492 +0,0 @@
1
- # Copyright (c) 2024 NVIDIA CORPORATION.
2
- # Licensed under the MIT license.
3
-
4
- # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
5
- # LICENSE is in incl_licenses directory.
6
-
7
- import os
8
- import json
9
- from pathlib import Path
10
- from typing import Optional, Union, Dict
11
-
12
- import torch
13
- import torch.nn as nn
14
- from torch.nn import Conv1d, ConvTranspose1d
15
- from torch.nn.utils import weight_norm, remove_weight_norm
16
-
17
- from . import activations
18
- from .utils import init_weights, get_padding
19
- from .act import Activation1d as TorchActivation1d
20
- from .env import AttrDict
21
-
22
- from huggingface_hub import PyTorchModelHubMixin, hf_hub_download
23
-
24
-
25
- def load_hparams_from_json(path) -> AttrDict:
26
- with open(path) as f:
27
- data = f.read()
28
- return AttrDict(json.loads(data))
29
-
30
-
31
- class AMPBlock1(torch.nn.Module):
32
- """
33
- AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
34
- AMPBlock1 has additional self.convs2 that contains additional Conv1d layers with a fixed dilation=1 followed by each layer in self.convs1
35
-
36
- Args:
37
- h (AttrDict): Hyperparameters.
38
- channels (int): Number of convolution channels.
39
- kernel_size (int): Size of the convolution kernel. Default is 3.
40
- dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5).
41
- activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None.
42
- """
43
-
44
- def __init__(
45
- self,
46
- h: AttrDict,
47
- channels: int,
48
- kernel_size: int = 3,
49
- dilation: tuple = (1, 3, 5),
50
- activation: str = None,
51
- ):
52
- super().__init__()
53
-
54
- self.h = h
55
-
56
- self.convs1 = nn.ModuleList(
57
- [
58
- weight_norm(
59
- Conv1d(
60
- channels,
61
- channels,
62
- kernel_size,
63
- stride=1,
64
- dilation=d,
65
- padding=get_padding(kernel_size, d),
66
- )
67
- )
68
- for d in dilation
69
- ]
70
- )
71
- self.convs1.apply(init_weights)
72
-
73
- self.convs2 = nn.ModuleList(
74
- [
75
- weight_norm(
76
- Conv1d(
77
- channels,
78
- channels,
79
- kernel_size,
80
- stride=1,
81
- dilation=1,
82
- padding=get_padding(kernel_size, 1),
83
- )
84
- )
85
- for _ in range(len(dilation))
86
- ]
87
- )
88
- self.convs2.apply(init_weights)
89
-
90
- self.num_layers = len(self.convs1) + len(
91
- self.convs2
92
- ) # Total number of conv layers
93
-
94
- # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
95
- if self.h.get("use_cuda_kernel", False):
96
- from alias_free_activation.cuda.activation1d import (
97
- Activation1d as CudaActivation1d,
98
- )
99
-
100
- Activation1d = CudaActivation1d
101
- else:
102
- Activation1d = TorchActivation1d
103
-
104
- # Activation functions
105
- if activation == "snake":
106
- self.activations = nn.ModuleList(
107
- [
108
- Activation1d(
109
- activation=activations.Snake(
110
- channels, alpha_logscale=h.snake_logscale
111
- )
112
- )
113
- for _ in range(self.num_layers)
114
- ]
115
- )
116
- elif activation == "snakebeta":
117
- self.activations = nn.ModuleList(
118
- [
119
- Activation1d(
120
- activation=activations.SnakeBeta(
121
- channels, alpha_logscale=h.snake_logscale
122
- )
123
- )
124
- for _ in range(self.num_layers)
125
- ]
126
- )
127
- else:
128
- raise NotImplementedError(
129
- "activation incorrectly specified. check the config file and look for 'activation'."
130
- )
131
-
132
- def forward(self, x):
133
- acts1, acts2 = self.activations[::2], self.activations[1::2]
134
- for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
135
- xt = a1(x)
136
- xt = c1(xt)
137
- xt = a2(xt)
138
- xt = c2(xt)
139
- x = xt + x
140
-
141
- return x
142
-
143
- def remove_weight_norm(self):
144
- for l in self.convs1:
145
- remove_weight_norm(l)
146
- for l in self.convs2:
147
- remove_weight_norm(l)
148
-
149
-
150
- class AMPBlock2(torch.nn.Module):
151
- """
152
- AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
153
- Unlike AMPBlock1, AMPBlock2 does not contain extra Conv1d layers with fixed dilation=1
154
-
155
- Args:
156
- h (AttrDict): Hyperparameters.
157
- channels (int): Number of convolution channels.
158
- kernel_size (int): Size of the convolution kernel. Default is 3.
159
- dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5).
160
- activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None.
161
- """
162
-
163
- def __init__(
164
- self,
165
- h: AttrDict,
166
- channels: int,
167
- kernel_size: int = 3,
168
- dilation: tuple = (1, 3, 5),
169
- activation: str = None,
170
- ):
171
- super().__init__()
172
-
173
- self.h = h
174
-
175
- self.convs = nn.ModuleList(
176
- [
177
- weight_norm(
178
- Conv1d(
179
- channels,
180
- channels,
181
- kernel_size,
182
- stride=1,
183
- dilation=d,
184
- padding=get_padding(kernel_size, d),
185
- )
186
- )
187
- for d in dilation
188
- ]
189
- )
190
- self.convs.apply(init_weights)
191
-
192
- self.num_layers = len(self.convs) # Total number of conv layers
193
-
194
- # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
195
- if self.h.get("use_cuda_kernel", False):
196
- from alias_free_activation.cuda.activation1d import (
197
- Activation1d as CudaActivation1d,
198
- )
199
-
200
- Activation1d = CudaActivation1d
201
- else:
202
- Activation1d = TorchActivation1d
203
-
204
- # Activation functions
205
- if activation == "snake":
206
- self.activations = nn.ModuleList(
207
- [
208
- Activation1d(
209
- activation=activations.Snake(
210
- channels, alpha_logscale=h.snake_logscale
211
- )
212
- )
213
- for _ in range(self.num_layers)
214
- ]
215
- )
216
- elif activation == "snakebeta":
217
- self.activations = nn.ModuleList(
218
- [
219
- Activation1d(
220
- activation=activations.SnakeBeta(
221
- channels, alpha_logscale=h.snake_logscale
222
- )
223
- )
224
- for _ in range(self.num_layers)
225
- ]
226
- )
227
- else:
228
- raise NotImplementedError(
229
- "activation incorrectly specified. check the config file and look for 'activation'."
230
- )
231
-
232
- def forward(self, x):
233
- for c, a in zip(self.convs, self.activations):
234
- xt = a(x)
235
- xt = c(xt)
236
- x = xt + x
237
-
238
- def remove_weight_norm(self):
239
- for l in self.convs:
240
- remove_weight_norm(l)
241
-
242
-
243
- class BigVGAN(
244
- torch.nn.Module,
245
- PyTorchModelHubMixin,
246
- library_name="bigvgan",
247
- repo_url="https://github.com/NVIDIA/BigVGAN",
248
- docs_url="https://github.com/NVIDIA/BigVGAN/blob/main/README.md",
249
- pipeline_tag="audio-to-audio",
250
- license="mit",
251
- tags=["neural-vocoder", "audio-generation", "arxiv:2206.04658"],
252
- ):
253
- """
254
- BigVGAN is a neural vocoder model that applies anti-aliased periodic activation for residual blocks (resblocks).
255
- New in BigVGAN-v2: it can optionally use optimized CUDA kernels for AMP (anti-aliased multi-periodicity) blocks.
256
-
257
- Args:
258
- h (AttrDict): Hyperparameters.
259
- use_cuda_kernel (bool): If set to True, loads optimized CUDA kernels for AMP. This should be used for inference only, as training is not supported with CUDA kernels.
260
-
261
- Note:
262
- - The `use_cuda_kernel` parameter should be used for inference only, as training with CUDA kernels is not supported.
263
- - Ensure that the activation function is correctly specified in the hyperparameters (h.activation).
264
- """
265
-
266
- def __init__(self, h: AttrDict, use_cuda_kernel: bool = False):
267
- super().__init__()
268
- self.h = h
269
- self.h["use_cuda_kernel"] = use_cuda_kernel
270
-
271
- # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
272
- if self.h.get("use_cuda_kernel", False):
273
- from alias_free_activation.cuda.activation1d import (
274
- Activation1d as CudaActivation1d,
275
- )
276
-
277
- Activation1d = CudaActivation1d
278
- else:
279
- Activation1d = TorchActivation1d
280
-
281
- self.num_kernels = len(h.resblock_kernel_sizes)
282
- self.num_upsamples = len(h.upsample_rates)
283
-
284
- # Pre-conv
285
- self.conv_pre = weight_norm(
286
- Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3)
287
- )
288
-
289
- # Define which AMPBlock to use. BigVGAN uses AMPBlock1 as default
290
- if h.resblock == "1":
291
- resblock_class = AMPBlock1
292
- elif h.resblock == "2":
293
- resblock_class = AMPBlock2
294
- else:
295
- raise ValueError(
296
- f"Incorrect resblock class specified in hyperparameters. Got {h.resblock}"
297
- )
298
-
299
- # Transposed conv-based upsamplers. does not apply anti-aliasing
300
- self.ups = nn.ModuleList()
301
- for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
302
- self.ups.append(
303
- nn.ModuleList(
304
- [
305
- weight_norm(
306
- ConvTranspose1d(
307
- h.upsample_initial_channel // (2**i),
308
- h.upsample_initial_channel // (2 ** (i + 1)),
309
- k,
310
- u,
311
- padding=(k - u) // 2,
312
- )
313
- )
314
- ]
315
- )
316
- )
317
-
318
- # Residual blocks using anti-aliased multi-periodicity composition modules (AMP)
319
- self.resblocks = nn.ModuleList()
320
- for i in range(len(self.ups)):
321
- ch = h.upsample_initial_channel // (2 ** (i + 1))
322
- for j, (k, d) in enumerate(
323
- zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)
324
- ):
325
- self.resblocks.append(
326
- resblock_class(h, ch, k, d, activation=h.activation)
327
- )
328
-
329
- # Post-conv
330
- activation_post = (
331
- activations.Snake(ch, alpha_logscale=h.snake_logscale)
332
- if h.activation == "snake"
333
- else (
334
- activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale)
335
- if h.activation == "snakebeta"
336
- else None
337
- )
338
- )
339
- if activation_post is None:
340
- raise NotImplementedError(
341
- "activation incorrectly specified. check the config file and look for 'activation'."
342
- )
343
-
344
- self.activation_post = Activation1d(activation=activation_post)
345
-
346
- # Whether to use bias for the final conv_post. Default to True for backward compatibility
347
- self.use_bias_at_final = h.get("use_bias_at_final", True)
348
- self.conv_post = weight_norm(
349
- Conv1d(ch, 1, 7, 1, padding=3, bias=self.use_bias_at_final)
350
- )
351
-
352
- # Weight initialization
353
- for i in range(len(self.ups)):
354
- self.ups[i].apply(init_weights)
355
- self.conv_post.apply(init_weights)
356
-
357
- # Final tanh activation. Defaults to True for backward compatibility
358
- self.use_tanh_at_final = h.get("use_tanh_at_final", True)
359
-
360
- def forward(self, x):
361
- # Pre-conv
362
- x = self.conv_pre(x)
363
-
364
- for i in range(self.num_upsamples):
365
- # Upsampling
366
- for i_up in range(len(self.ups[i])):
367
- x = self.ups[i][i_up](x)
368
- # AMP blocks
369
- xs = None
370
- for j in range(self.num_kernels):
371
- if xs is None:
372
- xs = self.resblocks[i * self.num_kernels + j](x)
373
- else:
374
- xs += self.resblocks[i * self.num_kernels + j](x)
375
- x = xs / self.num_kernels
376
-
377
- # Post-conv
378
- x = self.activation_post(x)
379
- x = self.conv_post(x)
380
- # Final tanh activation
381
- if self.use_tanh_at_final:
382
- x = torch.tanh(x)
383
- else:
384
- x = torch.clamp(x, min=-1.0, max=1.0) # Bound the output to [-1, 1]
385
-
386
- return x
387
-
388
- def remove_weight_norm(self):
389
- try:
390
- print("Removing weight norm...")
391
- for l in self.ups:
392
- for l_i in l:
393
- remove_weight_norm(l_i)
394
- for l in self.resblocks:
395
- l.remove_weight_norm()
396
- remove_weight_norm(self.conv_pre)
397
- remove_weight_norm(self.conv_post)
398
- except ValueError:
399
- print("[INFO] Model already removed weight norm. Skipping!")
400
- pass
401
-
402
- # Additional methods for huggingface_hub support
403
- def _save_pretrained(self, save_directory: Path) -> None:
404
- """Save weights and config.json from a Pytorch model to a local directory."""
405
-
406
- model_path = save_directory / "bigvgan_generator.pt"
407
- torch.save({"generator": self.state_dict()}, model_path)
408
-
409
- config_path = save_directory / "config.json"
410
- with open(config_path, "w") as config_file:
411
- json.dump(self.h, config_file, indent=4)
412
-
413
- @classmethod
414
- def _from_pretrained(
415
- cls,
416
- *,
417
- model_id: str,
418
- revision: str,
419
- cache_dir: str,
420
- force_download: bool,
421
- proxies: Optional[Dict],
422
- resume_download: bool,
423
- local_files_only: bool,
424
- token: Union[str, bool, None],
425
- map_location: str = "cpu", # Additional argument
426
- strict: bool = False, # Additional argument
427
- use_cuda_kernel: bool = False,
428
- **model_kwargs,
429
- ):
430
- """Load Pytorch pretrained weights and return the loaded model."""
431
-
432
- # Download and load hyperparameters (h) used by BigVGAN
433
- if os.path.isdir(model_id):
434
- print("Loading config.json from local directory")
435
- config_file = os.path.join(model_id, "config.json")
436
- else:
437
- config_file = hf_hub_download(
438
- repo_id=model_id,
439
- filename="config.json",
440
- revision=revision,
441
- cache_dir=cache_dir,
442
- force_download=force_download,
443
- proxies=proxies,
444
- resume_download=resume_download,
445
- token=token,
446
- local_files_only=local_files_only,
447
- )
448
- h = load_hparams_from_json(config_file)
449
-
450
- # instantiate BigVGAN using h
451
- if use_cuda_kernel:
452
- print(
453
- f"[WARNING] You have specified use_cuda_kernel=True during BigVGAN.from_pretrained(). Only inference is supported (training is not implemented)!"
454
- )
455
- print(
456
- f"[WARNING] You need nvcc and ninja installed in your system that matches your PyTorch build is using to build the kernel. If not, the model will fail to initialize or generate incorrect waveform!"
457
- )
458
- print(
459
- f"[WARNING] For detail, see the official GitHub repository: https://github.com/NVIDIA/BigVGAN?tab=readme-ov-file#using-custom-cuda-kernel-for-synthesis"
460
- )
461
- model = cls(h, use_cuda_kernel=use_cuda_kernel)
462
-
463
- # Download and load pretrained generator weight
464
- if os.path.isdir(model_id):
465
- print("Loading weights from local directory")
466
- model_file = os.path.join(model_id, "bigvgan_generator.pt")
467
- else:
468
- print(f"Loading weights from {model_id}")
469
- model_file = hf_hub_download(
470
- repo_id=model_id,
471
- filename="bigvgan_generator.pt",
472
- revision=revision,
473
- cache_dir=cache_dir,
474
- force_download=force_download,
475
- proxies=proxies,
476
- resume_download=resume_download,
477
- token=token,
478
- local_files_only=local_files_only,
479
- )
480
-
481
- checkpoint_dict = torch.load(model_file, map_location=map_location)
482
-
483
- try:
484
- model.load_state_dict(checkpoint_dict["generator"])
485
- except RuntimeError:
486
- print(
487
- f"[INFO] the pretrained checkpoint does not contain weight norm. Loading the checkpoint after removing weight norm!"
488
- )
489
- model.remove_weight_norm()
490
- model.load_state_dict(checkpoint_dict["generator"])
491
-
492
- return model
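
A minimal inference sketch for the generator defined above. The repository id is only illustrative; any checkpoint repo following the config.json + bigvgan_generator.pt layout expected by _from_pretrained would work, and the import assumes the deleted module tree is still available locally:

import torch
from modules.bigvgan.bigvgan import BigVGAN

model = BigVGAN.from_pretrained("nvidia/bigvgan_v2_22khz_80band_256x", use_cuda_kernel=False)
model.remove_weight_norm()       # inference-only: fold weight norm into the conv weights
model.eval()

mel = torch.randn(1, model.h.num_mels, 200)      # [B, num_mels, frames]
with torch.inference_mode():
    wav = model(mel)                             # [B, 1, frames * prod(upsample_rates)]
print(wav.shape)
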
modules/bigvgan/config.json DELETED
@@ -1,63 +0,0 @@
1
- {
2
- "resblock": "1",
3
- "num_gpus": 0,
4
- "batch_size": 32,
5
- "learning_rate": 0.0001,
6
- "adam_b1": 0.8,
7
- "adam_b2": 0.99,
8
- "lr_decay": 0.9999996,
9
- "seed": 1234,
10
-
11
- "upsample_rates": [4,4,2,2,2,2],
12
- "upsample_kernel_sizes": [8,8,4,4,4,4],
13
- "upsample_initial_channel": 1536,
14
- "resblock_kernel_sizes": [3,7,11],
15
- "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
16
-
17
- "use_tanh_at_final": false,
18
- "use_bias_at_final": false,
19
-
20
- "activation": "snakebeta",
21
- "snake_logscale": true,
22
-
23
- "use_cqtd_instead_of_mrd": true,
24
- "cqtd_filters": 128,
25
- "cqtd_max_filters": 1024,
26
- "cqtd_filters_scale": 1,
27
- "cqtd_dilations": [1, 2, 4],
28
- "cqtd_hop_lengths": [512, 256, 256],
29
- "cqtd_n_octaves": [9, 9, 9],
30
- "cqtd_bins_per_octaves": [24, 36, 48],
31
-
32
- "mpd_reshapes": [2, 3, 5, 7, 11],
33
- "use_spectral_norm": false,
34
- "discriminator_channel_mult": 1,
35
-
36
- "use_multiscale_melloss": true,
37
- "lambda_melloss": 15,
38
-
39
- "clip_grad_norm": 500,
40
-
41
- "segment_size": 65536,
42
- "num_mels": 80,
43
- "num_freq": 1025,
44
- "n_fft": 1024,
45
- "hop_size": 256,
46
- "win_size": 1024,
47
-
48
- "sampling_rate": 22050,
49
-
50
- "fmin": 0,
51
- "fmax": null,
52
- "fmax_for_loss": null,
53
-
54
- "normalize_volume": true,
55
-
56
- "num_workers": 4,
57
-
58
- "dist_config": {
59
- "dist_backend": "nccl",
60
- "dist_url": "tcp://localhost:54321",
61
- "world_size": 1
62
- }
63
- }
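
These hyperparameters are what load_hparams_from_json turns into the AttrDict `h` consumed by the generator; a small sketch of loading them, assuming the file layout above:

import json
from modules.bigvgan.env import AttrDict

with open("modules/bigvgan/config.json") as f:
    h = AttrDict(json.load(f))

print(h.sampling_rate, h.num_mels, h.hop_size)   # 22050 80 256
print(h.resblock, h.upsample_rates)              # 1 [4, 4, 2, 2, 2, 2]
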
modules/bigvgan/env.py DELETED
@@ -1,18 +0,0 @@
1
- # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
2
- # LICENSE is in incl_licenses directory.
3
-
4
- import os
5
- import shutil
6
-
7
-
8
- class AttrDict(dict):
9
- def __init__(self, *args, **kwargs):
10
- super(AttrDict, self).__init__(*args, **kwargs)
11
- self.__dict__ = self
12
-
13
-
14
- def build_env(config, config_name, path):
15
- t_path = os.path.join(path, config_name)
16
- if config != t_path:
17
- os.makedirs(path, exist_ok=True)
18
- shutil.copyfile(config, os.path.join(path, config_name))
modules/bigvgan/filter.py DELETED
@@ -1,101 +0,0 @@
1
- # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
2
- # LICENSE is in incl_licenses directory.
3
-
4
- import torch
5
- import torch.nn as nn
6
- import torch.nn.functional as F
7
- import math
8
-
9
- if "sinc" in dir(torch):
10
- sinc = torch.sinc
11
- else:
12
- # This code is adopted from adefossez's julius.core.sinc under the MIT License
13
- # https://adefossez.github.io/julius/julius/core.html
14
- # LICENSE is in incl_licenses directory.
15
- def sinc(x: torch.Tensor):
16
- """
17
- Implementation of sinc, i.e. sin(pi * x) / (pi * x)
18
- __Warning__: Different to julius.sinc, the input is multiplied by `pi`!
19
- """
20
- return torch.where(
21
- x == 0,
22
- torch.tensor(1.0, device=x.device, dtype=x.dtype),
23
- torch.sin(math.pi * x) / math.pi / x,
24
- )
25
-
26
-
27
- # This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License
28
- # https://adefossez.github.io/julius/julius/lowpass.html
29
- # LICENSE is in incl_licenses directory.
30
- def kaiser_sinc_filter1d(
31
- cutoff, half_width, kernel_size
32
- ): # return filter [1,1,kernel_size]
33
- even = kernel_size % 2 == 0
34
- half_size = kernel_size // 2
35
-
36
- # For kaiser window
37
- delta_f = 4 * half_width
38
- A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95
39
- if A > 50.0:
40
- beta = 0.1102 * (A - 8.7)
41
- elif A >= 21.0:
42
- beta = 0.5842 * (A - 21) ** 0.4 + 0.07886 * (A - 21.0)
43
- else:
44
- beta = 0.0
45
- window = torch.kaiser_window(kernel_size, beta=beta, periodic=False)
46
-
47
- # ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio
48
- if even:
49
- time = torch.arange(-half_size, half_size) + 0.5
50
- else:
51
- time = torch.arange(kernel_size) - half_size
52
- if cutoff == 0:
53
- filter_ = torch.zeros_like(time)
54
- else:
55
- filter_ = 2 * cutoff * window * sinc(2 * cutoff * time)
56
- """
57
- Normalize filter to have sum = 1, otherwise we will have a small leakage of the constant component in the input signal.
58
- """
59
- filter_ /= filter_.sum()
60
- filter = filter_.view(1, 1, kernel_size)
61
-
62
- return filter
63
-
64
-
65
- class LowPassFilter1d(nn.Module):
66
- def __init__(
67
- self,
68
- cutoff=0.5,
69
- half_width=0.6,
70
- stride: int = 1,
71
- padding: bool = True,
72
- padding_mode: str = "replicate",
73
- kernel_size: int = 12,
74
- ):
75
- """
76
- kernel_size should be an even number for the StyleGAN3 setup; in this implementation, an odd number is also possible.
77
- """
78
- super().__init__()
79
- if cutoff < -0.0:
80
- raise ValueError("Minimum cutoff must be larger than zero.")
81
- if cutoff > 0.5:
82
- raise ValueError("A cutoff above 0.5 does not make sense.")
83
- self.kernel_size = kernel_size
84
- self.even = kernel_size % 2 == 0
85
- self.pad_left = kernel_size // 2 - int(self.even)
86
- self.pad_right = kernel_size // 2
87
- self.stride = stride
88
- self.padding = padding
89
- self.padding_mode = padding_mode
90
- filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size)
91
- self.register_buffer("filter", filter)
92
-
93
- # Input [B, C, T]
94
- def forward(self, x):
95
- _, C, _ = x.shape
96
-
97
- if self.padding:
98
- x = F.pad(x, (self.pad_left, self.pad_right), mode=self.padding_mode)
99
- out = F.conv1d(x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C)
100
-
101
- return out
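
The filter above is normalized so its taps sum to one (no DC leakage), and the padding keeps the sequence length unchanged at stride 1; a standalone check, assuming the deleted module path:

import torch
from modules.bigvgan.filter import kaiser_sinc_filter1d, LowPassFilter1d

taps = kaiser_sinc_filter1d(cutoff=0.25, half_width=0.3, kernel_size=12)
print(taps.shape, float(taps.sum()))   # torch.Size([1, 1, 12]) ~1.0

lpf = LowPassFilter1d(cutoff=0.25, half_width=0.3, kernel_size=12)
x = torch.randn(2, 4, 256)             # [B, C, T]
print(lpf(x).shape)                    # torch.Size([2, 4, 256]) -- length preserved at stride 1
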
modules/bigvgan/meldataset.py DELETED
@@ -1,354 +0,0 @@
1
- # Copyright (c) 2024 NVIDIA CORPORATION.
2
- # Licensed under the MIT license.
3
-
4
- # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
5
- # LICENSE is in incl_licenses directory.
6
-
7
- import math
8
- import os
9
- import random
10
- import torch
11
- import torch.utils.data
12
- import numpy as np
13
- from librosa.util import normalize
14
- from scipy.io.wavfile import read
15
- from librosa.filters import mel as librosa_mel_fn
16
- import pathlib
17
- from tqdm import tqdm
18
-
19
- MAX_WAV_VALUE = 32767.0 # NOTE: 32768.0 -1 to prevent int16 overflow (results in popping sound in corner cases)
20
-
21
-
22
- def load_wav(full_path, sr_target):
23
- sampling_rate, data = read(full_path)
24
- if sampling_rate != sr_target:
25
- raise RuntimeError(
26
- f"Sampling rate of the file {full_path} is {sampling_rate} Hz, but the model requires {sr_target} Hz"
27
- )
28
- return data, sampling_rate
29
-
30
-
31
- def dynamic_range_compression(x, C=1, clip_val=1e-5):
32
- return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
33
-
34
-
35
- def dynamic_range_decompression(x, C=1):
36
- return np.exp(x) / C
37
-
38
-
39
- def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
40
- return torch.log(torch.clamp(x, min=clip_val) * C)
41
-
42
-
43
- def dynamic_range_decompression_torch(x, C=1):
44
- return torch.exp(x) / C
45
-
46
-
47
- def spectral_normalize_torch(magnitudes):
48
- return dynamic_range_compression_torch(magnitudes)
49
-
50
-
51
- def spectral_de_normalize_torch(magnitudes):
52
- return dynamic_range_decompression_torch(magnitudes)
53
-
54
-
55
- mel_basis_cache = {}
56
- hann_window_cache = {}
57
-
58
-
59
- def mel_spectrogram(
60
- y: torch.Tensor,
61
- n_fft: int,
62
- num_mels: int,
63
- sampling_rate: int,
64
- hop_size: int,
65
- win_size: int,
66
- fmin: int,
67
- fmax: int = None,
68
- center: bool = False,
69
- ) -> torch.Tensor:
70
- """
71
- Calculate the mel spectrogram of an input signal.
72
- This function uses slaney norm for the librosa mel filterbank (using librosa.filters.mel) and uses Hann window for STFT (using torch.stft).
73
-
74
- Args:
75
- y (torch.Tensor): Input signal.
76
- n_fft (int): FFT size.
77
- num_mels (int): Number of mel bins.
78
- sampling_rate (int): Sampling rate of the input signal.
79
- hop_size (int): Hop size for STFT.
80
- win_size (int): Window size for STFT.
81
- fmin (int): Minimum frequency for mel filterbank.
82
- fmax (int): Maximum frequency for mel filterbank. If None, defaults to half the sampling rate (fmax = sr / 2.0) inside librosa_mel_fn
83
- center (bool): Whether to pad the input to center the frames. Default is False.
84
-
85
- Returns:
86
- torch.Tensor: Mel spectrogram.
87
- """
88
- if torch.min(y) < -1.0:
89
- print(f"[WARNING] Min value of input waveform signal is {torch.min(y)}")
90
- if torch.max(y) > 1.0:
91
- print(f"[WARNING] Max value of input waveform signal is {torch.max(y)}")
92
-
93
- device = y.device
94
- key = f"{n_fft}_{num_mels}_{sampling_rate}_{hop_size}_{win_size}_{fmin}_{fmax}_{device}"
95
-
96
- if key not in mel_basis_cache:
97
- mel = librosa_mel_fn(
98
- sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax
99
- )
100
- mel_basis_cache[key] = torch.from_numpy(mel).float().to(device)
101
- hann_window_cache[key] = torch.hann_window(win_size).to(device)
102
-
103
- mel_basis = mel_basis_cache[key]
104
- hann_window = hann_window_cache[key]
105
-
106
- padding = (n_fft - hop_size) // 2
107
- y = torch.nn.functional.pad(
108
- y.unsqueeze(1), (padding, padding), mode="reflect"
109
- ).squeeze(1)
110
-
111
- spec = torch.stft(
112
- y,
113
- n_fft,
114
- hop_length=hop_size,
115
- win_length=win_size,
116
- window=hann_window,
117
- center=center,
118
- pad_mode="reflect",
119
- normalized=False,
120
- onesided=True,
121
- return_complex=True,
122
- )
123
- spec = torch.sqrt(torch.view_as_real(spec).pow(2).sum(-1) + 1e-9)
124
-
125
- mel_spec = torch.matmul(mel_basis, spec)
126
- mel_spec = spectral_normalize_torch(mel_spec)
127
-
128
- return mel_spec
129
-
130
-
131
- def get_mel_spectrogram(wav, h):
132
- """
133
- Generate mel spectrogram from a waveform using given hyperparameters.
134
-
135
- Args:
136
- wav (torch.Tensor): Input waveform.
137
- h: Hyperparameters object with attributes n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax.
138
-
139
- Returns:
140
- torch.Tensor: Mel spectrogram.
141
- """
142
- return mel_spectrogram(
143
- wav,
144
- h.n_fft,
145
- h.num_mels,
146
- h.sampling_rate,
147
- h.hop_size,
148
- h.win_size,
149
- h.fmin,
150
- h.fmax,
151
- )
152
-
153
-
154
- def get_dataset_filelist(a):
155
- training_files = []
156
- validation_files = []
157
- list_unseen_validation_files = []
158
-
159
- with open(a.input_training_file, "r", encoding="utf-8") as fi:
160
- training_files = [
161
- os.path.join(a.input_wavs_dir, x.split("|")[0] + ".wav")
162
- for x in fi.read().split("\n")
163
- if len(x) > 0
164
- ]
165
- print(f"first training file: {training_files[0]}")
166
-
167
- with open(a.input_validation_file, "r", encoding="utf-8") as fi:
168
- validation_files = [
169
- os.path.join(a.input_wavs_dir, x.split("|")[0] + ".wav")
170
- for x in fi.read().split("\n")
171
- if len(x) > 0
172
- ]
173
- print(f"first validation file: {validation_files[0]}")
174
-
175
- for i in range(len(a.list_input_unseen_validation_file)):
176
- with open(a.list_input_unseen_validation_file[i], "r", encoding="utf-8") as fi:
177
- unseen_validation_files = [
178
- os.path.join(a.list_input_unseen_wavs_dir[i], x.split("|")[0] + ".wav")
179
- for x in fi.read().split("\n")
180
- if len(x) > 0
181
- ]
182
- print(
183
- f"first unseen {i}th validation fileset: {unseen_validation_files[0]}"
184
- )
185
- list_unseen_validation_files.append(unseen_validation_files)
186
-
187
- return training_files, validation_files, list_unseen_validation_files
188
-
189
-
190
- class MelDataset(torch.utils.data.Dataset):
191
- def __init__(
192
- self,
193
- training_files,
194
- hparams,
195
- segment_size,
196
- n_fft,
197
- num_mels,
198
- hop_size,
199
- win_size,
200
- sampling_rate,
201
- fmin,
202
- fmax,
203
- split=True,
204
- shuffle=True,
205
- n_cache_reuse=1,
206
- device=None,
207
- fmax_loss=None,
208
- fine_tuning=False,
209
- base_mels_path=None,
210
- is_seen=True,
211
- ):
212
- self.audio_files = training_files
213
- random.seed(1234)
214
- if shuffle:
215
- random.shuffle(self.audio_files)
216
- self.hparams = hparams
217
- self.is_seen = is_seen
218
- if self.is_seen:
219
- self.name = pathlib.Path(self.audio_files[0]).parts[0]
220
- else:
221
- self.name = "-".join(pathlib.Path(self.audio_files[0]).parts[:2]).strip("/")
222
-
223
- self.segment_size = segment_size
224
- self.sampling_rate = sampling_rate
225
- self.split = split
226
- self.n_fft = n_fft
227
- self.num_mels = num_mels
228
- self.hop_size = hop_size
229
- self.win_size = win_size
230
- self.fmin = fmin
231
- self.fmax = fmax
232
- self.fmax_loss = fmax_loss
233
- self.cached_wav = None
234
- self.n_cache_reuse = n_cache_reuse
235
- self._cache_ref_count = 0
236
- self.device = device
237
- self.fine_tuning = fine_tuning
238
- self.base_mels_path = base_mels_path
239
-
240
- print("[INFO] checking dataset integrity...")
241
- for i in tqdm(range(len(self.audio_files))):
242
- assert os.path.exists(
243
- self.audio_files[i]
244
- ), f"{self.audio_files[i]} not found"
245
-
246
- def __getitem__(self, index):
247
- filename = self.audio_files[index]
248
- if self._cache_ref_count == 0:
249
- audio, sampling_rate = load_wav(filename, self.sampling_rate)
250
- audio = audio / MAX_WAV_VALUE
251
- if not self.fine_tuning:
252
- audio = normalize(audio) * 0.95
253
- self.cached_wav = audio
254
- if sampling_rate != self.sampling_rate:
255
- raise ValueError(
256
- f"{sampling_rate} SR doesn't match target {self.sampling_rate} SR"
257
- )
258
- self._cache_ref_count = self.n_cache_reuse
259
- else:
260
- audio = self.cached_wav
261
- self._cache_ref_count -= 1
262
-
263
- audio = torch.FloatTensor(audio)
264
- audio = audio.unsqueeze(0)
265
-
266
- if not self.fine_tuning:
267
- if self.split:
268
- if audio.size(1) >= self.segment_size:
269
- max_audio_start = audio.size(1) - self.segment_size
270
- audio_start = random.randint(0, max_audio_start)
271
- audio = audio[:, audio_start : audio_start + self.segment_size]
272
- else:
273
- audio = torch.nn.functional.pad(
274
- audio, (0, self.segment_size - audio.size(1)), "constant"
275
- )
276
-
277
- mel = mel_spectrogram(
278
- audio,
279
- self.n_fft,
280
- self.num_mels,
281
- self.sampling_rate,
282
- self.hop_size,
283
- self.win_size,
284
- self.fmin,
285
- self.fmax,
286
- center=False,
287
- )
288
- else: # Validation step
289
- # Match audio length to self.hop_size * n for evaluation
290
- if (audio.size(1) % self.hop_size) != 0:
291
- audio = audio[:, : -(audio.size(1) % self.hop_size)]
292
- mel = mel_spectrogram(
293
- audio,
294
- self.n_fft,
295
- self.num_mels,
296
- self.sampling_rate,
297
- self.hop_size,
298
- self.win_size,
299
- self.fmin,
300
- self.fmax,
301
- center=False,
302
- )
303
- assert (
304
- audio.shape[1] == mel.shape[2] * self.hop_size
305
- ), f"audio shape {audio.shape} mel shape {mel.shape}"
306
-
307
- else:
308
- mel = np.load(
309
- os.path.join(
310
- self.base_mels_path,
311
- os.path.splitext(os.path.split(filename)[-1])[0] + ".npy",
312
- )
313
- )
314
- mel = torch.from_numpy(mel)
315
-
316
- if len(mel.shape) < 3:
317
- mel = mel.unsqueeze(0)
318
-
319
- if self.split:
320
- frames_per_seg = math.ceil(self.segment_size / self.hop_size)
321
-
322
- if audio.size(1) >= self.segment_size:
323
- mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1)
324
- mel = mel[:, :, mel_start : mel_start + frames_per_seg]
325
- audio = audio[
326
- :,
327
- mel_start
328
- * self.hop_size : (mel_start + frames_per_seg)
329
- * self.hop_size,
330
- ]
331
- else:
332
- mel = torch.nn.functional.pad(
333
- mel, (0, frames_per_seg - mel.size(2)), "constant"
334
- )
335
- audio = torch.nn.functional.pad(
336
- audio, (0, self.segment_size - audio.size(1)), "constant"
337
- )
338
-
339
- mel_loss = mel_spectrogram(
340
- audio,
341
- self.n_fft,
342
- self.num_mels,
343
- self.sampling_rate,
344
- self.hop_size,
345
- self.win_size,
346
- self.fmin,
347
- self.fmax_loss,
348
- center=False,
349
- )
350
-
351
- return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())
352
-
353
- def __len__(self):
354
- return len(self.audio_files)
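
The get_mel_spectrogram helper above simply forwards the STFT fields of the hyperparameter object; a minimal sketch with a synthetic one-second waveform and an AttrDict carrying the same field names used throughout this tree:

import torch
from modules.bigvgan.env import AttrDict
from modules.bigvgan.meldataset import get_mel_spectrogram

h = AttrDict(n_fft=1024, num_mels=80, sampling_rate=22050,
             hop_size=256, win_size=1024, fmin=0, fmax=None)

wav = torch.rand(1, 22050) * 2.0 - 1.0   # one second of noise in [-1, 1]
mel = get_mel_spectrogram(wav, h)
print(mel.shape)                         # [1, 80, 86] for this one-second input
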
modules/bigvgan/resample.py DELETED
@@ -1,58 +0,0 @@
1
- # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
2
- # LICENSE is in incl_licenses directory.
3
-
4
- import torch.nn as nn
5
- from torch.nn import functional as F
6
- from .filter import LowPassFilter1d
7
- from .filter import kaiser_sinc_filter1d
8
-
9
-
10
- class UpSample1d(nn.Module):
11
- def __init__(self, ratio=2, kernel_size=None):
12
- super().__init__()
13
- self.ratio = ratio
14
- self.kernel_size = (
15
- int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
16
- )
17
- self.stride = ratio
18
- self.pad = self.kernel_size // ratio - 1
19
- self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2
20
- self.pad_right = (
21
- self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2
22
- )
23
- filter = kaiser_sinc_filter1d(
24
- cutoff=0.5 / ratio, half_width=0.6 / ratio, kernel_size=self.kernel_size
25
- )
26
- self.register_buffer("filter", filter)
27
-
28
- # x: [B, C, T]
29
- def forward(self, x):
30
- _, C, _ = x.shape
31
-
32
- x = F.pad(x, (self.pad, self.pad), mode="replicate")
33
- x = self.ratio * F.conv_transpose1d(
34
- x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C
35
- )
36
- x = x[..., self.pad_left : -self.pad_right]
37
-
38
- return x
39
-
40
-
41
- class DownSample1d(nn.Module):
42
- def __init__(self, ratio=2, kernel_size=None):
43
- super().__init__()
44
- self.ratio = ratio
45
- self.kernel_size = (
46
- int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
47
- )
48
- self.lowpass = LowPassFilter1d(
49
- cutoff=0.5 / ratio,
50
- half_width=0.6 / ratio,
51
- stride=ratio,
52
- kernel_size=self.kernel_size,
53
- )
54
-
55
- def forward(self, x):
56
- xx = self.lowpass(x)
57
-
58
- return xx
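
A standalone sanity check of the two resamplers defined above: upsampling by 2 doubles the length and the matching downsampler restores it, assuming the deleted module path:

import torch
from modules.bigvgan.resample import UpSample1d, DownSample1d

up, down = UpSample1d(ratio=2), DownSample1d(ratio=2)
x = torch.randn(1, 8, 500)      # [B, C, T]
print(up(x).shape)              # torch.Size([1, 8, 1000])
print(down(up(x)).shape)        # torch.Size([1, 8, 500])
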
modules/bigvgan/utils.py DELETED
@@ -1,99 +0,0 @@
1
- # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
2
- # LICENSE is in incl_licenses directory.
3
-
4
- import glob
5
- import os
6
- import matplotlib
7
- import torch
8
- from torch.nn.utils import weight_norm
9
-
10
- matplotlib.use("Agg")
11
- import matplotlib.pylab as plt
12
- from .meldataset import MAX_WAV_VALUE
13
- from scipy.io.wavfile import write
14
-
15
-
16
- def plot_spectrogram(spectrogram):
17
- fig, ax = plt.subplots(figsize=(10, 2))
18
- im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none")
19
- plt.colorbar(im, ax=ax)
20
-
21
- fig.canvas.draw()
22
- plt.close()
23
-
24
- return fig
25
-
26
-
27
- def plot_spectrogram_clipped(spectrogram, clip_max=2.0):
28
- fig, ax = plt.subplots(figsize=(10, 2))
29
- im = ax.imshow(
30
- spectrogram,
31
- aspect="auto",
32
- origin="lower",
33
- interpolation="none",
34
- vmin=1e-6,
35
- vmax=clip_max,
36
- )
37
- plt.colorbar(im, ax=ax)
38
-
39
- fig.canvas.draw()
40
- plt.close()
41
-
42
- return fig
43
-
44
-
45
- def init_weights(m, mean=0.0, std=0.01):
46
- classname = m.__class__.__name__
47
- if classname.find("Conv") != -1:
48
- m.weight.data.normal_(mean, std)
49
-
50
-
51
- def apply_weight_norm(m):
52
- classname = m.__class__.__name__
53
- if classname.find("Conv") != -1:
54
- weight_norm(m)
55
-
56
-
57
- def get_padding(kernel_size, dilation=1):
58
- return int((kernel_size * dilation - dilation) / 2)
59
-
60
-
61
- def load_checkpoint(filepath, device):
62
- assert os.path.isfile(filepath)
63
- print(f"Loading '{filepath}'")
64
- checkpoint_dict = torch.load(filepath, map_location=device)
65
- print("Complete.")
66
- return checkpoint_dict
67
-
68
-
69
- def save_checkpoint(filepath, obj):
70
- print(f"Saving checkpoint to {filepath}")
71
- torch.save(obj, filepath)
72
- print("Complete.")
73
-
74
-
75
- def scan_checkpoint(cp_dir, prefix, renamed_file=None):
76
- # Fallback to original scanning logic first
77
- pattern = os.path.join(cp_dir, prefix + "????????")
78
- cp_list = glob.glob(pattern)
79
-
80
- if len(cp_list) > 0:
81
- last_checkpoint_path = sorted(cp_list)[-1]
82
- print(f"[INFO] Resuming from checkpoint: '{last_checkpoint_path}'")
83
- return last_checkpoint_path
84
-
85
- # If no pattern-based checkpoints are found, check for renamed file
86
- if renamed_file:
87
- renamed_path = os.path.join(cp_dir, renamed_file)
88
- if os.path.isfile(renamed_path):
89
- print(f"[INFO] Resuming from renamed checkpoint: '{renamed_file}'")
90
- return renamed_path
91
-
92
- return None
93
-
94
-
95
- def save_audio(audio, path, sr):
96
- # wav: torch with 1d shape
97
- audio = audio * MAX_WAV_VALUE
98
- audio = audio.cpu().numpy().astype("int16")
99
- write(path, sr, audio)
modules/campplus/DTDNN.py DELETED
@@ -1,115 +0,0 @@
1
- # Copyright 3D-Speaker (https://github.com/alibaba-damo-academy/3D-Speaker). All Rights Reserved.
2
- # Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
3
-
4
- from collections import OrderedDict
5
-
6
- import torch
7
- from torch import nn
8
- import torch.nn.functional as F
9
-
10
- from modules.campplus.layers import DenseLayer, StatsPool, TDNNLayer, CAMDenseTDNNBlock, TransitLayer, BasicResBlock, get_nonlinear
11
-
12
-
13
- class FCM(nn.Module):
14
- def __init__(self,
15
- block=BasicResBlock,
16
- num_blocks=[2, 2],
17
- m_channels=32,
18
- feat_dim=80):
19
- super(FCM, self).__init__()
20
- self.in_planes = m_channels
21
- self.conv1 = nn.Conv2d(1, m_channels, kernel_size=3, stride=1, padding=1, bias=False)
22
- self.bn1 = nn.BatchNorm2d(m_channels)
23
-
24
- self.layer1 = self._make_layer(block, m_channels, num_blocks[0], stride=2)
25
- self.layer2 = self._make_layer(block, m_channels, num_blocks[1], stride=2)
26
-
27
- self.conv2 = nn.Conv2d(m_channels, m_channels, kernel_size=3, stride=(2, 1), padding=1, bias=False)
28
- self.bn2 = nn.BatchNorm2d(m_channels)
29
- self.out_channels = m_channels * (feat_dim // 8)
30
-
31
- def _make_layer(self, block, planes, num_blocks, stride):
32
- strides = [stride] + [1] * (num_blocks - 1)
33
- layers = []
34
- for stride in strides:
35
- layers.append(block(self.in_planes, planes, stride))
36
- self.in_planes = planes * block.expansion
37
- return nn.Sequential(*layers)
38
-
39
- def forward(self, x):
40
- x = x.unsqueeze(1)
41
- out = F.relu(self.bn1(self.conv1(x)))
42
- out = self.layer1(out)
43
- out = self.layer2(out)
44
- out = F.relu(self.bn2(self.conv2(out)))
45
-
46
- shape = out.shape
47
- out = out.reshape(shape[0], shape[1]*shape[2], shape[3])
48
- return out
49
-
50
- class CAMPPlus(nn.Module):
51
- def __init__(self,
52
- feat_dim=80,
53
- embedding_size=512,
54
- growth_rate=32,
55
- bn_size=4,
56
- init_channels=128,
57
- config_str='batchnorm-relu',
58
- memory_efficient=True):
59
- super(CAMPPlus, self).__init__()
60
-
61
- self.head = FCM(feat_dim=feat_dim)
62
- channels = self.head.out_channels
63
-
64
- self.xvector = nn.Sequential(
65
- OrderedDict([
66
-
67
- ('tdnn',
68
- TDNNLayer(channels,
69
- init_channels,
70
- 5,
71
- stride=2,
72
- dilation=1,
73
- padding=-1,
74
- config_str=config_str)),
75
- ]))
76
- channels = init_channels
77
- for i, (num_layers, kernel_size,
78
- dilation) in enumerate(zip((12, 24, 16), (3, 3, 3), (1, 2, 2))):
79
- block = CAMDenseTDNNBlock(num_layers=num_layers,
80
- in_channels=channels,
81
- out_channels=growth_rate,
82
- bn_channels=bn_size * growth_rate,
83
- kernel_size=kernel_size,
84
- dilation=dilation,
85
- config_str=config_str,
86
- memory_efficient=memory_efficient)
87
- self.xvector.add_module('block%d' % (i + 1), block)
88
- channels = channels + num_layers * growth_rate
89
- self.xvector.add_module(
90
- 'transit%d' % (i + 1),
91
- TransitLayer(channels,
92
- channels // 2,
93
- bias=False,
94
- config_str=config_str))
95
- channels //= 2
96
-
97
- self.xvector.add_module(
98
- 'out_nonlinear', get_nonlinear(config_str, channels))
99
-
100
- self.xvector.add_module('stats', StatsPool())
101
- self.xvector.add_module(
102
- 'dense',
103
- DenseLayer(channels * 2, embedding_size, config_str='batchnorm_'))
104
-
105
- for m in self.modules():
106
- if isinstance(m, (nn.Conv1d, nn.Linear)):
107
- nn.init.kaiming_normal_(m.weight.data)
108
- if m.bias is not None:
109
- nn.init.zeros_(m.bias)
110
-
111
- def forward(self, x):
112
- x = x.permute(0, 2, 1) # (B,T,F) => (B,F,T)
113
- x = self.head(x)
114
- x = self.xvector(x)
115
- return x
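
A minimal sketch of running the speaker-embedding network above on a batch of 80-dimensional fbank features (shapes as documented in forward; the module path matches this deleted tree):

import torch
from modules.campplus.DTDNN import CAMPPlus

model = CAMPPlus(feat_dim=80, embedding_size=512).eval()
feats = torch.randn(4, 300, 80)        # [B, T, F]: e.g. ~3 s of 80-dim fbank at a 10 ms hop
with torch.inference_mode():
    emb = model(feats)
print(emb.shape)                       # torch.Size([4, 512])
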
modules/campplus/classifier.py DELETED
@@ -1,70 +0,0 @@
1
- # Copyright 3D-Speaker (https://github.com/alibaba-damo-academy/3D-Speaker). All Rights Reserved.
2
- # Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
3
-
4
- import torch
5
- import torch.nn as nn
6
- import torch.nn.functional as F
7
-
8
- from modules.campplus.layers import DenseLayer
9
-
10
-
11
- class CosineClassifier(nn.Module):
12
- def __init__(
13
- self,
14
- input_dim,
15
- num_blocks=0,
16
- inter_dim=512,
17
- out_neurons=1000,
18
- ):
19
-
20
- super().__init__()
21
- self.blocks = nn.ModuleList()
22
-
23
- for index in range(num_blocks):
24
- self.blocks.append(
25
- DenseLayer(input_dim, inter_dim, config_str='batchnorm')
26
- )
27
- input_dim = inter_dim
28
-
29
- self.weight = nn.Parameter(
30
- torch.FloatTensor(out_neurons, input_dim)
31
- )
32
- nn.init.xavier_uniform_(self.weight)
33
-
34
- def forward(self, x):
35
- # x: [B, dim]
36
- for layer in self.blocks:
37
- x = layer(x)
38
-
39
- # normalized
40
- x = F.linear(F.normalize(x), F.normalize(self.weight))
41
- return x
42
-
43
- class LinearClassifier(nn.Module):
44
- def __init__(
45
- self,
46
- input_dim,
47
- num_blocks=0,
48
- inter_dim=512,
49
- out_neurons=1000,
50
- ):
51
-
52
- super().__init__()
53
- self.blocks = nn.ModuleList()
54
-
55
- self.nonlinear = nn.ReLU(inplace=True)
56
- for index in range(num_blocks):
57
- self.blocks.append(
58
- DenseLayer(input_dim, inter_dim, bias=True)
59
- )
60
- input_dim = inter_dim
61
-
62
- self.linear = nn.Linear(input_dim, out_neurons, bias=True)
63
-
64
- def forward(self, x):
65
- # x: [B, dim]
66
- x = self.nonlinear(x)
67
- for layer in self.blocks:
68
- x = layer(x)
69
- x = self.linear(x)
70
- return x
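
A short sketch of the cosine head above: with no hidden blocks it scores L2-normalized embeddings against L2-normalized class weights, so the outputs are bounded cosine similarities (module path as in this deleted tree):

import torch
from modules.campplus.classifier import CosineClassifier

clf = CosineClassifier(input_dim=512, out_neurons=1000)
emb = torch.randn(4, 512)              # speaker embeddings, e.g. from CAMPPlus
scores = clf(emb)
print(scores.shape)                    # torch.Size([4, 1000])
print(float(scores.abs().max()))       # bounded by ~1.0 (cosine similarity)
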
modules/campplus/layers.py DELETED
@@ -1,253 +0,0 @@
1
- # Copyright 3D-Speaker (https://github.com/alibaba-damo-academy/3D-Speaker). All Rights Reserved.
2
- # Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
3
-
4
- import torch
5
- import torch.nn.functional as F
6
- import torch.utils.checkpoint as cp
7
- from torch import nn
8
-
9
-
10
- def get_nonlinear(config_str, channels):
11
- nonlinear = nn.Sequential()
12
- for name in config_str.split('-'):
13
- if name == 'relu':
14
- nonlinear.add_module('relu', nn.ReLU(inplace=True))
15
- elif name == 'prelu':
16
- nonlinear.add_module('prelu', nn.PReLU(channels))
17
- elif name == 'batchnorm':
18
- nonlinear.add_module('batchnorm', nn.BatchNorm1d(channels))
19
- elif name == 'batchnorm_':
20
- nonlinear.add_module('batchnorm',
21
- nn.BatchNorm1d(channels, affine=False))
22
- else:
23
- raise ValueError('Unexpected module ({}).'.format(name))
24
- return nonlinear
25
-
26
- def statistics_pooling(x, dim=-1, keepdim=False, unbiased=True, eps=1e-2):
27
- mean = x.mean(dim=dim)
28
- std = x.std(dim=dim, unbiased=unbiased)
29
- stats = torch.cat([mean, std], dim=-1)
30
- if keepdim:
31
- stats = stats.unsqueeze(dim=dim)
32
- return stats
33
-
34
-
35
- class StatsPool(nn.Module):
36
- def forward(self, x):
37
- return statistics_pooling(x)
38
-
39
-
40
- class TDNNLayer(nn.Module):
41
- def __init__(self,
42
- in_channels,
43
- out_channels,
44
- kernel_size,
45
- stride=1,
46
- padding=0,
47
- dilation=1,
48
- bias=False,
49
- config_str='batchnorm-relu'):
50
- super(TDNNLayer, self).__init__()
51
- if padding < 0:
52
- assert kernel_size % 2 == 1, 'Expect equal paddings, but got even kernel size ({})'.format(
53
- kernel_size)
54
- padding = (kernel_size - 1) // 2 * dilation
55
- self.linear = nn.Conv1d(in_channels,
56
- out_channels,
57
- kernel_size,
58
- stride=stride,
59
- padding=padding,
60
- dilation=dilation,
61
- bias=bias)
62
- self.nonlinear = get_nonlinear(config_str, out_channels)
63
-
64
- def forward(self, x):
65
- x = self.linear(x)
66
- x = self.nonlinear(x)
67
- return x
68
-
69
-
70
- class CAMLayer(nn.Module):
71
- def __init__(self,
72
- bn_channels,
73
- out_channels,
74
- kernel_size,
75
- stride,
76
- padding,
77
- dilation,
78
- bias,
79
- reduction=2):
80
- super(CAMLayer, self).__init__()
81
- self.linear_local = nn.Conv1d(bn_channels,
82
- out_channels,
83
- kernel_size,
84
- stride=stride,
85
- padding=padding,
86
- dilation=dilation,
87
- bias=bias)
88
- self.linear1 = nn.Conv1d(bn_channels, bn_channels // reduction, 1)
89
- self.relu = nn.ReLU(inplace=True)
90
- self.linear2 = nn.Conv1d(bn_channels // reduction, out_channels, 1)
91
- self.sigmoid = nn.Sigmoid()
92
-
93
- def forward(self, x):
94
- y = self.linear_local(x)
95
- context = x.mean(-1, keepdim=True)+self.seg_pooling(x)
96
- context = self.relu(self.linear1(context))
97
- m = self.sigmoid(self.linear2(context))
98
- return y*m
99
-
100
- def seg_pooling(self, x, seg_len=100, stype='avg'):
101
- if stype == 'avg':
102
- seg = F.avg_pool1d(x, kernel_size=seg_len, stride=seg_len, ceil_mode=True)
103
- elif stype == 'max':
104
- seg = F.max_pool1d(x, kernel_size=seg_len, stride=seg_len, ceil_mode=True)
105
- else:
106
- raise ValueError('Wrong segment pooling type.')
107
- shape = seg.shape
108
- seg = seg.unsqueeze(-1).expand(*shape, seg_len).reshape(*shape[:-1], -1)
109
- seg = seg[..., :x.shape[-1]]
110
- return seg
111
-
112
-
113
- class CAMDenseTDNNLayer(nn.Module):
114
- def __init__(self,
115
- in_channels,
116
- out_channels,
117
- bn_channels,
118
- kernel_size,
119
- stride=1,
120
- dilation=1,
121
- bias=False,
122
- config_str='batchnorm-relu',
123
- memory_efficient=False):
124
- super(CAMDenseTDNNLayer, self).__init__()
125
- assert kernel_size % 2 == 1, 'Expect equal paddings, but got even kernel size ({})'.format(
126
- kernel_size)
127
- padding = (kernel_size - 1) // 2 * dilation
128
- self.memory_efficient = memory_efficient
129
- self.nonlinear1 = get_nonlinear(config_str, in_channels)
130
- self.linear1 = nn.Conv1d(in_channels, bn_channels, 1, bias=False)
131
- self.nonlinear2 = get_nonlinear(config_str, bn_channels)
132
- self.cam_layer = CAMLayer(bn_channels,
133
- out_channels,
134
- kernel_size,
135
- stride=stride,
136
- padding=padding,
137
- dilation=dilation,
138
- bias=bias)
139
-
140
- def bn_function(self, x):
141
- return self.linear1(self.nonlinear1(x))
142
-
143
- def forward(self, x):
144
- if self.training and self.memory_efficient:
145
- x = cp.checkpoint(self.bn_function, x)
146
- else:
147
- x = self.bn_function(x)
148
- x = self.cam_layer(self.nonlinear2(x))
149
- return x
150
-
151
-
152
- class CAMDenseTDNNBlock(nn.ModuleList):
153
- def __init__(self,
154
- num_layers,
155
- in_channels,
156
- out_channels,
157
- bn_channels,
158
- kernel_size,
159
- stride=1,
160
- dilation=1,
161
- bias=False,
162
- config_str='batchnorm-relu',
163
- memory_efficient=False):
164
- super(CAMDenseTDNNBlock, self).__init__()
165
- for i in range(num_layers):
166
- layer = CAMDenseTDNNLayer(in_channels=in_channels + i * out_channels,
167
- out_channels=out_channels,
168
- bn_channels=bn_channels,
169
- kernel_size=kernel_size,
170
- stride=stride,
171
- dilation=dilation,
172
- bias=bias,
173
- config_str=config_str,
174
- memory_efficient=memory_efficient)
175
- self.add_module('tdnnd%d' % (i + 1), layer)
176
-
177
- def forward(self, x):
178
- for layer in self:
179
- x = torch.cat([x, layer(x)], dim=1)
180
- return x
181
-
182
-
183
- class TransitLayer(nn.Module):
184
- def __init__(self,
185
- in_channels,
186
- out_channels,
187
- bias=True,
188
- config_str='batchnorm-relu'):
189
- super(TransitLayer, self).__init__()
190
- self.nonlinear = get_nonlinear(config_str, in_channels)
191
- self.linear = nn.Conv1d(in_channels, out_channels, 1, bias=bias)
192
-
193
- def forward(self, x):
194
- x = self.nonlinear(x)
195
- x = self.linear(x)
196
- return x
197
-
198
-
199
- class DenseLayer(nn.Module):
200
- def __init__(self,
201
- in_channels,
202
- out_channels,
203
- bias=False,
204
- config_str='batchnorm-relu'):
205
- super(DenseLayer, self).__init__()
206
- self.linear = nn.Conv1d(in_channels, out_channels, 1, bias=bias)
207
- self.nonlinear = get_nonlinear(config_str, out_channels)
208
-
209
- def forward(self, x):
210
- if len(x.shape) == 2:
211
- x = self.linear(x.unsqueeze(dim=-1)).squeeze(dim=-1)
212
- else:
213
- x = self.linear(x)
214
- x = self.nonlinear(x)
215
- return x
216
-
217
-
218
- class BasicResBlock(nn.Module):
219
- expansion = 1
220
-
221
- def __init__(self, in_planes, planes, stride=1):
222
- super(BasicResBlock, self).__init__()
223
- self.conv1 = nn.Conv2d(in_planes,
224
- planes,
225
- kernel_size=3,
226
- stride=(stride, 1),
227
- padding=1,
228
- bias=False)
229
- self.bn1 = nn.BatchNorm2d(planes)
230
- self.conv2 = nn.Conv2d(planes,
231
- planes,
232
- kernel_size=3,
233
- stride=1,
234
- padding=1,
235
- bias=False)
236
- self.bn2 = nn.BatchNorm2d(planes)
237
-
238
- self.shortcut = nn.Sequential()
239
- if stride != 1 or in_planes != self.expansion * planes:
240
- self.shortcut = nn.Sequential(
241
- nn.Conv2d(in_planes,
242
- self.expansion * planes,
243
- kernel_size=1,
244
- stride=(stride, 1),
245
- bias=False),
246
- nn.BatchNorm2d(self.expansion * planes))
247
-
248
- def forward(self, x):
249
- out = F.relu(self.bn1(self.conv1(x)))
250
- out = self.bn2(self.conv2(out))
251
- out += self.shortcut(x)
252
- out = F.relu(out)
253
- return out
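The classes deleted above are the frame-level building blocks of a CAM++/D-TDNN style speaker encoder. As a rough illustrative sketch of how they compose, assuming it were appended to the same file (the tensor sizes are picked arbitrarily): the dense block concatenates each layer's output onto the channel dimension and StatsPool collapses the time axis into mean-and-std statistics.

import torch

x = torch.randn(2, 80, 200)                            # (batch, mel bins, frames)
tdnn = TDNNLayer(80, 512, kernel_size=5, padding=-1)   # padding < 0 -> "same" padding
block = CAMDenseTDNNBlock(num_layers=2, in_channels=512, out_channels=64,
                          bn_channels=128, kernel_size=3)
pool = StatsPool()

h = block(tdnn(x))   # (2, 512 + 2 * 64, 200): dense channel-wise concatenation
emb = pool(h)        # (2, 2 * 640): mean and std pooled over the time axis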
modules/commons.py DELETED
@@ -1,490 +0,0 @@
1
- import math
2
- import numpy as np
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
- from munch import Munch
7
- import json
8
-
9
-
10
- class AttrDict(dict):
11
- def __init__(self, *args, **kwargs):
12
- super(AttrDict, self).__init__(*args, **kwargs)
13
- self.__dict__ = self
14
-
15
-
16
- def init_weights(m, mean=0.0, std=0.01):
17
- classname = m.__class__.__name__
18
- if classname.find("Conv") != -1:
19
- m.weight.data.normal_(mean, std)
20
-
21
-
22
- def get_padding(kernel_size, dilation=1):
23
- return int((kernel_size * dilation - dilation) / 2)
24
-
25
-
26
- def convert_pad_shape(pad_shape):
27
- l = pad_shape[::-1]
28
- pad_shape = [item for sublist in l for item in sublist]
29
- return pad_shape
30
-
31
-
32
- def intersperse(lst, item):
33
- result = [item] * (len(lst) * 2 + 1)
34
- result[1::2] = lst
35
- return result
36
-
37
-
38
- def kl_divergence(m_p, logs_p, m_q, logs_q):
39
- """KL(P||Q)"""
40
- kl = (logs_q - logs_p) - 0.5
41
- kl += (
42
- 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
43
- )
44
- return kl
45
-
46
-
47
- def rand_gumbel(shape):
48
- """Sample from the Gumbel distribution, protect from overflows."""
49
- uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
50
- return -torch.log(-torch.log(uniform_samples))
51
-
52
-
53
- def rand_gumbel_like(x):
54
- g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
55
- return g
56
-
57
-
58
- def slice_segments(x, ids_str, segment_size=4):
59
- ret = torch.zeros_like(x[:, :, :segment_size])
60
- for i in range(x.size(0)):
61
- idx_str = ids_str[i]
62
- idx_end = idx_str + segment_size
63
- ret[i] = x[i, :, idx_str:idx_end]
64
- return ret
65
-
66
-
67
- def slice_segments_audio(x, ids_str, segment_size=4):
68
- ret = torch.zeros_like(x[:, :segment_size])
69
- for i in range(x.size(0)):
70
- idx_str = ids_str[i]
71
- idx_end = idx_str + segment_size
72
- ret[i] = x[i, idx_str:idx_end]
73
- return ret
74
-
75
-
76
- def rand_slice_segments(x, x_lengths=None, segment_size=4):
77
- b, d, t = x.size()
78
- if x_lengths is None:
79
- x_lengths = t
80
- ids_str_max = x_lengths - segment_size + 1
81
- ids_str = ((torch.rand([b]).to(device=x.device) * ids_str_max).clip(0)).to(
82
- dtype=torch.long
83
- )
84
- ret = slice_segments(x, ids_str, segment_size)
85
- return ret, ids_str
86
-
87
-
88
- def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
89
- position = torch.arange(length, dtype=torch.float)
90
- num_timescales = channels // 2
91
- log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
92
- num_timescales - 1
93
- )
94
- inv_timescales = min_timescale * torch.exp(
95
- torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
96
- )
97
- scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
98
- signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
99
- signal = F.pad(signal, [0, 0, 0, channels % 2])
100
- signal = signal.view(1, channels, length)
101
- return signal
102
-
103
-
104
- def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
105
- b, channels, length = x.size()
106
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
107
- return x + signal.to(dtype=x.dtype, device=x.device)
108
-
109
-
110
- def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
111
- b, channels, length = x.size()
112
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
113
- return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
114
-
115
-
116
- def subsequent_mask(length):
117
- mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
118
- return mask
119
-
120
-
121
- @torch.jit.script
122
- def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
123
- n_channels_int = n_channels[0]
124
- in_act = input_a + input_b
125
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
126
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
127
- acts = t_act * s_act
128
- return acts
129
-
130
-
131
- def convert_pad_shape(pad_shape):
132
- l = pad_shape[::-1]
133
- pad_shape = [item for sublist in l for item in sublist]
134
- return pad_shape
135
-
136
-
137
- def shift_1d(x):
138
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
139
- return x
140
-
141
-
142
- def sequence_mask(length, max_length=None):
143
- if max_length is None:
144
- max_length = length.max()
145
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
146
- return x.unsqueeze(0) < length.unsqueeze(1)
147
-
148
-
149
- def avg_with_mask(x, mask):
150
- assert mask.dtype == torch.float, "Mask should be float"
151
-
152
- if mask.ndim == 2:
153
- mask = mask.unsqueeze(1)
154
-
155
- if mask.shape[1] == 1:
156
- mask = mask.expand_as(x)
157
-
158
- return (x * mask).sum() / mask.sum()
159
-
160
-
161
- def generate_path(duration, mask):
162
- """
163
- duration: [b, 1, t_x]
164
- mask: [b, 1, t_y, t_x]
165
- """
166
- device = duration.device
167
-
168
- b, _, t_y, t_x = mask.shape
169
- cum_duration = torch.cumsum(duration, -1)
170
-
171
- cum_duration_flat = cum_duration.view(b * t_x)
172
- path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
173
- path = path.view(b, t_x, t_y)
174
- path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
175
- path = path.unsqueeze(1).transpose(2, 3) * mask
176
- return path
177
-
178
-
179
- def clip_grad_value_(parameters, clip_value, norm_type=2):
180
- if isinstance(parameters, torch.Tensor):
181
- parameters = [parameters]
182
- parameters = list(filter(lambda p: p.grad is not None, parameters))
183
- norm_type = float(norm_type)
184
- if clip_value is not None:
185
- clip_value = float(clip_value)
186
-
187
- total_norm = 0
188
- for p in parameters:
189
- param_norm = p.grad.data.norm(norm_type)
190
- total_norm += param_norm.item() ** norm_type
191
- if clip_value is not None:
192
- p.grad.data.clamp_(min=-clip_value, max=clip_value)
193
- total_norm = total_norm ** (1.0 / norm_type)
194
- return total_norm
195
-
196
-
197
- def log_norm(x, mean=-4, std=4, dim=2):
198
- """
199
- normalized log mel -> mel -> norm -> log(norm)
200
- """
201
- x = torch.log(torch.exp(x * std + mean).norm(dim=dim))
202
- return x
203
-
204
-
205
- def load_F0_models(path):
206
- # load F0 model
207
- from .JDC.model import JDCNet
208
-
209
- F0_model = JDCNet(num_class=1, seq_len=192)
210
- params = torch.load(path, map_location="cpu")["net"]
211
- F0_model.load_state_dict(params)
212
- _ = F0_model.train()
213
-
214
- return F0_model
215
-
216
-
217
- def modify_w2v_forward(self, output_layer=15):
218
- """
219
- change forward method of w2v encoder to get its intermediate layer output
220
- :param self:
221
- :param output_layer: index of the intermediate layer whose output is returned
222
- :return:
223
- """
224
- from transformers.modeling_outputs import BaseModelOutput
225
-
226
- def forward(
227
- hidden_states,
228
- attention_mask=None,
229
- output_attentions=False,
230
- output_hidden_states=False,
231
- return_dict=True,
232
- ):
233
- all_hidden_states = () if output_hidden_states else None
234
- all_self_attentions = () if output_attentions else None
235
-
236
- conv_attention_mask = attention_mask
237
- if attention_mask is not None:
238
- # make sure padded tokens output 0
239
- hidden_states = hidden_states.masked_fill(
240
- ~attention_mask.bool().unsqueeze(-1), 0.0
241
- )
242
-
243
- # extend attention_mask
244
- attention_mask = 1.0 - attention_mask[:, None, None, :].to(
245
- dtype=hidden_states.dtype
246
- )
247
- attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
248
- attention_mask = attention_mask.expand(
249
- attention_mask.shape[0],
250
- 1,
251
- attention_mask.shape[-1],
252
- attention_mask.shape[-1],
253
- )
254
-
255
- hidden_states = self.dropout(hidden_states)
256
-
257
- if self.embed_positions is not None:
258
- relative_position_embeddings = self.embed_positions(hidden_states)
259
- else:
260
- relative_position_embeddings = None
261
-
262
- deepspeed_zero3_is_enabled = False
263
-
264
- for i, layer in enumerate(self.layers):
265
- if output_hidden_states:
266
- all_hidden_states = all_hidden_states + (hidden_states,)
267
-
268
- # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
269
- dropout_probability = torch.rand([])
270
-
271
- skip_the_layer = (
272
- True
273
- if self.training and (dropout_probability < self.config.layerdrop)
274
- else False
275
- )
276
- if not skip_the_layer or deepspeed_zero3_is_enabled:
277
- # under deepspeed zero3 all gpus must run in sync
278
- if self.gradient_checkpointing and self.training:
279
- layer_outputs = self._gradient_checkpointing_func(
280
- layer.__call__,
281
- hidden_states,
282
- attention_mask,
283
- relative_position_embeddings,
284
- output_attentions,
285
- conv_attention_mask,
286
- )
287
- else:
288
- layer_outputs = layer(
289
- hidden_states,
290
- attention_mask=attention_mask,
291
- relative_position_embeddings=relative_position_embeddings,
292
- output_attentions=output_attentions,
293
- conv_attention_mask=conv_attention_mask,
294
- )
295
- hidden_states = layer_outputs[0]
296
-
297
- if skip_the_layer:
298
- layer_outputs = (None, None)
299
-
300
- if output_attentions:
301
- all_self_attentions = all_self_attentions + (layer_outputs[1],)
302
-
303
- if i == output_layer - 1:
304
- break
305
-
306
- if output_hidden_states:
307
- all_hidden_states = all_hidden_states + (hidden_states,)
308
-
309
- if not return_dict:
310
- return tuple(
311
- v
312
- for v in [hidden_states, all_hidden_states, all_self_attentions]
313
- if v is not None
314
- )
315
- return BaseModelOutput(
316
- last_hidden_state=hidden_states,
317
- hidden_states=all_hidden_states,
318
- attentions=all_self_attentions,
319
- )
320
-
321
- return forward
322
-
323
-
324
- MATPLOTLIB_FLAG = False
325
-
326
-
327
- def plot_spectrogram_to_numpy(spectrogram):
328
- global MATPLOTLIB_FLAG
329
- if not MATPLOTLIB_FLAG:
330
- import matplotlib
331
- import logging
332
-
333
- matplotlib.use("Agg")
334
- MATPLOTLIB_FLAG = True
335
- mpl_logger = logging.getLogger("matplotlib")
336
- mpl_logger.setLevel(logging.WARNING)
337
- import matplotlib.pylab as plt
338
- import numpy as np
339
-
340
- fig, ax = plt.subplots(figsize=(10, 2))
341
- im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none")
342
- plt.colorbar(im, ax=ax)
343
- plt.xlabel("Frames")
344
- plt.ylabel("Channels")
345
- plt.tight_layout()
346
-
347
- fig.canvas.draw()
348
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="")
349
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
350
- plt.close()
351
- return data
352
-
353
-
354
- def normalize_f0(f0_sequence):
355
- # Remove unvoiced frames (replace with -1)
356
- voiced_indices = np.where(f0_sequence > 0)[0]
357
- f0_voiced = f0_sequence[voiced_indices]
358
-
359
- # Convert to log scale
360
- log_f0 = np.log2(f0_voiced)
361
-
362
- # Calculate mean and standard deviation
363
- mean_f0 = np.mean(log_f0)
364
- std_f0 = np.std(log_f0)
365
-
366
- # Normalize the F0 sequence
367
- normalized_f0 = (log_f0 - mean_f0) / std_f0
368
-
369
- # Create the normalized F0 sequence with unvoiced frames
370
- normalized_sequence = np.zeros_like(f0_sequence)
371
- normalized_sequence[voiced_indices] = normalized_f0
372
- normalized_sequence[f0_sequence <= 0] = -1 # Assign -1 to unvoiced frames
373
-
374
- return normalized_sequence
375
-
376
-
377
- def build_model(args, stage="DiT"):
378
- if stage == "DiT":
379
- from modules.flow_matching import CFM
380
- from modules.length_regulator import InterpolateRegulator
381
-
382
- length_regulator = InterpolateRegulator(
383
- channels=args.length_regulator.channels,
384
- sampling_ratios=args.length_regulator.sampling_ratios,
385
- is_discrete=args.length_regulator.is_discrete,
386
- in_channels=args.length_regulator.in_channels if hasattr(args.length_regulator, "in_channels") else None,
387
- vector_quantize=args.length_regulator.vector_quantize if hasattr(args.length_regulator, "vector_quantize") else False,
388
- codebook_size=args.length_regulator.content_codebook_size,
389
- n_codebooks=args.length_regulator.n_codebooks if hasattr(args.length_regulator, "n_codebooks") else 1,
390
- quantizer_dropout=args.length_regulator.quantizer_dropout if hasattr(args.length_regulator, "quantizer_dropout") else 0.0,
391
- f0_condition=args.length_regulator.f0_condition if hasattr(args.length_regulator, "f0_condition") else False,
392
- n_f0_bins=args.length_regulator.n_f0_bins if hasattr(args.length_regulator, "n_f0_bins") else 512,
393
- )
394
- cfm = CFM(args)
395
- nets = Munch(
396
- cfm=cfm,
397
- length_regulator=length_regulator,
398
- )
399
- elif stage == 'codec':
400
- from dac.model.dac import Encoder
401
- from modules.quantize import (
402
- FAquantizer,
403
- )
404
-
405
- encoder = Encoder(
406
- d_model=args.DAC.encoder_dim,
407
- strides=args.DAC.encoder_rates,
408
- d_latent=1024,
409
- causal=args.causal,
410
- lstm=args.lstm,
411
- )
412
-
413
- quantizer = FAquantizer(
414
- in_dim=1024,
415
- n_p_codebooks=1,
416
- n_c_codebooks=args.n_c_codebooks,
417
- n_t_codebooks=2,
418
- n_r_codebooks=3,
419
- codebook_size=1024,
420
- codebook_dim=8,
421
- quantizer_dropout=0.5,
422
- causal=args.causal,
423
- separate_prosody_encoder=args.separate_prosody_encoder,
424
- timbre_norm=args.timbre_norm,
425
- )
426
-
427
- nets = Munch(
428
- encoder=encoder,
429
- quantizer=quantizer,
430
- )
431
- else:
432
- raise ValueError(f"Unknown stage: {stage}")
433
-
434
- return nets
435
-
436
-
437
- def load_checkpoint(
438
- model,
439
- optimizer,
440
- path,
441
- load_only_params=True,
442
- ignore_modules=[],
443
- is_distributed=False,
444
- ):
445
- state = torch.load(path, map_location="cpu")
446
- params = state["net"]
447
- for key in model:
448
- if key in params and key not in ignore_modules:
449
- if not is_distributed:
450
- # strip prefix of DDP (module.), create a new OrderedDict that does not contain the prefix
451
- for k in list(params[key].keys()):
452
- if k.startswith("module."):
453
- params[key][k[len("module.") :]] = params[key][k]
454
- del params[key][k]
455
- model_state_dict = model[key].state_dict()
456
- # keep only the key/value pairs whose shapes match the model's state dict
457
- filtered_state_dict = {
458
- k: v
459
- for k, v in params[key].items()
460
- if k in model_state_dict and v.shape == model_state_dict[k].shape
461
- }
462
- skipped_keys = set(params[key].keys()) - set(filtered_state_dict.keys())
463
- if skipped_keys:
464
- print(
465
- f"Warning: Skipped loading some keys due to shape mismatch: {skipped_keys}"
466
- )
467
- print("%s loaded" % key)
468
- model[key].load_state_dict(filtered_state_dict, strict=False)
469
- _ = [model[key].eval() for key in model]
470
-
471
- if not load_only_params:
472
- epoch = state["epoch"] + 1
473
- iters = state["iters"]
474
- optimizer.load_state_dict(state["optimizer"])
475
- optimizer.load_scheduler_state_dict(state["scheduler"])
476
-
477
- else:
478
- epoch = 0
479
- iters = 0
480
-
481
- return model, optimizer, epoch, iters
482
-
483
-
484
- def recursive_munch(d):
485
- if isinstance(d, dict):
486
- return Munch((k, recursive_munch(v)) for k, v in d.items())
487
- elif isinstance(d, list):
488
- return [recursive_munch(v) for v in d]
489
- else:
490
- return d
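Most of commons.py is small glue used across the repo. A minimal illustrative sketch of two of the helpers above, assuming it were run inside this module so the existing imports apply:

import torch

lengths = torch.tensor([3, 5])
mask = sequence_mask(lengths)    # (2, 5) boolean mask, True where t < length
# [[ True,  True,  True, False, False],
#  [ True,  True,  True,  True,  True]]

cfg = recursive_munch({"DiT": {"hidden_dim": 512, "depth": 13}})
print(cfg.DiT.hidden_dim)        # Munch allows attribute-style access -> 512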
modules/diffusion_transformer.py DELETED
@@ -1,240 +0,0 @@
1
- import torch
2
- from torch import nn
3
- import math
4
-
5
- from modules.gpt_fast.model import ModelArgs, Transformer
6
- # from modules.torchscript_modules.gpt_fast_model import ModelArgs, Transformer
7
- from modules.wavenet import WN
8
- from modules.commons import sequence_mask
9
-
10
- from torch.nn.utils import weight_norm
11
-
12
- def modulate(x, shift, scale):
13
- return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
14
-
15
-
16
- #################################################################################
17
- # Embedding Layers for Timesteps and Class Labels #
18
- #################################################################################
19
-
20
- class TimestepEmbedder(nn.Module):
21
- """
22
- Embeds scalar timesteps into vector representations.
23
- """
24
- def __init__(self, hidden_size, frequency_embedding_size=256):
25
- super().__init__()
26
- self.mlp = nn.Sequential(
27
- nn.Linear(frequency_embedding_size, hidden_size, bias=True),
28
- nn.SiLU(),
29
- nn.Linear(hidden_size, hidden_size, bias=True),
30
- )
31
- self.frequency_embedding_size = frequency_embedding_size
32
- self.max_period = 10000
33
- self.scale = 1000
34
-
35
- half = frequency_embedding_size // 2
36
- freqs = torch.exp(
37
- -math.log(self.max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
38
- )
39
- self.register_buffer("freqs", freqs)
40
-
41
- def timestep_embedding(self, t):
42
- """
43
- Create sinusoidal timestep embeddings.
44
- :param t: a 1-D Tensor of N indices, one per batch element.
45
- These may be fractional.
46
- :param dim: the dimension of the output.
47
- :param max_period: controls the minimum frequency of the embeddings.
48
- :return: an (N, D) Tensor of positional embeddings.
49
- """
50
- # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
51
-
52
- args = self.scale * t[:, None].float() * self.freqs[None]
53
- embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
54
- if self.frequency_embedding_size % 2:
55
- embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
56
- return embedding
57
-
58
- def forward(self, t):
59
- t_freq = self.timestep_embedding(t)
60
- t_emb = self.mlp(t_freq)
61
- return t_emb
62
-
63
-
64
- class StyleEmbedder(nn.Module):
65
- """
66
- Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.
67
- """
68
- def __init__(self, input_size, hidden_size, dropout_prob):
69
- super().__init__()
70
- use_cfg_embedding = dropout_prob > 0
71
- self.embedding_table = nn.Embedding(int(use_cfg_embedding), hidden_size)
72
- self.style_in = weight_norm(nn.Linear(input_size, hidden_size, bias=True))
73
- self.input_size = input_size
74
- self.dropout_prob = dropout_prob
75
-
76
- def forward(self, labels, train, force_drop_ids=None):
77
- use_dropout = self.dropout_prob > 0
78
- if (train and use_dropout) or (force_drop_ids is not None):
79
- labels = self.token_drop(labels, force_drop_ids)
80
- else:
81
- labels = self.style_in(labels)
82
- embeddings = labels
83
- return embeddings
84
-
85
- class FinalLayer(nn.Module):
86
- """
87
- The final layer of DiT.
88
- """
89
- def __init__(self, hidden_size, patch_size, out_channels):
90
- super().__init__()
91
- self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
92
- self.linear = weight_norm(nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True))
93
- self.adaLN_modulation = nn.Sequential(
94
- nn.SiLU(),
95
- nn.Linear(hidden_size, 2 * hidden_size, bias=True)
96
- )
97
-
98
- def forward(self, x, c):
99
- shift, scale = self.adaLN_modulation(c).chunk(2, dim=1)
100
- x = modulate(self.norm_final(x), shift, scale)
101
- x = self.linear(x)
102
- return x
103
-
104
- class DiT(torch.nn.Module):
105
- def __init__(
106
- self,
107
- args
108
- ):
109
- super(DiT, self).__init__()
110
- self.time_as_token = args.DiT.time_as_token if hasattr(args.DiT, 'time_as_token') else False
111
- self.style_as_token = args.DiT.style_as_token if hasattr(args.DiT, 'style_as_token') else False
112
- self.uvit_skip_connection = args.DiT.uvit_skip_connection if hasattr(args.DiT, 'uvit_skip_connection') else False
113
- model_args = ModelArgs(
114
- block_size=16384,#args.DiT.block_size,
115
- n_layer=args.DiT.depth,
116
- n_head=args.DiT.num_heads,
117
- dim=args.DiT.hidden_dim,
118
- head_dim=args.DiT.hidden_dim // args.DiT.num_heads,
119
- vocab_size=1024,
120
- uvit_skip_connection=self.uvit_skip_connection,
121
- )
122
- self.transformer = Transformer(model_args)
123
- self.in_channels = args.DiT.in_channels
124
- self.out_channels = args.DiT.in_channels
125
- self.num_heads = args.DiT.num_heads
126
-
127
- self.x_embedder = weight_norm(nn.Linear(args.DiT.in_channels, args.DiT.hidden_dim, bias=True))
128
-
129
- self.content_type = args.DiT.content_type # 'discrete' or 'continuous'
130
- self.content_codebook_size = args.DiT.content_codebook_size # for discrete content
131
- self.content_dim = args.DiT.content_dim # for continuous content
132
- self.cond_embedder = nn.Embedding(args.DiT.content_codebook_size, args.DiT.hidden_dim) # discrete content
133
- self.cond_projection = nn.Linear(args.DiT.content_dim, args.DiT.hidden_dim, bias=True) # continuous content
134
-
135
- self.is_causal = args.DiT.is_causal
136
-
137
- self.n_f0_bins = args.DiT.n_f0_bins
138
- self.f0_bins = torch.arange(2, 1024, 1024 // args.DiT.n_f0_bins)
139
- self.f0_embedder = nn.Embedding(args.DiT.n_f0_bins, args.DiT.hidden_dim)
140
- self.f0_condition = args.DiT.f0_condition
141
-
142
- self.t_embedder = TimestepEmbedder(args.DiT.hidden_dim)
143
- self.t_embedder2 = TimestepEmbedder(args.wavenet.hidden_dim)
144
- # self.style_embedder1 = weight_norm(nn.Linear(1024, args.DiT.hidden_dim, bias=True))
145
- # self.style_embedder2 = weight_norm(nn.Linear(1024, args.style_encoder.dim, bias=True))
146
-
147
- input_pos = torch.arange(16384)
148
- self.register_buffer("input_pos", input_pos)
149
-
150
- self.conv1 = nn.Linear(args.DiT.hidden_dim, args.wavenet.hidden_dim)
151
- self.conv2 = nn.Conv1d(args.wavenet.hidden_dim, args.DiT.in_channels, 1)
152
- self.final_layer_type = args.DiT.final_layer_type # mlp or wavenet
153
- if self.final_layer_type == 'wavenet':
154
- self.wavenet = WN(hidden_channels=args.wavenet.hidden_dim,
155
- kernel_size=args.wavenet.kernel_size,
156
- dilation_rate=args.wavenet.dilation_rate,
157
- n_layers=args.wavenet.num_layers,
158
- gin_channels=args.wavenet.hidden_dim,
159
- p_dropout=args.wavenet.p_dropout,
160
- causal=False)
161
- self.final_layer = FinalLayer(args.wavenet.hidden_dim, 1, args.wavenet.hidden_dim)
162
- else:
163
- self.final_mlp = nn.Sequential(
164
- nn.Linear(args.DiT.hidden_dim, args.DiT.hidden_dim),
165
- nn.SiLU(),
166
- nn.Linear(args.DiT.hidden_dim, args.DiT.in_channels),
167
- )
168
- self.transformer_style_condition = args.DiT.style_condition
169
- self.wavenet_style_condition = args.wavenet.style_condition
170
- assert args.DiT.style_condition == args.wavenet.style_condition
171
-
172
- self.class_dropout_prob = args.DiT.class_dropout_prob
173
- self.content_mask_embedder = nn.Embedding(1, args.DiT.hidden_dim)
174
- self.res_projection = nn.Linear(args.DiT.hidden_dim, args.wavenet.hidden_dim) # residual connection from transformer output to final output
175
- self.long_skip_connection = args.DiT.long_skip_connection
176
- self.skip_linear = nn.Linear(args.DiT.hidden_dim + args.DiT.in_channels, args.DiT.hidden_dim)
177
-
178
- self.cond_x_merge_linear = nn.Linear(args.DiT.hidden_dim + args.DiT.in_channels * 2 +
179
- args.style_encoder.dim * self.transformer_style_condition * (not self.style_as_token),
180
- args.DiT.hidden_dim)
181
- if self.style_as_token:
182
- self.style_in = nn.Linear(args.style_encoder.dim, args.DiT.hidden_dim)
183
-
184
- def setup_caches(self, max_batch_size, max_seq_length):
185
- self.transformer.setup_caches(max_batch_size, max_seq_length, use_kv_cache=False)
186
- def forward(self, x, prompt_x, x_lens, t, style, cond, f0=None, mask_content=False):
187
- class_dropout = False
188
- if self.training and torch.rand(1) < self.class_dropout_prob:
189
- class_dropout = True
190
- if not self.training and mask_content:
191
- class_dropout = True
192
- # cond_in_module = self.cond_embedder if self.content_type == 'discrete' else self.cond_projection
193
- cond_in_module = self.cond_projection
194
-
195
- B, _, T = x.size()
196
-
197
-
198
- t1 = self.t_embedder(t) # (N, D)
199
-
200
- cond = cond_in_module(cond)
201
- if self.f0_condition and f0 is not None:
202
- quantized_f0 = torch.bucketize(f0, self.f0_bins.to(f0.device)) # (N, T)
203
- cond = cond + self.f0_embedder(quantized_f0)
204
-
205
- x = x.transpose(1, 2)
206
- prompt_x = prompt_x.transpose(1, 2)
207
-
208
- x_in = torch.cat([x, prompt_x, cond], dim=-1)
209
- if self.transformer_style_condition and not self.style_as_token:
210
- x_in = torch.cat([x_in, style[:, None, :].repeat(1, T, 1)], dim=-1)
211
- if class_dropout:
212
- x_in[..., self.in_channels:] = x_in[..., self.in_channels:] * 0
213
- x_in = self.cond_x_merge_linear(x_in) # (N, T, D)
214
-
215
- if self.style_as_token:
216
- style = self.style_in(style)
217
- style = torch.zeros_like(style) if class_dropout else style
218
- x_in = torch.cat([style.unsqueeze(1), x_in], dim=1)
219
- if self.time_as_token:
220
- x_in = torch.cat([t1.unsqueeze(1), x_in], dim=1)
221
- x_mask = sequence_mask(x_lens + self.style_as_token + self.time_as_token).to(x.device).unsqueeze(1)
222
- input_pos = self.input_pos[:x_in.size(1)] # (T,)
223
- x_mask_expanded = x_mask[:, None, :].repeat(1, 1, x_in.size(1), 1) if not self.is_causal else None
224
- x_res = self.transformer(x_in, None if self.time_as_token else t1.unsqueeze(1), input_pos, x_mask_expanded)
225
- x_res = x_res[:, 1:] if self.time_as_token else x_res
226
- x_res = x_res[:, 1:] if self.style_as_token else x_res
227
- if self.long_skip_connection:
228
- x_res = self.skip_linear(torch.cat([x_res, x], dim=-1))
229
- if self.final_layer_type == 'wavenet':
230
- x = self.conv1(x_res)
231
- x = x.transpose(1, 2)
232
- t2 = self.t_embedder2(t)
233
- x = self.wavenet(x, x_mask, g=t2.unsqueeze(2)).transpose(1, 2) + self.res_projection(
234
- x_res) # long residual connection
235
- x = self.final_layer(x, t1).transpose(1, 2)
236
- x = self.conv2(x)
237
- else:
238
- x = self.final_mlp(x_res)
239
- x = x.transpose(1, 2)
240
- return x
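DiT itself needs a full config object to instantiate, but the TimestepEmbedder above is self-contained. A small illustrative sketch, assuming it were run inside this module:

import torch

t_embedder = TimestepEmbedder(hidden_size=512, frequency_embedding_size=256)
t = torch.rand(4)            # four diffusion timesteps in [0, 1)
emb = t_embedder(t)          # sinusoidal features passed through a two-layer SiLU MLP
print(emb.shape)             # torch.Size([4, 512])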
modules/encodec.py DELETED
@@ -1,292 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- """Convolutional layers wrappers and utilities."""
8
-
9
- import math
10
- import typing as tp
11
- import warnings
12
-
13
- import torch
14
- from torch import nn
15
- from torch.nn import functional as F
16
- from torch.nn.utils import spectral_norm, weight_norm
17
-
18
- import typing as tp
19
-
20
- import einops
21
-
22
-
23
- class ConvLayerNorm(nn.LayerNorm):
24
- """
25
- Convolution-friendly LayerNorm that moves channels to last dimensions
26
- before running the normalization and moves them back to original position right after.
27
- """
28
- def __init__(self, normalized_shape: tp.Union[int, tp.List[int], torch.Size], **kwargs):
29
- super().__init__(normalized_shape, **kwargs)
30
-
31
- def forward(self, x):
32
- x = einops.rearrange(x, 'b ... t -> b t ...')
33
- x = super().forward(x)
34
- x = einops.rearrange(x, 'b t ... -> b ... t')
35
- return x
36
-
37
-
38
- CONV_NORMALIZATIONS = frozenset(['none', 'weight_norm', 'spectral_norm',
39
- 'time_layer_norm', 'layer_norm', 'time_group_norm'])
40
-
41
-
42
- def apply_parametrization_norm(module: nn.Module, norm: str = 'none') -> nn.Module:
43
- assert norm in CONV_NORMALIZATIONS
44
- if norm == 'weight_norm':
45
- return weight_norm(module)
46
- elif norm == 'spectral_norm':
47
- return spectral_norm(module)
48
- else:
49
- # We already check was in CONV_NORMALIZATION, so any other choice
50
- # doesn't need reparametrization.
51
- return module
52
-
53
-
54
- def get_norm_module(module: nn.Module, causal: bool = False, norm: str = 'none', **norm_kwargs) -> nn.Module:
55
- """Return the proper normalization module. If causal is True, this will ensure the returned
56
- module is causal, or return an error if the normalization doesn't support causal evaluation.
57
- """
58
- assert norm in CONV_NORMALIZATIONS
59
- if norm == 'layer_norm':
60
- assert isinstance(module, nn.modules.conv._ConvNd)
61
- return ConvLayerNorm(module.out_channels, **norm_kwargs)
62
- elif norm == 'time_group_norm':
63
- if causal:
64
- raise ValueError("GroupNorm doesn't support causal evaluation.")
65
- assert isinstance(module, nn.modules.conv._ConvNd)
66
- return nn.GroupNorm(1, module.out_channels, **norm_kwargs)
67
- else:
68
- return nn.Identity()
69
-
70
-
71
- def get_extra_padding_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int,
72
- padding_total: int = 0) -> int:
73
- """See `pad_for_conv1d`.
74
- """
75
- length = x.shape[-1]
76
- n_frames = (length - kernel_size + padding_total) / stride + 1
77
- ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total)
78
- return ideal_length - length
79
-
80
-
81
- def pad_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, padding_total: int = 0):
82
- """Pad for a convolution to make sure that the last window is full.
83
- Extra padding is added at the end. This is required to ensure that we can rebuild
84
- an output of the same length, as otherwise, even with padding, some time steps
85
- might get removed.
86
- For instance, with total padding = 4, kernel size = 4, stride = 2:
87
- 0 0 1 2 3 4 5 0 0 # (0s are padding)
88
- 1 2 3 # (output frames of a convolution, last 0 is never used)
89
- 0 0 1 2 3 4 5 0 # (output of tr. conv., but pos. 5 is going to get removed as padding)
90
- 1 2 3 4 # once the padding is removed, we are missing one time step!
91
- """
92
- extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)
93
- return F.pad(x, (0, extra_padding))
94
-
95
-
96
- def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'zero', value: float = 0.):
97
- """Tiny wrapper around F.pad, just to allow for reflect padding on small input.
98
- If this is the case, we insert extra 0 padding to the right before the reflection happens.
99
- """
100
- length = x.shape[-1]
101
- padding_left, padding_right = paddings
102
- assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
103
- if mode == 'reflect':
104
- max_pad = max(padding_left, padding_right)
105
- extra_pad = 0
106
- if length <= max_pad:
107
- extra_pad = max_pad - length + 1
108
- x = F.pad(x, (0, extra_pad))
109
- padded = F.pad(x, paddings, mode, value)
110
- end = padded.shape[-1] - extra_pad
111
- return padded[..., :end]
112
- else:
113
- return F.pad(x, paddings, mode, value)
114
-
115
-
116
- def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]):
117
- """Remove padding from x, handling properly zero padding. Only for 1d!"""
118
- padding_left, padding_right = paddings
119
- assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
120
- assert (padding_left + padding_right) <= x.shape[-1]
121
- end = x.shape[-1] - padding_right
122
- return x[..., padding_left: end]
123
-
124
-
125
- class NormConv1d(nn.Module):
126
- """Wrapper around Conv1d and normalization applied to this conv
127
- to provide a uniform interface across normalization approaches.
128
- """
129
- def __init__(self, *args, causal: bool = False, norm: str = 'none',
130
- norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
131
- super().__init__()
132
- self.conv = apply_parametrization_norm(nn.Conv1d(*args, **kwargs), norm)
133
- self.norm = get_norm_module(self.conv, causal, norm, **norm_kwargs)
134
- self.norm_type = norm
135
-
136
- def forward(self, x):
137
- x = self.conv(x)
138
- x = self.norm(x)
139
- return x
140
-
141
-
142
- class NormConv2d(nn.Module):
143
- """Wrapper around Conv2d and normalization applied to this conv
144
- to provide a uniform interface across normalization approaches.
145
- """
146
- def __init__(self, *args, norm: str = 'none',
147
- norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
148
- super().__init__()
149
- self.conv = apply_parametrization_norm(nn.Conv2d(*args, **kwargs), norm)
150
- self.norm = get_norm_module(self.conv, causal=False, norm=norm, **norm_kwargs)
151
- self.norm_type = norm
152
-
153
- def forward(self, x):
154
- x = self.conv(x)
155
- x = self.norm(x)
156
- return x
157
-
158
-
159
- class NormConvTranspose1d(nn.Module):
160
- """Wrapper around ConvTranspose1d and normalization applied to this conv
161
- to provide a uniform interface across normalization approaches.
162
- """
163
- def __init__(self, *args, causal: bool = False, norm: str = 'none',
164
- norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
165
- super().__init__()
166
- self.convtr = apply_parametrization_norm(nn.ConvTranspose1d(*args, **kwargs), norm)
167
- self.norm = get_norm_module(self.convtr, causal, norm, **norm_kwargs)
168
- self.norm_type = norm
169
-
170
- def forward(self, x):
171
- x = self.convtr(x)
172
- x = self.norm(x)
173
- return x
174
-
175
-
176
- class NormConvTranspose2d(nn.Module):
177
- """Wrapper around ConvTranspose2d and normalization applied to this conv
178
- to provide a uniform interface across normalization approaches.
179
- """
180
- def __init__(self, *args, norm: str = 'none',
181
- norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
182
- super().__init__()
183
- self.convtr = apply_parametrization_norm(nn.ConvTranspose2d(*args, **kwargs), norm)
184
- self.norm = get_norm_module(self.convtr, causal=False, norm=norm, **norm_kwargs)
185
-
186
- def forward(self, x):
187
- x = self.convtr(x)
188
- x = self.norm(x)
189
- return x
190
-
191
-
192
- class SConv1d(nn.Module):
193
- """Conv1d with some builtin handling of asymmetric or causal padding
194
- and normalization.
195
- """
196
- def __init__(self, in_channels: int, out_channels: int,
197
- kernel_size: int, stride: int = 1, dilation: int = 1,
198
- groups: int = 1, bias: bool = True, causal: bool = False,
199
- norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {},
200
- pad_mode: str = 'reflect', **kwargs):
201
- super().__init__()
202
- # warn user on unusual setup between dilation and stride
203
- if stride > 1 and dilation > 1:
204
- warnings.warn('SConv1d has been initialized with stride > 1 and dilation > 1'
205
- f' (kernel_size={kernel_size} stride={stride}, dilation={dilation}).')
206
- self.conv = NormConv1d(in_channels, out_channels, kernel_size, stride,
207
- dilation=dilation, groups=groups, bias=bias, causal=causal,
208
- norm=norm, norm_kwargs=norm_kwargs)
209
- self.causal = causal
210
- self.pad_mode = pad_mode
211
-
212
- def forward(self, x):
213
- B, C, T = x.shape
214
- kernel_size = self.conv.conv.kernel_size[0]
215
- stride = self.conv.conv.stride[0]
216
- dilation = self.conv.conv.dilation[0]
217
- kernel_size = (kernel_size - 1) * dilation + 1 # effective kernel size with dilations
218
- padding_total = kernel_size - stride
219
- extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)
220
- if self.causal:
221
- # Left padding for causal
222
- x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode)
223
- else:
224
- # Asymmetric padding required for odd strides
225
- padding_right = padding_total // 2
226
- padding_left = padding_total - padding_right
227
- x = pad1d(x, (padding_left, padding_right + extra_padding), mode=self.pad_mode)
228
- return self.conv(x)
229
-
230
-
231
- class SConvTranspose1d(nn.Module):
232
- """ConvTranspose1d with some builtin handling of asymmetric or causal padding
233
- and normalization.
234
- """
235
- def __init__(self, in_channels: int, out_channels: int,
236
- kernel_size: int, stride: int = 1, causal: bool = False,
237
- norm: str = 'none', trim_right_ratio: float = 1.,
238
- norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
239
- super().__init__()
240
- self.convtr = NormConvTranspose1d(in_channels, out_channels, kernel_size, stride,
241
- causal=causal, norm=norm, norm_kwargs=norm_kwargs)
242
- self.causal = causal
243
- self.trim_right_ratio = trim_right_ratio
244
- assert self.causal or self.trim_right_ratio == 1., \
245
- "`trim_right_ratio` != 1.0 only makes sense for causal convolutions"
246
- assert self.trim_right_ratio >= 0. and self.trim_right_ratio <= 1.
247
-
248
- def forward(self, x):
249
- kernel_size = self.convtr.convtr.kernel_size[0]
250
- stride = self.convtr.convtr.stride[0]
251
- padding_total = kernel_size - stride
252
-
253
- y = self.convtr(x)
254
-
255
- # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be
256
- # removed at the very end, when keeping only the right length for the output,
257
- # as removing it here would require also passing the length at the matching layer
258
- # in the encoder.
259
- if self.causal:
260
- # Trim the padding on the right according to the specified ratio
261
- # if trim_right_ratio = 1.0, trim everything from right
262
- padding_right = math.ceil(padding_total * self.trim_right_ratio)
263
- padding_left = padding_total - padding_right
264
- y = unpad1d(y, (padding_left, padding_right))
265
- else:
266
- # Asymmetric padding required for odd strides
267
- padding_right = padding_total // 2
268
- padding_left = padding_total - padding_right
269
- y = unpad1d(y, (padding_left, padding_right))
270
- return y
271
-
272
- class SLSTM(nn.Module):
273
- """
274
- LSTM without worrying about the hidden state, nor the layout of the data.
275
- Expects input as convolutional layout.
276
- """
277
- def __init__(self, dimension: int, num_layers: int = 2, skip: bool = True):
278
- super().__init__()
279
- self.skip = skip
280
- self.lstm = nn.LSTM(dimension, dimension, num_layers)
281
- self.hidden = None
282
-
283
- def forward(self, x):
284
- x = x.permute(2, 0, 1)
285
- if self.training:
286
- y, _ = self.lstm(x)
287
- else:
288
- y, self.hidden = self.lstm(x, self.hidden)
289
- if self.skip:
290
- y = y + x
291
- y = y.permute(1, 2, 0)
292
- return y
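SConv1d computes its padding from the kernel size, stride and dilation, so that with stride 1 the output length matches the input; in causal mode all of the padding is placed on the left. A small illustrative sketch, assuming it were run inside this module:

import torch

x = torch.randn(1, 8, 100)    # (batch, channels, time)
causal = SConv1d(8, 16, kernel_size=7, causal=True, norm='weight_norm')
same = SConv1d(8, 16, kernel_size=7, causal=False, norm='weight_norm')
print(causal(x).shape, same(x).shape)   # both torch.Size([1, 16, 100]); the causal
                                        # variant pads only on the left, so frame t
                                        # never depends on future samples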
modules/flow_matching.py DELETED
@@ -1,155 +0,0 @@
1
- from abc import ABC
2
-
3
- import torch
4
- import torch.nn.functional as F
5
-
6
- from modules.diffusion_transformer import DiT
7
- from modules.commons import sequence_mask
8
-
9
- from tqdm import tqdm
10
-
11
- class BASECFM(torch.nn.Module, ABC):
12
- def __init__(
13
- self,
14
- args,
15
- ):
16
- super().__init__()
17
- self.sigma_min = 1e-6
18
-
19
- self.estimator = None
20
-
21
- self.in_channels = args.DiT.in_channels
22
-
23
- self.criterion = torch.nn.MSELoss() if args.reg_loss_type == "l2" else torch.nn.L1Loss()
24
-
25
- if hasattr(args.DiT, 'zero_prompt_speech_token'):
26
- self.zero_prompt_speech_token = args.DiT.zero_prompt_speech_token
27
- else:
28
- self.zero_prompt_speech_token = False
29
-
30
- @torch.inference_mode()
31
- def inference(self, mu, x_lens, prompt, style, f0, n_timesteps, temperature=1.0, inference_cfg_rate=0.5):
32
- """Forward diffusion
33
-
34
- Args:
35
- mu (torch.Tensor): output of encoder
36
- shape: (batch_size, n_feats, mel_timesteps)
37
- mask (torch.Tensor): output_mask
38
- shape: (batch_size, 1, mel_timesteps)
39
- n_timesteps (int): number of diffusion steps
40
- temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
41
- spks (torch.Tensor, optional): speaker ids. Defaults to None.
42
- shape: (batch_size, spk_emb_dim)
43
- cond: Not used but kept for future purposes
44
-
45
- Returns:
46
- sample: generated mel-spectrogram
47
- shape: (batch_size, n_feats, mel_timesteps)
48
- """
49
- B, T = mu.size(0), mu.size(1)
50
- z = torch.randn([B, self.in_channels, T], device=mu.device) * temperature
51
- t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device)
52
- return self.solve_euler(z, x_lens, prompt, mu, style, f0, t_span, inference_cfg_rate)
53
-
54
- def solve_euler(self, x, x_lens, prompt, mu, style, f0, t_span, inference_cfg_rate=0.5):
55
- """
56
- Fixed-step Euler solver for ODEs.
57
- Args:
58
- x (torch.Tensor): random noise
59
- t_span (torch.Tensor): n_timesteps interpolated
60
- shape: (n_timesteps + 1,)
61
- mu (torch.Tensor): output of encoder
62
- shape: (batch_size, n_feats, mel_timesteps)
63
- mask (torch.Tensor): output_mask
64
- shape: (batch_size, 1, mel_timesteps)
65
- spks (torch.Tensor, optional): speaker ids. Defaults to None.
66
- shape: (batch_size, spk_emb_dim)
67
- cond: Not used but kept for future purposes
68
- """
69
- t, _, dt = t_span[0], t_span[-1], t_span[1] - t_span[0]
70
-
71
- # I am storing this because I can later plot it by putting a debugger here and saving it to a file
72
- # Or in future might add like a return_all_steps flag
73
- sol = []
74
- # apply prompt
75
- prompt_len = prompt.size(-1)
76
- prompt_x = torch.zeros_like(x)
77
- prompt_x[..., :prompt_len] = prompt[..., :prompt_len]
78
- x[..., :prompt_len] = 0
79
- if self.zero_prompt_speech_token:
80
- mu[..., :prompt_len] = 0
81
- for step in tqdm(range(1, len(t_span))):
82
- dphi_dt = self.estimator(x, prompt_x, x_lens, t.unsqueeze(0), style, mu, f0)
83
- # Classifier-Free Guidance inference introduced in VoiceBox
84
- if inference_cfg_rate > 0:
85
- cfg_dphi_dt = self.estimator(
86
- x, torch.zeros_like(prompt_x), x_lens, t.unsqueeze(0),
87
- torch.zeros_like(style),
88
- torch.zeros_like(mu), None
89
- )
90
- dphi_dt = ((1.0 + inference_cfg_rate) * dphi_dt -
91
- inference_cfg_rate * cfg_dphi_dt)
92
- x = x + dt * dphi_dt
93
- t = t + dt
94
- sol.append(x)
95
- if step < len(t_span) - 1:
96
- dt = t_span[step + 1] - t
97
- x[:, :, :prompt_len] = 0
98
-
99
- return sol[-1]
100
-
101
- def forward(self, x1, x_lens, prompt_lens, mu, style, f0=None):
102
- """Computes diffusion loss
103
-
104
- Args:
105
- x1 (torch.Tensor): Target
106
- shape: (batch_size, n_feats, mel_timesteps)
107
- mask (torch.Tensor): target mask
108
- shape: (batch_size, 1, mel_timesteps)
109
- mu (torch.Tensor): output of encoder
110
- shape: (batch_size, n_feats, mel_timesteps)
111
- spks (torch.Tensor, optional): speaker embedding. Defaults to None.
112
- shape: (batch_size, spk_emb_dim)
113
-
114
- Returns:
115
- loss: conditional flow matching loss
116
- y: conditional flow
117
- shape: (batch_size, n_feats, mel_timesteps)
118
- """
119
- b, _, t = x1.shape
120
-
121
- # random timestep
122
- t = torch.rand([b, 1, 1], device=mu.device, dtype=x1.dtype)
123
- # sample noise p(x_0)
124
- z = torch.randn_like(x1)
125
-
126
- y = (1 - (1 - self.sigma_min) * t) * z + t * x1
127
- u = x1 - (1 - self.sigma_min) * z
128
-
129
- prompt = torch.zeros_like(x1)
130
- for bib in range(b):
131
- prompt[bib, :, :prompt_lens[bib]] = x1[bib, :, :prompt_lens[bib]]
132
- # range covered by prompt are set to 0
133
- y[bib, :, :prompt_lens[bib]] = 0
134
- if self.zero_prompt_speech_token:
135
- mu[bib, :, :prompt_lens[bib]] = 0
136
-
137
- estimator_out = self.estimator(y, prompt, x_lens, t.squeeze(), style, mu, f0)
138
- loss = 0
139
- for bib in range(b):
140
- loss += self.criterion(estimator_out[bib, :, prompt_lens[bib]:x_lens[bib]], u[bib, :, prompt_lens[bib]:x_lens[bib]])
141
- loss /= b
142
-
143
- return loss, y
144
-
145
-
146
-
147
- class CFM(BASECFM):
148
- def __init__(self, args):
149
- super().__init__(
150
- args
151
- )
152
- if args.dit_type == "DiT":
153
- self.estimator = DiT(args)
154
- else:
155
- raise NotImplementedError(f"Unknown diffusion type {args.dit_type}")
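BASECFM.forward above trains the DiT estimator with a conditional flow-matching objective: sample a point on a straight-line path between noise and the target, and regress the constant velocity of that path. The two defining lines, shown here as an illustrative sketch of the math rather than a training loop (assuming it were run inside this module):

import torch

sigma_min = 1e-6
x1 = torch.randn(2, 80, 50)    # target mel features
z = torch.randn_like(x1)       # noise sample x_0
t = torch.rand(2, 1, 1)        # one random timestep per example

y = (1 - (1 - sigma_min) * t) * z + t * x1   # point on the path from z to x1
u = x1 - (1 - sigma_min) * z                 # constant target velocity dy/dt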
modules/gpt_fast/generate.py DELETED
@@ -1,436 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
- import itertools
7
- import sys
8
- import time
9
- from pathlib import Path
10
- from typing import Optional, Tuple
11
-
12
- import torch
13
- import torch._dynamo.config
14
- import torch._inductor.config
15
-
16
- def device_sync(device):
17
- if "cuda" in device:
18
- torch.cuda.synchronize(device)
19
- elif ("cpu" in device) or ("mps" in device):
20
- pass
21
- else:
22
- print(f"device={device} is not yet suppported")
23
-
24
-
25
- torch._inductor.config.coordinate_descent_tuning = True
26
- torch._inductor.config.triton.unique_kernel_names = True
27
- torch._inductor.config.fx_graph_cache = True # Experimental feature to reduce compilation times, will be on by default in future
28
-
29
- default_device = 'cuda' if torch.cuda.is_available() else 'cpu'
30
-
31
- # support running without installing as a package
32
- wd = Path(__file__).parent.parent.resolve()
33
- sys.path.append(str(wd))
34
-
35
- from model import Transformer
36
- from tokenizer import get_tokenizer
37
-
38
- def multinomial_sample_one_no_sync(probs_sort): # Does multinomial sampling without a cuda synchronization
39
- q = torch.empty_like(probs_sort).exponential_(1)
40
- return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
41
-
42
- def logits_to_probs(logits, temperature: float = 1.0, top_k: Optional[int] = None):
43
- logits = logits / max(temperature, 1e-5)
44
-
45
- if top_k is not None:
46
- v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
47
- pivot = v.select(-1, -1).unsqueeze(-1)
48
- logits = torch.where(logits < pivot, -float("Inf"), logits)
49
- probs = torch.nn.functional.softmax(logits, dim=-1)
50
- return probs
51
-
52
- def sample(logits, temperature: float = 1.0, top_k: Optional[int] = None):
53
- probs = logits_to_probs(logits[0, -1], temperature, top_k)
54
- idx_next = multinomial_sample_one_no_sync(probs)
55
- return idx_next, probs
56
-
57
- def prefill(model: Transformer, x: torch.Tensor, input_pos: torch.Tensor, **sampling_kwargs) -> torch.Tensor:
58
- # input_pos: [B, S]
59
- logits = model(x, input_pos)
60
- return sample(logits, **sampling_kwargs)[0]
61
-
62
- def decode_one_token(model: Transformer, x: torch.Tensor, input_pos: torch.Tensor, **sampling_kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
63
- # input_pos: [B, 1]
64
- assert input_pos.shape[-1] == 1
65
- logits = model(x, input_pos)
66
- return sample(logits, **sampling_kwargs)
67
-
68
- def decode_n_tokens(model: Transformer, cur_token: torch.Tensor, input_pos: torch.Tensor, num_new_tokens: int, callback=lambda _: _, **sampling_kwargs):
69
- new_tokens, new_probs = [], []
70
- for i in range(num_new_tokens):
71
- with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True): # Actually better for Inductor to codegen attention here
72
- next_token, next_prob = decode_one_token(
73
- model, cur_token, input_pos, **sampling_kwargs
74
- )
75
- input_pos += 1
76
- new_tokens.append(next_token.clone())
77
- callback(new_tokens[-1])
78
- new_probs.append(next_prob.clone())
79
- cur_token = next_token.view(1, -1)
80
-
81
- return new_tokens, new_probs
82
-
83
-
84
- def model_forward(model, x, input_pos):
85
- return model(x, input_pos)
86
-
87
- def speculative_decode(
88
- model: Transformer,
89
- draft_model: Transformer,
90
- cur_token: torch.Tensor,
91
- input_pos: int,
92
- speculate_k: int,
93
- **sampling_kwargs
94
- ) -> torch.Tensor:
95
- # draft model inference sequentially
96
- device = cur_token.device
97
- orig_input_pos = torch.tensor([input_pos], dtype=torch.int64, device=cur_token.device)
98
- draft_tokens, draft_probs = decode_n_tokens(draft_model, cur_token.view(1, -1), orig_input_pos.clone(), speculate_k, **sampling_kwargs)
99
-
100
- draft_tokens = torch.cat(draft_tokens)
101
- # parallel inference on target model using draft tokens
102
- target_logits = model_forward(
103
- model,
104
- torch.cat([cur_token.view(1), draft_tokens]).view(1, -1),
105
- torch.arange(input_pos, input_pos + speculate_k + 1, device=cur_token.device)
106
- )
107
- target_probs = logits_to_probs(target_logits[0], **sampling_kwargs)
108
- draft_probs = torch.stack(draft_probs)
109
- # q: target prob, p: draft prob
110
- # q >= p: always accept draft token
111
- # q < p: q/p prob to accept draft token
112
- p = draft_probs[torch.arange(0, speculate_k, device=device), draft_tokens]
113
- q = target_probs[torch.arange(0, speculate_k, device=device), draft_tokens]
114
- accept_draft_prob = torch.minimum(torch.ones(()), q[:speculate_k]/ p)
115
- rejected_locations = (torch.rand_like(accept_draft_prob) > accept_draft_prob).nonzero()
116
-
117
- if rejected_locations.shape[0] == 0: # All draft tokens have been accepted
118
- accept_length = speculate_k + 1
119
- last_token = multinomial_sample_one_no_sync(target_probs[-1])
120
- # fill last token into draft model
121
- model_forward(
122
- draft_model,
123
- draft_tokens[-1].view(1, -1),
124
- orig_input_pos + speculate_k,
125
- )
126
- return torch.cat([draft_tokens, last_token])
127
- else:
128
- accept_length = rejected_locations[0].item()
129
- p = draft_probs[accept_length]
130
- q = target_probs[accept_length]
131
- new = q - p
132
- new = torch.where(new > 0, new, 0.0)
133
- new = new / new.sum()
134
- next_token = multinomial_sample_one_no_sync(new)
135
- return torch.cat([draft_tokens[:accept_length], next_token])
136
-
137
- @torch.no_grad()
138
- def generate(
139
- model: Transformer,
140
- prompt: torch.Tensor,
141
- max_new_tokens: int,
142
- *,
143
- interactive: bool,
144
- draft_model: Transformer,
145
- speculate_k: Optional[int] = 8,
146
- callback = lambda x: x,
147
- **sampling_kwargs
148
- ) -> torch.Tensor:
149
- """
150
- Takes a conditioning sequence (prompt) as input and continues to generate as many tokens as requested.
151
- """
152
-
153
- is_speculative = draft_model is not None
154
- # create an empty tensor of the expected final shape and fill in the current tokens
155
- T = prompt.size(0)
156
- T_new = T + max_new_tokens
157
- if interactive:
158
- max_seq_length = 350
159
- else:
160
- max_seq_length = min(T_new, model.config.block_size)
161
-
162
- device, dtype = prompt.device, prompt.dtype
163
- max_seq_length = max_seq_length + speculate_k + 1 if is_speculative else max_seq_length
164
- with torch.device(device):
165
- model.setup_caches(max_batch_size=1, max_seq_length=max_seq_length)
166
- if is_speculative and draft_model is not model:
167
- draft_model.setup_caches(max_batch_size=1, max_seq_length=max_seq_length)
168
-
169
- # create an empty tensor of the expected final shape and fill in the current tokens
170
- empty = torch.empty(T_new, dtype=dtype, device=device)
171
- empty[:T] = prompt
172
- seq = empty
173
- input_pos = torch.arange(0, T, device=device)
174
-
175
- next_token = prefill(model, prompt.view(1, -1), input_pos, **sampling_kwargs).clone()
176
- if is_speculative:
177
- prefill(draft_model, prompt.view(1, -1), input_pos, **sampling_kwargs)
178
- seq[T] = next_token
179
-
180
- input_pos = torch.tensor([T], device=device, dtype=torch.int)
181
- accept_counts = [0] * (speculate_k + 1)
182
-
183
- if is_speculative:
184
- input_pos = input_pos.item() # for speculative decoding easier to keep on host
185
- while input_pos < T_new - 1:
186
- cur_token = next_token.view(())
187
-
188
- next_tokens = speculative_decode(
189
- model, draft_model, cur_token, input_pos, speculate_k, **sampling_kwargs
190
- )
191
-
192
- accept_counts[len(next_tokens) - 1] += 1
193
- num_added = min(T_new - input_pos - 1, len(next_tokens))
194
- seq[input_pos + 1 : input_pos + num_added + 1] = next_tokens[: num_added]
195
- for i in next_tokens[: num_added,]:
196
- callback(i)
197
- input_pos = input_pos + num_added
198
- next_token = next_tokens[-1]
199
- else:
200
- generated_tokens, _ = decode_n_tokens(model, next_token.view(1, -1), input_pos, max_new_tokens - 1, callback=callback, **sampling_kwargs)
201
- seq[T + 1:] = torch.cat(generated_tokens)
202
-
203
- generate_stats = {
204
- 'accept_counts': accept_counts
205
- }
206
- return seq, generate_stats
207
-
208
- def encode_tokens(tokenizer, string, bos=True, device=default_device):
209
- tokens = tokenizer.encode(string)
210
- if bos:
211
- tokens = [tokenizer.bos_id()] + tokens
212
- return torch.tensor(tokens, dtype=torch.int, device=device)
213
-
214
- def _load_model(checkpoint_path, device, precision, use_tp):
215
- use_cuda = 'cuda' in device
216
- with torch.device('meta'):
217
- model = Transformer.from_name(checkpoint_path.parent.name)
218
-
219
- if "int8" in str(checkpoint_path):
220
- print("Using int8 weight-only quantization!")
221
- from quantize import WeightOnlyInt8QuantHandler
222
- simple_quantizer = WeightOnlyInt8QuantHandler(model)
223
- model = simple_quantizer.convert_for_runtime()
224
-
225
- if "int4" in str(checkpoint_path):
226
- print("Using int4 weight-only quantization!")
227
- path_comps = checkpoint_path.name.split(".")
228
- groupsize = int(path_comps[-2][1:])
229
- from quantize import WeightOnlyInt4QuantHandler
230
- simple_quantizer = WeightOnlyInt4QuantHandler(model, groupsize)
231
- model = simple_quantizer.convert_for_runtime()
232
-
233
- checkpoint = torch.load(str(checkpoint_path), mmap=True, weights_only=True)
234
- if "model" in checkpoint and "stories" in str(checkpoint_path):
235
- checkpoint = checkpoint["model"]
236
- model.load_state_dict(checkpoint, assign=True)
237
-
238
- if use_tp:
239
- from tp import apply_tp
240
- print("Applying tensor parallel to model ...")
241
- apply_tp(model)
242
-
243
- model = model.to(device=device, dtype=precision)
244
- return model.eval()
245
-
246
- def _get_model_size(model):
247
- model_size = 0
248
- for name, child in model.named_children():
249
- if not isinstance(child, torch.nn.Embedding):
250
- model_size += sum(
251
- [
252
- p.numel() * p.dtype.itemsize
253
- for p in itertools.chain(child.parameters(), child.buffers())
254
- ]
255
- )
256
- return model_size
257
-
258
- B_INST, E_INST = "[INST]", "[/INST]"
259
-
260
- def main(
261
- prompt: str = "Hello, my name is",
262
- interactive: bool = False,
263
- num_samples: int = 5,
264
- max_new_tokens: int = 100,
265
- top_k: int = 200,
266
- temperature: float = 0.8,
267
- checkpoint_path: Path = Path("checkpoints/meta-Transformer/Transformer-2-7b-chat-hf/model.pth"),
268
- compile: bool = True,
269
- compile_prefill: bool = False,
270
- profile: Optional[Path] = None,
271
- draft_checkpoint_path: Optional[Path] = None,
272
- speculate_k: int = 5,
273
- device=default_device,
274
- ) -> None:
275
- """Generates text samples based on a pre-trained Transformer model and tokenizer.
276
- """
277
- assert checkpoint_path.is_file(), checkpoint_path
278
-
279
- tokenizer_path = checkpoint_path.parent / "tokenizer.model"
280
- assert tokenizer_path.is_file(), str(tokenizer_path)
281
-
282
- global print
283
- from tp import maybe_init_dist
284
- rank = maybe_init_dist()
285
- use_tp = rank is not None
286
- if use_tp:
287
- if rank != 0:
288
- # only print on rank 0
289
- print = lambda *args, **kwargs: None
290
-
291
- print(f"Using device={device}")
292
- precision = torch.bfloat16
293
- is_speculative = draft_checkpoint_path is not None
294
- is_chat = "chat" in str(checkpoint_path)
295
-
296
- print("Loading model ...")
297
- t0 = time.time()
298
- model = _load_model(checkpoint_path, device, precision, use_tp)
299
-
300
- if is_speculative:
301
- draft_model = _load_model(draft_checkpoint_path, device, precision, use_tp)
302
- else:
303
- draft_model = None
304
-
305
- device_sync(device=device) # MKG
306
- print(f"Time to load model: {time.time() - t0:.02f} seconds")
307
-
308
- tokenizer = get_tokenizer(tokenizer_path, checkpoint_path)
309
-
310
- encoded = encode_tokens(tokenizer, prompt, bos=True, device=device)
311
- prompt_length = encoded.size(0)
312
-
313
- torch.manual_seed(1234)
314
- model_size = _get_model_size(model)
315
- if compile:
316
- if is_speculative and use_tp: # and ("cuda" in device):
317
- torch._inductor.config.triton.cudagraph_trees = False # Bug with cudagraph trees in this case
318
-
319
- if is_speculative:
320
- global model_forward, logits_to_probs
321
- model_forward = torch.compile(model_forward, mode="reduce-overhead", fullgraph=True)
322
-
323
- global decode_one_token, prefill
324
- decode_one_token = torch.compile(decode_one_token, mode="reduce-overhead", fullgraph=True)
325
-
326
- # Compile the prefill as well (gated by compile_prefill) to squeeze more perf out of prefill
327
- if compile_prefill:
328
- prefill = torch.compile(prefill, fullgraph=True, dynamic=True)
329
-
330
-
331
- aggregate_metrics = {
332
- 'tokens_per_sec': [],
333
- 'accept_counts': [],
334
- }
335
- start = -1 if compile else 0
336
-
337
- for i in range(start, num_samples):
338
- device_sync(device=device) # MKG
339
- if i >= 0 and interactive:
340
- prompt = input("What is your prompt? ")
341
- if is_chat:
342
- prompt = f"{B_INST} {prompt.strip()} {E_INST}"
343
- encoded = encode_tokens(tokenizer, prompt, bos=True, device=device)
344
-
345
- if interactive and i >= 0:
346
- buffer = []
347
- period_id = tokenizer.encode('.')[0]
348
- done_generating = False
349
- def callback(x):
350
- nonlocal done_generating
351
- if done_generating:
352
- return
353
- buffer.append(tokenizer.decode([period_id] + x.tolist())[1:])
354
- if x.item() == tokenizer.eos_id():
355
- done_generating = True
356
- if len(buffer) == 4 or done_generating:
357
- print(''.join(buffer), end='', flush=True)
358
- buffer.clear()
359
- # print(, end='', flush=True)
360
- else:
361
- callback = lambda x : x
362
- t0 = time.perf_counter()
363
- import contextlib
364
- if (i != num_samples - 1 or not profile) or (use_tp and rank != 0):
365
- prof = contextlib.nullcontext()
366
- else:
367
- torch.profiler._utils._init_for_cuda_graphs()
368
- prof = torch.profiler.profile()
369
- with prof:
370
- y, metrics = generate(
371
- model,
372
- encoded,
373
- max_new_tokens,
374
- draft_model=draft_model,
375
- speculate_k=speculate_k,
376
- interactive=interactive,
377
- callback=callback,
378
- temperature=temperature,
379
- top_k=top_k,
380
- )
381
- aggregate_metrics['accept_counts'].append(metrics['accept_counts'])
382
- if i == -1:
383
- print(f"Compilation time: {time.perf_counter() - t0:.2f} seconds")
384
- continue
385
- if hasattr(prof, "export_chrome_trace"):
386
- if use_tp:
387
- prof.export_chrome_trace(f"{profile}_rank_{rank}.json")
388
- else:
389
- prof.export_chrome_trace(f"{profile}.json")
390
- device_sync(device=device) # MKG
391
- t = time.perf_counter() - t0
392
-
393
- if not interactive:
394
- print(tokenizer.decode(y.tolist()))
395
- else:
396
- print()
397
- tokens_generated = y.size(0) - prompt_length
398
- tokens_sec = tokens_generated / t
399
- aggregate_metrics['tokens_per_sec'].append(tokens_sec)
400
- print(f"Time for inference {i + 1}: {t:.02f} sec total, {tokens_sec:.02f} tokens/sec")
401
- print(f"Bandwidth achieved: {model_size * tokens_sec / 1e9:.02f} GB/s")
402
- print("==========")
403
- if is_speculative:
404
- counts_aggregated = [sum(i) for i in zip(*aggregate_metrics['accept_counts'])]
405
- acceptance_probs = [i/sum(counts_aggregated) for i in counts_aggregated]
406
- print(f"Acceptance probs: {acceptance_probs}")
407
- print(f"Mean Accepted: {sum([idx * i for idx, i in enumerate(counts_aggregated)])/sum(counts_aggregated)}")
408
-
409
- print(f"Average tokens/sec: {torch.mean(torch.tensor(aggregate_metrics['tokens_per_sec'])).item():.2f}")
410
- print(f"Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB")
411
-
412
-
413
- if __name__ == '__main__':
414
- import argparse
415
- parser = argparse.ArgumentParser(description='Your CLI description.')
416
-
417
- parser.add_argument('--prompt', type=str, default="Hello, my name is", help='Input prompt.')
418
- parser.add_argument('--interactive', action='store_true', help='Whether to launch in interactive mode')
419
- parser.add_argument('--num_samples', type=int, default=5, help='Number of samples.')
420
- parser.add_argument('--max_new_tokens', type=int, default=200, help='Maximum number of new tokens.')
421
- parser.add_argument('--top_k', type=int, default=200, help='Top-k for sampling.')
422
- parser.add_argument('--temperature', type=float, default=0.8, help='Temperature for sampling.')
423
- parser.add_argument('--checkpoint_path', type=Path, default=Path("checkpoints/meta-Transformer/Transformer-2-7b-chat-hf/model.pth"), help='Model checkpoint path.')
424
- parser.add_argument('--compile', action='store_true', help='Whether to compile the model.')
425
- parser.add_argument('--compile_prefill', action='store_true', help='Whether to compile the prefill (improves prefill perf, but higher compile times)')
426
- parser.add_argument('--profile', type=Path, default=None, help='Profile path.')
427
- parser.add_argument('--speculate_k', type=int, default=5, help='Speculative execution depth.')
428
- parser.add_argument('--draft_checkpoint_path', type=Path, default=None, help='Draft checkpoint path.')
429
- parser.add_argument('--device', type=str, default=default_device, help='Device to use')
430
-
431
- args = parser.parse_args()
432
- main(
433
- args.prompt, args.interactive, args.num_samples, args.max_new_tokens, args.top_k,
434
- args.temperature, args.checkpoint_path, args.compile, args.compile_prefill, args.profile, args.draft_checkpoint_path,
435
- args.speculate_k, args.device
436
- )
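Given the argument parser and the main() entry point above, a typical programmatic invocation would look roughly like the following sketch. The keyword names mirror the signature above; the checkpoint path is the default placeholder from that signature and must exist on disk.

    from pathlib import Path

    # Roughly equivalent to: python generate.py --compile --num_samples 1 --max_new_tokens 100
    main(
        prompt="Hello, my name is",
        interactive=False,
        num_samples=1,
        max_new_tokens=100,
        top_k=200,
        temperature=0.8,
        checkpoint_path=Path("checkpoints/meta-Transformer/Transformer-2-7b-chat-hf/model.pth"),
        compile=True,
        compile_prefill=False,
        profile=None,
        draft_checkpoint_path=None,  # set to a smaller checkpoint to enable speculative decoding
        speculate_k=5,
    )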
 
modules/gpt_fast/model.py DELETED
@@ -1,356 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
- from dataclasses import dataclass
7
- from typing import Optional
8
-
9
- import torch
10
- import torch.nn as nn
11
- from torch import Tensor
12
- from torch.nn import functional as F
13
-
14
-
15
- def find_multiple(n: int, k: int) -> int:
16
- if n % k == 0:
17
- return n
18
- return n + k - (n % k)
19
-
20
- class AdaptiveLayerNorm(nn.Module):
21
- r"""Adaptive Layer Normalization"""
22
-
23
- def __init__(self, d_model, norm) -> None:
24
- super(AdaptiveLayerNorm, self).__init__()
25
- self.project_layer = nn.Linear(d_model, 2 * d_model)
26
- self.norm = norm
27
- self.d_model = d_model
28
- self.eps = self.norm.eps
29
-
30
- def forward(self, input: Tensor, embedding: Tensor = None) -> Tensor:
31
- if embedding is None:
32
- return self.norm(input)
33
- weight, bias = torch.split(
34
- self.project_layer(embedding),
35
- split_size_or_sections=self.d_model,
36
- dim=-1,
37
- )
38
- return weight * self.norm(input) + bias
39
-
40
-
41
- @dataclass
42
- class ModelArgs:
43
- block_size: int = 2048
44
- vocab_size: int = 32000
45
- n_layer: int = 32
46
- n_head: int = 32
47
- dim: int = 4096
48
- intermediate_size: int = None
49
- n_local_heads: int = -1
50
- head_dim: int = 64
51
- rope_base: float = 10000
52
- norm_eps: float = 1e-5
53
- has_cross_attention: bool = False
54
- context_dim: int = 0
55
- uvit_skip_connection: bool = False
56
-
57
- def __post_init__(self):
58
- if self.n_local_heads == -1:
59
- self.n_local_heads = self.n_head
60
- if self.intermediate_size is None:
61
- hidden_dim = 4 * self.dim
62
- n_hidden = int(2 * hidden_dim / 3)
63
- self.intermediate_size = find_multiple(n_hidden, 256)
64
- # self.head_dim = self.dim // self.n_head
65
-
66
- @classmethod
67
- def from_name(cls, name: str):
68
- if name in transformer_configs:
69
- return cls(**transformer_configs[name])
70
- # fuzzy search
71
- config = [config for config in transformer_configs if config.lower() in str(name).lower()]
72
-
73
- # We may have two or more configs matched (e.g. "7B" and "Mistral-7B"). Find the best config match,
74
- # take the longer name (as it has more symbols matched)
75
- if len(config) > 1:
76
- config.sort(key=len, reverse=True)
77
- assert len(config[0]) != len(config[1]), name # make sure only one 'best' match
78
-
79
- return cls(**transformer_configs[config[0]])
80
-
81
-
82
- transformer_configs = {
83
- "CodeLlama-7b-Python-hf": dict(block_size=16384, vocab_size=32000, n_layer=32, dim=4096, rope_base=1000000),
84
- "7B": dict(n_layer=32, n_head=32, dim=4096),
85
- "13B": dict(n_layer=40, n_head=40, dim=5120),
86
- "30B": dict(n_layer=60, n_head=52, dim=6656),
87
- "34B": dict(n_layer=48, n_head=64, dim=8192, vocab_size=32000, n_local_heads=8, intermediate_size=22016,
88
- rope_base=1000000), # CodeLlama-34B-Python-hf
89
- "70B": dict(n_layer=80, n_head=64, dim=8192, n_local_heads=8, intermediate_size=28672),
90
- "Mistral-7B": dict(n_layer=32, n_head=32, n_local_heads=8, dim=4096, intermediate_size=14336, vocab_size=32000),
91
- "stories15M": dict(n_layer=6, n_head=6, dim=288),
92
- "stories110M": dict(n_layer=12, n_head=12, dim=768),
93
-
94
- "llama-3-8b": dict(block_size=8192, n_layer=32, n_head=32, n_local_heads=8, dim=4096, intermediate_size=14336,
95
- vocab_size=128256, rope_base=500000),
96
- "llama-3-70b": dict(block_size=8192, n_layer=80, n_head=64, n_local_heads=8, dim=8192, intermediate_size=28672,
97
- vocab_size=128256, rope_base=500000),
98
- }
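The from_name helper above first looks for an exact key in transformer_configs and otherwise falls back to a fuzzy substring match, preferring the longest matching key. A small illustration (not part of the file, names chosen only to exercise both paths):

    # Exact key: returns the stories15M config directly.
    args = ModelArgs.from_name("stories15M")
    assert args.n_layer == 6 and args.dim == 288

    # Fuzzy match: both "7B" and "Mistral-7B" are substrings of this name;
    # the longer key "Mistral-7B" wins.
    args = ModelArgs.from_name("Mistral-7B-Instruct-v0.2")
    assert args.intermediate_size == 14336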
99
-
100
-
101
- class KVCache(nn.Module):
102
- def __init__(self, max_batch_size, max_seq_length, n_heads, head_dim, dtype=torch.bfloat16):
103
- super().__init__()
104
- cache_shape = (max_batch_size, n_heads, max_seq_length, head_dim)
105
- self.register_buffer('k_cache', torch.zeros(cache_shape, dtype=dtype))
106
- self.register_buffer('v_cache', torch.zeros(cache_shape, dtype=dtype))
107
-
108
- def update(self, input_pos, k_val, v_val):
109
- # input_pos: [S], k_val: [B, H, S, D]
110
- assert input_pos.shape[0] == k_val.shape[2]
111
-
112
- k_out = self.k_cache
113
- v_out = self.v_cache
114
- k_out[:, :, input_pos] = k_val
115
- v_out[:, :, input_pos] = v_val
116
-
117
- return k_out, v_out
118
-
119
-
120
- class Transformer(nn.Module):
121
- def __init__(self, config: ModelArgs) -> None:
122
- super().__init__()
123
- self.config = config
124
-
125
- self.layers = nn.ModuleList(TransformerBlock(config) for _ in range(config.n_layer))
126
- self.norm = AdaptiveLayerNorm(config.dim, RMSNorm(config.dim, eps=config.norm_eps))
127
-
128
- self.freqs_cis: Optional[Tensor] = None
129
- self.mask_cache: Optional[Tensor] = None
130
- self.max_batch_size = -1
131
- self.max_seq_length = -1
132
-
133
- def setup_caches(self, max_batch_size, max_seq_length, use_kv_cache=True):
134
- if self.max_seq_length >= max_seq_length and self.max_batch_size >= max_batch_size:
135
- return
136
- head_dim = self.config.head_dim  # must match the head_dim used to view k/v in Attention.forward
137
- max_seq_length = find_multiple(max_seq_length, 8)
138
- self.max_seq_length = max_seq_length
139
- self.max_batch_size = max_batch_size
140
- dtype = self.norm.project_layer.weight.dtype
141
- device = self.norm.project_layer.weight.device
142
-
143
- if not self.training and use_kv_cache:
144
- for b in self.layers:
145
- b.attention.kv_cache = KVCache(max_batch_size, max_seq_length, self.config.n_local_heads, head_dim, dtype).to(device)
146
-
147
- self.freqs_cis = precompute_freqs_cis(self.config.block_size, self.config.head_dim,
148
- self.config.rope_base, dtype).to(device)
149
- self.causal_mask = torch.tril(torch.ones(self.max_seq_length, self.max_seq_length, dtype=torch.bool)).to(device)
150
- self.use_kv_cache = use_kv_cache
151
- self.uvit_skip_connection = self.config.uvit_skip_connection
152
- if self.uvit_skip_connection:
153
- self.layers_emit_skip = [i for i in range(self.config.n_layer) if i < self.config.n_layer // 2]
154
- self.layers_receive_skip = [i for i in range(self.config.n_layer) if i > self.config.n_layer // 2]
155
- else:
156
- self.layers_emit_skip = []
157
- self.layers_receive_skip = []
158
-
159
- def forward(self,
160
- x: Tensor,
161
- c: Tensor,
162
- input_pos: Optional[Tensor] = None,
163
- mask: Optional[Tensor] = None,
164
- context: Optional[Tensor] = None,
165
- context_input_pos: Optional[Tensor] = None,
166
- cross_attention_mask: Optional[Tensor] = None,
167
- ) -> Tensor:
168
- assert self.freqs_cis is not None, "Caches must be initialized first"
169
- if mask is None: # in case of non-causal model
170
- if not self.training and self.use_kv_cache:
171
- mask = self.causal_mask[None, None, input_pos]
172
- else:
173
- mask = self.causal_mask[None, None, input_pos]
174
- mask = mask[..., input_pos]
175
- freqs_cis = self.freqs_cis[input_pos]
176
- if context is not None:
177
- context_freqs_cis = self.freqs_cis[context_input_pos]
178
- else:
179
- context_freqs_cis = None
180
- skip_in_x_list = []
181
- for i, layer in enumerate(self.layers):
182
- if self.uvit_skip_connection and i in self.layers_receive_skip:
183
- skip_in_x = skip_in_x_list.pop(-1)
184
- else:
185
- skip_in_x = None
186
- x = layer(x, c, input_pos, freqs_cis, mask, context, context_freqs_cis, cross_attention_mask, skip_in_x)
187
- if self.uvit_skip_connection and i in self.layers_emit_skip:
188
- skip_in_x_list.append(x)
189
- x = self.norm(x, c)
190
- return x
191
-
192
- @classmethod
193
- def from_name(cls, name: str):
194
- return cls(ModelArgs.from_name(name))
195
-
196
-
197
- class TransformerBlock(nn.Module):
198
- def __init__(self, config: ModelArgs) -> None:
199
- super().__init__()
200
- self.attention = Attention(config)
201
- self.feed_forward = FeedForward(config)
202
- self.ffn_norm = AdaptiveLayerNorm(config.dim, RMSNorm(config.dim, eps=config.norm_eps))
203
- self.attention_norm = AdaptiveLayerNorm(config.dim, RMSNorm(config.dim, eps=config.norm_eps))
204
-
205
- if config.has_cross_attention:
206
- self.has_cross_attention = True
207
- self.cross_attention = Attention(config, is_cross_attention=True)
208
- self.cross_attention_norm = AdaptiveLayerNorm(config.dim, RMSNorm(config.dim, eps=config.norm_eps))
209
- else:
210
- self.has_cross_attention = False
211
-
212
- if config.uvit_skip_connection:
213
- self.skip_in_linear = nn.Linear(config.dim * 2, config.dim)
214
- self.uvit_skip_connection = True
215
- else:
216
- self.uvit_skip_connection = False
217
-
218
- def forward(self,
219
- x: Tensor,
220
- c: Tensor,
221
- input_pos: Tensor,
222
- freqs_cis: Tensor,
223
- mask: Tensor,
224
- context: Optional[Tensor] = None,
225
- context_freqs_cis: Optional[Tensor] = None,
226
- cross_attention_mask: Optional[Tensor] = None,
227
- skip_in_x: Optional[Tensor] = None,
228
- ) -> Tensor:
229
- if self.uvit_skip_connection and skip_in_x is not None:
230
- x = self.skip_in_linear(torch.cat([x, skip_in_x], dim=-1))
231
- h = x + self.attention(self.attention_norm(x, c), freqs_cis, mask, input_pos)
232
- if self.has_cross_attention:
233
- h = h + self.cross_attention(self.cross_attention_norm(h, c), freqs_cis, cross_attention_mask, input_pos, context, context_freqs_cis)
234
- out = h + self.feed_forward(self.ffn_norm(h, c))
235
- return out
236
-
237
-
238
- class Attention(nn.Module):
239
- def __init__(self, config: ModelArgs, is_cross_attention: bool = False):
240
- super().__init__()
241
- assert config.dim % config.n_head == 0
242
-
243
- total_head_dim = (config.n_head + 2 * config.n_local_heads) * config.head_dim
244
- # key, query, value projections for all heads, but in a batch
245
- if is_cross_attention:
246
- self.wq = nn.Linear(config.dim, config.n_head * config.head_dim, bias=False)
247
- self.wkv = nn.Linear(config.context_dim, 2 * config.n_local_heads * config.head_dim, bias=False)
248
- else:
249
- self.wqkv = nn.Linear(config.dim, total_head_dim, bias=False)
250
- self.wo = nn.Linear(config.head_dim * config.n_head, config.dim, bias=False)
251
- self.kv_cache = None
252
-
253
- self.n_head = config.n_head
254
- self.head_dim = config.head_dim
255
- self.n_local_heads = config.n_local_heads
256
- self.dim = config.dim
257
- # self._register_load_state_dict_pre_hook(self.load_hook)
258
-
259
- # def load_hook(self, state_dict, prefix, *args):
260
- # if prefix + "wq.weight" in state_dict:
261
- # wq = state_dict.pop(prefix + "wq.weight")
262
- # wk = state_dict.pop(prefix + "wk.weight")
263
- # wv = state_dict.pop(prefix + "wv.weight")
264
- # state_dict[prefix + "wqkv.weight"] = torch.cat([wq, wk, wv])
265
-
266
- def forward(self,
267
- x: Tensor,
268
- freqs_cis: Tensor,
269
- mask: Tensor,
270
- input_pos: Optional[Tensor] = None,
271
- context: Optional[Tensor] = None,
272
- context_freqs_cis: Optional[Tensor] = None,
273
- ) -> Tensor:
274
- bsz, seqlen, _ = x.shape
275
-
276
- kv_size = self.n_local_heads * self.head_dim
277
- if context is None:
278
- q, k, v = self.wqkv(x).split([self.n_head * self.head_dim, kv_size, kv_size], dim=-1)
279
- context_seqlen = seqlen
280
- else:
281
- q = self.wq(x)
282
- k, v = self.wkv(context).split([kv_size, kv_size], dim=-1)
283
- context_seqlen = context.shape[1]
284
-
285
- q = q.view(bsz, seqlen, self.n_head, self.head_dim)
286
- k = k.view(bsz, context_seqlen, self.n_local_heads, self.head_dim)
287
- v = v.view(bsz, context_seqlen, self.n_local_heads, self.head_dim)
288
-
289
- q = apply_rotary_emb(q, freqs_cis)
290
- k = apply_rotary_emb(k, context_freqs_cis if context_freqs_cis is not None else freqs_cis)
291
-
292
- q, k, v = map(lambda x: x.transpose(1, 2), (q, k, v))
293
-
294
- if self.kv_cache is not None:
295
- k, v = self.kv_cache.update(input_pos, k, v)
296
-
297
- k = k.repeat_interleave(self.n_head // self.n_local_heads, dim=1)
298
- v = v.repeat_interleave(self.n_head // self.n_local_heads, dim=1)
299
- y = F.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0)
300
-
301
- y = y.transpose(1, 2).contiguous().view(bsz, seqlen, self.head_dim * self.n_head)
302
-
303
- y = self.wo(y)
304
- return y
305
-
306
-
307
- class FeedForward(nn.Module):
308
- def __init__(self, config: ModelArgs) -> None:
309
- super().__init__()
310
- self.w1 = nn.Linear(config.dim, config.intermediate_size, bias=False)
311
- self.w3 = nn.Linear(config.dim, config.intermediate_size, bias=False)
312
- self.w2 = nn.Linear(config.intermediate_size, config.dim, bias=False)
313
-
314
- def forward(self, x: Tensor) -> Tensor:
315
- return self.w2(F.silu(self.w1(x)) * self.w3(x))
316
-
317
-
318
- class RMSNorm(nn.Module):
319
- def __init__(self, dim: int, eps: float = 1e-5):
320
- super().__init__()
321
- self.eps = eps
322
- self.weight = nn.Parameter(torch.ones(dim))
323
-
324
- def _norm(self, x):
325
- return x * torch.rsqrt(torch.mean(x * x, dim=-1, keepdim=True) + self.eps)
326
-
327
- def forward(self, x: Tensor) -> Tensor:
328
- output = self._norm(x.float()).type_as(x)
329
- return output * self.weight
330
-
331
-
332
- def precompute_freqs_cis(
333
- seq_len: int, n_elem: int, base: int = 10000,
334
- dtype: torch.dtype = torch.bfloat16
335
- ) -> Tensor:
336
- freqs = 1.0 / (base ** (torch.arange(0, n_elem, 2)[: (n_elem // 2)].float() / n_elem))
337
- t = torch.arange(seq_len, device=freqs.device)
338
- freqs = torch.outer(t, freqs)
339
- freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
340
- cache = torch.stack([freqs_cis.real, freqs_cis.imag], dim=-1)
341
- return cache.to(dtype=dtype)
342
-
343
-
344
- def apply_rotary_emb(x: Tensor, freqs_cis: Tensor) -> Tensor:
345
- xshaped = x.float().reshape(*x.shape[:-1], -1, 2)
346
- freqs_cis = freqs_cis.view(1, xshaped.size(1), 1, xshaped.size(3), 2)
347
- x_out2 = torch.stack(
348
- [
349
- xshaped[..., 0] * freqs_cis[..., 0] - xshaped[..., 1] * freqs_cis[..., 1],
350
- xshaped[..., 1] * freqs_cis[..., 0] + xshaped[..., 0] * freqs_cis[..., 1],
351
- ],
352
- -1,
353
- )
354
-
355
- x_out2 = x_out2.flatten(3)
356
- return x_out2.type_as(x)
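As a shape reference for precompute_freqs_cis and apply_rotary_emb above, the following minimal sketch (illustrative sizes only, not part of the deleted file) shows how the rotary cache lines up with the [B, S, H, D] query layout built in Attention.forward.

    import torch

    bsz, seq_len, n_head, head_dim = 1, 16, 2, 8

    # Cache of (cos, sin) pairs: shape [seq_len, head_dim // 2, 2]
    freqs_cis = precompute_freqs_cis(seq_len, head_dim)

    q = torch.randn(bsz, seq_len, n_head, head_dim)   # as viewed in Attention.forward
    q_rot = apply_rotary_emb(q, freqs_cis)            # rotates each (even, odd) pair per position
    assert q_rot.shape == q.shape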
 
modules/gpt_fast/quantize.py DELETED
@@ -1,622 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
- import time
7
- from pathlib import Path
8
-
9
- import torch
10
- import torch.nn as nn
11
- import torch.nn.functional as F
12
- from tokenizer import get_tokenizer
13
-
14
- try:
15
- from GPTQ import GenericGPTQRunner, InputRecorder
16
- from eval import get_task_dict, evaluate, lm_eval
17
- except:
18
- pass
19
-
20
- from model import Transformer
21
-
22
- ##### Quantization Primitives ######
23
-
24
- def dynamically_quantize_per_channel(x, quant_min, quant_max, target_dtype):
25
- # assumes symmetric quantization
26
- # assumes axis == 0
27
- # assumes dense memory format
28
- # TODO(future): relax ^ as needed
29
-
30
- # default setup for affine quantization of activations
31
- eps = torch.finfo(torch.float32).eps
32
-
33
- # get min and max
34
- min_val, max_val = torch.aminmax(x, dim=1)
35
-
36
- # calculate scales and zero_points based on min and max
37
- # reference: https://fburl.com/code/srbiybme
38
- min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
39
- max_val_pos = torch.max(max_val, torch.zeros_like(max_val))
40
- device = min_val_neg.device
41
-
42
- # reference: https://fburl.com/code/4wll53rk
43
- max_val_pos = torch.max(-min_val_neg, max_val_pos)
44
- scales = max_val_pos / (float(quant_max - quant_min) / 2)
45
- # ensure scales is the same dtype as the original tensor
46
- scales = torch.clamp(scales, min=eps).to(x.dtype)
47
- zero_points = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device)
48
-
49
- # quantize based on qmin/qmax/scales/zp
50
- # reference: https://www.internalfb.com/code/fbsource/[8edc275012b1]/fbcode/caffe2/torch/ao/quantization/fx/_decomposed.py?lines=63
51
- x_div = x / scales.unsqueeze(-1)
52
- x_round = torch.round(x_div)
53
- x_zp = x_round + zero_points.unsqueeze(-1)
54
- quant = torch.clamp(x_zp, quant_min, quant_max).to(target_dtype)
55
-
56
- return quant, scales, zero_points
57
-
58
- def get_group_qparams(w, n_bit=4, groupsize=128):
59
- # needed for GPTQ with padding
60
- if groupsize > w.shape[-1]:
61
- groupsize = w.shape[-1]
62
- assert groupsize > 1
63
- assert w.shape[-1] % groupsize == 0
64
- assert w.dim() == 2
65
-
66
- to_quant = w.reshape(-1, groupsize)
67
- assert torch.isnan(to_quant).sum() == 0
68
-
69
- max_val = to_quant.amax(dim=1, keepdim=True)
70
- min_val = to_quant.amin(dim=1, keepdim=True)
71
- max_int = 2**n_bit - 1
72
- scales = (max_val - min_val).clamp(min=1e-6) / max_int
73
- zeros = min_val + scales * (2 ** (n_bit - 1))
74
- return scales.to(torch.bfloat16).reshape(w.shape[0], -1), zeros.to(
75
- torch.bfloat16
76
- ).reshape(w.shape[0], -1)
77
-
78
-
79
- def pack_scales_and_zeros(scales, zeros):
80
- assert scales.shape == zeros.shape
81
- assert scales.dtype == torch.bfloat16
82
- assert zeros.dtype == torch.bfloat16
83
- return (
84
- torch.cat(
85
- [
86
- scales.reshape(scales.size(0), scales.size(1), 1),
87
- zeros.reshape(zeros.size(0), zeros.size(1), 1),
88
- ],
89
- 2,
90
- )
91
- .transpose(0, 1)
92
- .contiguous()
93
- )
94
-
95
-
96
- def unpack_scales_and_zeros(scales_and_zeros):
97
- assert len(scales_and_zeros.shape) == 3 and scales_and_zeros.shape[2] == 2
98
- assert scales_and_zeros.dtype == torch.float
99
- return torch.split(scales_and_zeros.transpose(0, 1), 1, 2)
100
-
101
-
102
- def group_quantize_tensor_from_qparams(w, scales, zeros, n_bit=4, groupsize=128):
103
- assert groupsize > 1
104
- # needed for GPTQ single column quantize
105
- if groupsize > w.shape[-1] and scales.shape[-1] == 1:
106
- groupsize = w.shape[-1]
107
-
108
- assert w.shape[-1] % groupsize == 0
109
- assert w.dim() == 2
110
-
111
- to_quant = w.reshape(-1, groupsize)
112
- assert torch.isnan(to_quant).sum() == 0
113
-
114
- scales = scales.reshape(-1, 1)
115
- zeros = zeros.reshape(-1, 1)
116
- min_val = zeros - scales * (2 ** (n_bit - 1))
117
- max_int = 2**n_bit - 1
118
- min_int = 0
119
- w_int32 = (
120
- to_quant.sub(min_val)
121
- .div(scales)
122
- .round()
123
- .clamp_(min_int, max_int)
124
- .to(torch.int32)
125
- .reshape_as(w)
126
- )
127
-
128
- return w_int32
129
-
130
-
131
- def group_quantize_tensor(w, n_bit=4, groupsize=128):
132
- scales, zeros = get_group_qparams(w, n_bit, groupsize)
133
- w_int32 = group_quantize_tensor_from_qparams(w, scales, zeros, n_bit, groupsize)
134
- scales_and_zeros = pack_scales_and_zeros(scales, zeros)
135
- return w_int32, scales_and_zeros
136
-
137
-
138
- def group_dequantize_tensor_from_qparams(
139
- w_int32, scales, zeros, n_bit=4, groupsize=128
140
- ):
141
- assert groupsize > 1
142
- # needed for GPTQ single column dequantize
143
- if groupsize > w_int32.shape[-1] and scales.shape[-1] == 1:
144
- groupsize = w_int32.shape[-1]
145
- assert w_int32.shape[-1] % groupsize == 0
146
- assert w_int32.dim() == 2
147
-
148
- w_int32_grouped = w_int32.reshape(-1, groupsize)
149
- scales = scales.reshape(-1, 1)
150
- zeros = zeros.reshape(-1, 1)
151
-
152
- w_dq = (
153
- w_int32_grouped.sub(2 ** (n_bit - 1)).mul(scales).add(zeros).reshape_as(w_int32)
154
- )
155
- return w_dq
156
-
157
-
158
- def group_dequantize_tensor(w_int32, scales_and_zeros, n_bit=4, groupsize=128):
159
- scales, zeros = unpack_scales_and_zeros(scales_and_zeros)
160
- return group_dequantize_tensor_from_qparams(
161
- w_int32, scales, zeros, n_bit, groupsize
162
- )
163
-
164
- class QuantHandler:
165
- def __init__(self, mod):
166
- self.mod = mod
167
-
168
- def create_quantized_state_dict(self) -> "StateDict":
169
- pass
170
-
171
- def convert_for_runtime(self) -> "nn.Module":
172
- pass
173
-
174
- class GPTQQuantHandler(QuantHandler):
175
- """
176
- This class implements a GPTQ QuantHandler that can be used to apply GPTQ to a model in concert with the GenericGPTQRunner class.
177
- Unlike the base QuantHandler class, the user does not need to implement the create_quantized_state_dict, instead they have to reimplement
178
- __init__ such that it defines the functions for the quantization mode. User is expected to reimplement convert_for_runtime.
179
-
180
- The following functions (which must be defined in __init__) are used to define the quantization mode for both GPTQ and
181
- create_quantized_state_dict. Here is a description of each function.
182
-
183
- get_qparams_func:
184
- A function that calculates the quantization qparams for an input tensor.
185
- Args:
186
- weight: A 2d weight tensor with non-integer dtype.
187
- Returns:
188
- qparams: it can have any format but will need to be handled by the other defined functions below.
189
-
190
- quantize_func:
191
- A function that applies quantization to an input tensor. It should be noted
192
- that this function needs to be able to handle quantizing the entire weight tensor, a single group,
193
- or a single column.
194
- Args:
195
- weight: A 2d weight tensor with non-integer dtype.
196
- qparams: the output from get_qparams_func
197
- Returns:
198
- quantized_weight: A 2d quantized weight tensor (generally with an integer dtype)
199
-
200
-
201
- dequantize_func:
202
- A function that dequantizes an input quantized weight tensor. It should be noted
203
- that this function needs to be able to handle dequantizing the entire weight tensor, a single group,
204
- or a single column.
205
- Args:
206
- quantized_weight: A 2d quantized weight tensor (generally with an integer dtype)
207
- qparams: the output from get_qparams_func
208
- Returns:
209
- weight: A 2d weight tensor with non-integer dtype.
210
-
211
- combine_qparams_list_func:
212
- A function that combines several qparams into one qparam.
213
- Args:
214
- qparams_list: a list of qparams objects, each obtained by calling get_qparams_func
215
- on a single group from a weight tensor
216
- Returns:
217
- qparams: an object of the same format as the qparams above.
218
-
219
- skip_layer_func:
220
- A function that determines which linear layers should be skipped during GPTQ
221
- Args:
222
- weight: A 2d weight tensor with non-integer dtype.
223
- Returns:
224
- skip: boolean indicating whether layer should be skipped
225
-
226
- make_names_and_values_dict_func:
227
- A function that prepares the qparams and quantized_weight and creates a dictionary indicating how they
228
- should be inserted into the state_dict. Generally any packing of the weight and qparams should be done here.
229
- Args:
230
- quantized_weight: A 2d quantized weight tensor (generally with an integer dtype)
231
- qparams: the output from get_qparams_func
232
- Returns:
233
- names_and_values_dict: a dictionary mapping the name of the parameters of the quantized module to the
234
- corresponding quantized weights and qparams.
235
- """
236
- def __init__(self):
237
- assert self.mod is not None
238
- assert self.get_qparams_func is not None
239
- assert self.quantize_func is not None
240
- assert self.dequantize_func is not None
241
- assert self.combine_qparams_list_func is not None
242
- assert self.make_names_and_values_dict_func is not None
243
-
244
- @staticmethod
245
- def get_inputs(model, tokenizer, calibration_tasks, calibration_limit, calibration_seq_length, pad_calibration_inputs) -> "MultiInput":
246
- input_recorder = InputRecorder(
247
- model,
248
- tokenizer,
249
- calibration_seq_length,
250
- pad_calibration_inputs,
251
- )
252
-
253
- try:
254
- lm_eval.tasks.initialize_tasks()
255
- except:
256
- pass
257
- task_dict = get_task_dict(calibration_tasks)
258
- print("Obtaining GPTQ calibration inputs on: ", calibration_tasks)
259
-
260
- evaluate(
261
- input_recorder,
262
- task_dict,
263
- limit=calibration_limit,
264
- )
265
- inputs = input_recorder.get_recorded_inputs()
266
- assert inputs is not None, (
267
- f"No inputs were collected, use a task other than {calibration_tasks}, "+
268
- f"use option pad_calibration_inputs, or decrease calibration_sequence_length (currently "+
269
- f"{calibration_seq_length})"
270
- )
271
- print(f"Obtained {len(inputs[0].values)} calibration samples")
272
- return inputs
273
-
274
- @torch.no_grad()
275
- def create_quantized_state_dict(
276
- self,
277
- tokenizer,
278
- blocksize,
279
- percdamp,
280
- groupsize,
281
- calibration_tasks,
282
- calibration_limit,
283
- calibration_seq_length,
284
- pad_calibration_inputs,
285
- ) -> "StateDict":
286
- inputs = GPTQQuantHandler.get_inputs(self.mod, tokenizer, calibration_tasks, calibration_limit, calibration_seq_length, pad_calibration_inputs)
287
- print("Tracing model for GPTQ")
288
- GPTQ_runner = GenericGPTQRunner(
289
- self.mod,
290
- inputs,
291
- blocksize,
292
- percdamp,
293
- groupsize,
294
- ).configure_quantization_mode(
295
- self.get_qparams_func,
296
- self.quantize_func,
297
- self.dequantize_func,
298
- self.combine_qparams_list_func,
299
- self.make_names_and_values_dict_func,
300
- self.skip_layer_func
301
- )
302
-
303
- print("Applying GPTQ to weights")
304
- GPTQ_runner.run()
305
- return GPTQ_runner.get_quantized_state_dict()
306
-
307
- def convert_for_runtime(self) -> "nn.Module":
308
- pass
309
-
310
- ##### Weight-only int8 per-channel quantized code ######
311
-
312
- def replace_linear_weight_only_int8_per_channel(module):
313
- for name, child in module.named_children():
314
- if isinstance(child, nn.Linear):
315
- setattr(module, name, WeightOnlyInt8Linear(child.in_features, child.out_features))
316
- else:
317
- replace_linear_weight_only_int8_per_channel(child)
318
-
319
- class WeightOnlyInt8QuantHandler:
320
- def __init__(self, mod):
321
- self.mod = mod
322
-
323
- @torch.no_grad()
324
- def create_quantized_state_dict(self):
325
- cur_state_dict = self.mod.state_dict()
326
- for fqn, mod in self.mod.named_modules():
327
- if isinstance(mod, torch.nn.Linear):
328
- int8_weight, scales, _ = dynamically_quantize_per_channel(mod.weight.float(), -128, 127, torch.int8)
329
- cur_state_dict[f"{fqn}.weight"] = int8_weight
330
- cur_state_dict[f"{fqn}.scales"] = scales.to(mod.weight.dtype)
331
-
332
- return cur_state_dict
333
-
334
- def convert_for_runtime(self):
335
- replace_linear_weight_only_int8_per_channel(self.mod)
336
- return self.mod
337
-
338
-
339
- class WeightOnlyInt8Linear(torch.nn.Module):
340
- __constants__ = ['in_features', 'out_features']
341
- in_features: int
342
- out_features: int
343
- weight: torch.Tensor
344
-
345
- def __init__(self, in_features: int, out_features: int, bias: bool = True,
346
- device=None, dtype=None) -> None:
347
- factory_kwargs = {'device': device, 'dtype': dtype}
348
- super().__init__()
349
- self.in_features = in_features
350
- self.out_features = out_features
351
- self.register_buffer("weight", torch.empty((out_features, in_features), dtype=torch.int8))
352
- self.register_buffer("scales", torch.ones(out_features, dtype=torch.bfloat16))
353
-
354
- def forward(self, input: torch.Tensor) -> torch.Tensor:
355
- return F.linear(input, self.weight.to(dtype=input.dtype)) * self.scales
356
-
357
- ##### weight only int4 per channel groupwise quantized code ######
358
-
359
- def prepare_int4_weight_and_scales_and_zeros(weight_bf16, groupsize, inner_k_tiles):
360
- weight_int32, scales_and_zeros = group_quantize_tensor(
361
- weight_bf16, n_bit=4, groupsize=groupsize
362
- )
363
- weight_int4pack = torch.ops.aten._convert_weight_to_int4pack(weight_int32, inner_k_tiles)
364
- return weight_int4pack, scales_and_zeros
365
-
366
-
367
- def linear_forward_int4(x, weight_int4pack, scales_and_zeros, out_features, groupsize):
368
- origin_x_size = x.size()
369
- x = x.reshape(-1, origin_x_size[-1])
370
- c = torch.ops.aten._weight_int4pack_mm(x, weight_int4pack, groupsize, scales_and_zeros)
371
- new_shape = origin_x_size[:-1] + (out_features,)
372
- c = c.reshape(new_shape)
373
- return c
374
-
375
-
376
- def _check_linear_int4_k(k, groupsize = 1, inner_k_tiles = 1):
377
- return k % groupsize == 0 and k % (inner_k_tiles * 16) == 0
378
-
379
- def replace_linear_int4(module, groupsize, inner_k_tiles, padding):
380
- for name, child in module.named_children():
381
- if isinstance(child, nn.Linear):
382
- if _check_linear_int4_k(child.in_features, groupsize, inner_k_tiles):
383
- setattr(module, name, WeightOnlyInt4Linear(
384
- child.in_features, child.out_features, bias=False,
385
- groupsize=groupsize, inner_k_tiles=inner_k_tiles, padding=False,
386
- ))
387
- elif padding:
388
- setattr(module, name, WeightOnlyInt4Linear(
389
- child.in_features, child.out_features, bias=False,
390
- groupsize=groupsize, inner_k_tiles=inner_k_tiles, padding=True,
391
- ))
392
- else:
393
- replace_linear_int4(child, groupsize, inner_k_tiles, padding)
394
-
395
-
396
- class WeightOnlyInt4QuantHandler:
397
- def __init__(self, mod, groupsize=128, inner_k_tiles=8, padding=True):
398
- self.mod = mod
399
- self.groupsize = groupsize
400
- self.inner_k_tiles = inner_k_tiles
401
- self.padding = padding
402
- assert groupsize in [32, 64, 128, 256]
403
- assert inner_k_tiles in [2, 4, 8]
404
-
405
- @torch.no_grad()
406
- def create_quantized_state_dict(self, use_cuda = True):
407
- if use_cuda:
408
- device="cuda"
409
- else:
410
- device="cpu"
411
-
412
- cur_state_dict = self.mod.state_dict()
413
- for fqn, mod in self.mod.named_modules():
414
- if isinstance(mod, torch.nn.Linear):
415
- assert not mod.bias
416
- out_features = mod.out_features
417
- in_features = mod.in_features
418
- assert out_features % 8 == 0, "require out_features % 8 == 0"
419
- print(f"linear: {fqn}, in={in_features}, out={out_features}")
420
-
421
- weight = mod.weight.data
422
- if not _check_linear_int4_k(in_features, self.groupsize, self.inner_k_tiles):
423
- if self.padding:
424
- from model import find_multiple
425
- import torch.nn.functional as F
426
- print(f"warning: {fqn} is padded to satisfy in_features % 1024 == 0")
427
- padded_in_features = find_multiple(in_features, 1024)
428
- weight = F.pad(weight, pad=(0, padded_in_features - in_features))
429
- else:
430
- print(f"warning: {fqn} is skipped, int4 requires that in_features is 32, 64, or is divisible by 1024, " +
431
- "and that groupsize and inner_k_tiles*16 evenly divide into it")
432
- continue
433
- weight_int4pack, scales_and_zeros = prepare_int4_weight_and_scales_and_zeros(
434
- weight.to(torch.bfloat16).to(device=device), self.groupsize, self.inner_k_tiles
435
- )
436
- cur_state_dict[f"{fqn}.weight"] = weight_int4pack.to('cpu')
437
- cur_state_dict[f"{fqn}.scales_and_zeros"] = scales_and_zeros.to('cpu')
438
-
439
- return cur_state_dict
440
-
441
- def convert_for_runtime(self):
442
- replace_linear_int4(self.mod, self.groupsize, self.inner_k_tiles, self.padding)
443
- return self.mod
444
-
445
- class WeightOnlyInt4GPTQQuantHandler(GPTQQuantHandler):
446
- def __init__(self, mod, groupsize=128, inner_k_tiles=8, padding=True):
447
- from model import find_multiple
448
- self.mod = mod
449
- self.groupsize = groupsize
450
- self.inner_k_tiles = inner_k_tiles
451
- self.padding = padding
452
- self.get_qparams_func = lambda w: get_group_qparams(w, 4, groupsize)
453
- self.quantize_func = lambda w, qparams: \
454
- group_quantize_tensor_from_qparams(w, qparams[0], qparams[1], 4, groupsize)
455
- self.dequantize_func = lambda q, qparams: \
456
- group_dequantize_tensor_from_qparams(q, qparams[0], qparams[1], 4, groupsize).float()
457
- self.combine_qparams_list_func = lambda qparams_list: \
458
- [torch.cat(x, dim=1) for x in zip(*qparams_list)]
459
- # skip unless padding=True or it's correctly sized
460
- self.skip_layer_func = lambda linear_weight: not (
461
- _check_linear_int4_k(linear_weight.shape[-1], groupsize, inner_k_tiles) or padding
462
- )
463
- # we need to do the padding here, both for q and the qparams if necessary
464
- def make_names_and_values_dict_func(q, qparams):
465
- k = q.shape[1]
466
- new_k = find_multiple(k, 1024)
467
- # how much we need to pad the weight
468
- delta_k = new_k - q.shape[1]
469
- final_q = torch.ops.aten._convert_weight_to_int4pack(F.pad(q, pad=(0, delta_k)), inner_k_tiles)
470
- scales_and_zeros = pack_scales_and_zeros(*qparams)
471
- # how many new groups we need for padded weight
472
- delta_groups = new_k // groupsize - scales_and_zeros.shape[0]
473
- final_s_and_z = F.pad(scales_and_zeros, pad=(0,0,0,0,0, delta_groups), value=1)
474
- return {"weight": final_q, "scales_and_zeros": final_s_and_z}
475
- self.make_names_and_values_dict_func = make_names_and_values_dict_func
476
- super().__init__()
477
-
478
-
479
- def convert_for_runtime(self):
480
- replace_linear_int4(self.mod, self.groupsize, self.inner_k_tiles, self.padding)
481
- return self.mod
482
-
483
- class WeightOnlyInt4Linear(torch.nn.Module):
484
- __constants__ = ['in_features', 'out_features']
485
- in_features: int
486
- out_features: int
487
- weight: torch.Tensor
488
-
489
- def __init__(
490
- self, in_features: int, out_features: int,
491
- bias=True, device=None, dtype=None, groupsize: int = 128, inner_k_tiles: int = 8, padding: bool = True,
492
- ) -> None:
493
- super().__init__()
494
- self.padding = padding
495
- if padding:
496
- from model import find_multiple
497
- self.origin_in_features = in_features
498
- in_features = find_multiple(in_features, 1024)
499
-
500
- self.in_features = in_features
501
- self.out_features = out_features
502
- assert not bias, "require bias=False"
503
- self.groupsize = groupsize
504
- self.inner_k_tiles = inner_k_tiles
505
-
506
- assert out_features % 8 == 0, "require out_features % 8 == 0"
507
- assert in_features % (inner_k_tiles * 16) == 0, "require in_features % (innerKTiles * 16) == 0"
508
- self.register_buffer(
509
- "weight",
510
- torch.empty((out_features // 8, in_features // (inner_k_tiles * 16), 32, inner_k_tiles // 2), dtype=torch.int32)
511
- )
512
- self.register_buffer(
513
- "scales_and_zeros",
514
- torch.empty((in_features // groupsize, out_features, 2), dtype=torch.bfloat16)
515
- )
516
-
517
- def forward(self, input: torch.Tensor) -> torch.Tensor:
518
- input = input.to(torch.bfloat16)
519
- if self.padding:
520
- import torch.nn.functional as F
521
- input = F.pad(input, pad=(0, self.in_features - self.origin_in_features))
522
- return linear_forward_int4(
523
- input,
524
- self.weight, self.scales_and_zeros, self.out_features, self.groupsize
525
- )
526
-
527
-
528
- def quantize(
529
- checkpoint_path: Path = Path("checkpoints/meta-llama/Llama-2-7b-chat-hf/model.pth"),
530
- mode: str = 'int8',
531
- # following arguments only available when setting int4 quantization.
532
- groupsize: int = 128,
533
- # following arguments only used for GPTQ
534
- calibration_tasks: list = ["hellaswag"],
535
- calibration_limit: int = 1000,
536
- calibration_seq_length: int = 100,
537
- pad_calibration_inputs: bool = False,
538
- percdamp: float = .01,
539
- blocksize: int = 128,
540
- label: str = '',
541
- ) -> None:
542
- assert checkpoint_path.is_file(), checkpoint_path
543
-
544
- device = 'cpu'
545
- precision = torch.bfloat16
546
-
547
- print("Loading model ...")
548
- t0 = time.time()
549
-
550
- with torch.device('meta'):
551
- model = Transformer.from_name(checkpoint_path.parent.name)
552
-
553
- checkpoint = torch.load(str(checkpoint_path), mmap=True, weights_only=True)
554
- model.load_state_dict(checkpoint, assign=True)
555
- model = model.to(dtype=precision, device=device)
556
-
557
- if mode == 'int8':
558
- print("Quantizing model weights for int8 weight-only symmetric per-channel quantization")
559
- quant_handler = WeightOnlyInt8QuantHandler(model)
560
- quantized_state_dict = quant_handler.create_quantized_state_dict()
561
-
562
- dir_name = checkpoint_path.parent
563
- base_name = checkpoint_path.name
564
- new_base_name = base_name.replace('.pth', f'{label}int8.pth')
565
-
566
- elif mode == 'int4':
567
- print("Quantizing model weights for int4 weight-only affine per-channel groupwise quantization")
568
- quant_handler = WeightOnlyInt4QuantHandler(model, groupsize)
569
- quantized_state_dict = quant_handler.create_quantized_state_dict()
570
-
571
- dir_name = checkpoint_path.parent
572
- base_name = checkpoint_path.name
573
- new_base_name = base_name.replace('.pth', f"{label}int4.g{groupsize}.pth")
574
-
575
- elif mode == 'int4-gptq':
576
- print("Quantizing model weights for int4 weight-only affine per-channel groupwise quantization using GPTQ...")
577
- quant_handler = WeightOnlyInt4GPTQQuantHandler(model, groupsize)
578
-
579
- tokenizer_path = checkpoint_path.parent / "tokenizer.model"
580
- assert tokenizer_path.is_file(), str(tokenizer_path)
581
- tokenizer = get_tokenizer(tokenizer_path, checkpoint_path)
582
-
583
- quantized_state_dict = quant_handler.create_quantized_state_dict(
584
- tokenizer,
585
- blocksize,
586
- percdamp,
587
- groupsize,
588
- calibration_tasks,
589
- calibration_limit,
590
- calibration_seq_length,
591
- pad_calibration_inputs
592
- )
593
-
594
- dir_name = checkpoint_path.parent
595
- base_name = checkpoint_path.name
596
- new_base_name = base_name.replace('.pth', f"{label}int4-gptq.g{groupsize}.pth")
597
- else:
598
- raise ValueError(f"Invalid quantization mode {mode}; needs to be one of [int8, int4, int4-gptq]")
599
-
600
- quantize_path = dir_name / new_base_name
601
- print(f"Writing quantized weights to {quantize_path}")
602
- quantize_path.unlink(missing_ok=True) # remove existing file if one already there
603
- torch.save(quantized_state_dict, quantize_path)
604
- print(f"Quantization complete took {time.time() - t0:.02f} seconds")
605
- return
606
-
607
- if __name__ == '__main__':
608
- import argparse
609
- parser = argparse.ArgumentParser(description='Quantize a model.')
610
- parser.add_argument('--checkpoint_path', type=Path, default=Path("checkpoints/meta-llama/Llama-2-7b-chat-hf/model.pth"), help='Path to the model checkpoint to be quantized.')
611
- parser.add_argument('--mode', '-q', type=str, default='int8', choices=['int8', 'int4', 'int4-gptq'], help='type of quantization to perform')
612
- parser.add_argument('--groupsize', type=int, default=32, help='Group size for int4 quantization.')
613
- parser.add_argument('--calibration_tasks', type=str, nargs='+', default=['wikitext'], help='tasks to do gptq calibration on, if doing gptq')
614
- parser.add_argument('--calibration_limit', type=int, default=1000, help='number of samples to use for gptq calibration')
615
- parser.add_argument('--calibration_seq_length', type=int, default=100, help='length of sequences to use for gptq calibration')
616
- parser.add_argument('--pad_calibration_inputs', type=bool, default=False, help='pads sequences shorter than calibration_seq_length to that length, yielding more calibration inputs but running much slower')
617
- parser.add_argument('--percdamp', type=float, default=.01, help='gptq percentage dampening')
618
- parser.add_argument('--blocksize', type=int, default=128, help='blocksize for gptq')
619
- parser.add_argument('--label', type=str, default='_', help='label to add to output filename')
620
-
621
- args = parser.parse_args()
622
- quantize(args.checkpoint_path, args.mode, args.groupsize, args.calibration_tasks, args.calibration_limit, args.calibration_seq_length, args.pad_calibration_inputs, args.percdamp, args.blocksize, args.label)
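As a sanity check on the int8 weight-only path above, the following sketch (not part of the file) quantizes a weight per channel with dynamically_quantize_per_channel and reconstructs it the same way WeightOnlyInt8Linear.forward effectively does, by rescaling with the per-output-channel scales.

    import torch

    w = torch.randn(4, 16)  # [out_features, in_features]
    w_int8, scales, _ = dynamically_quantize_per_channel(w, -128, 127, torch.int8)

    # Reconstruction: int8 weight times per-output-channel scale.
    w_hat = w_int8.to(w.dtype) * scales.unsqueeze(-1)
    print((w - w_hat).abs().max())  # small, on the order of one quantization step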
 
modules/layers.py DELETED
@@ -1,354 +0,0 @@
1
- import math
2
- import torch
3
- from torch import nn
4
- from typing import Optional, Any
5
- from torch import Tensor
6
- import torch.nn.functional as F
7
- import torchaudio
8
- import torchaudio.functional as audio_F
9
-
10
- import random
11
- random.seed(0)
12
-
13
-
14
- def _get_activation_fn(activ):
15
- if activ == 'relu':
16
- return nn.ReLU()
17
- elif activ == 'lrelu':
18
- return nn.LeakyReLU(0.2)
19
- elif activ == 'swish':
20
- return lambda x: x*torch.sigmoid(x)
21
- else:
22
- raise RuntimeError('Unexpected activ type %s, expected [relu, lrelu, swish]' % activ)
23
-
24
- class LinearNorm(torch.nn.Module):
25
- def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
26
- super(LinearNorm, self).__init__()
27
- self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
28
-
29
- torch.nn.init.xavier_uniform_(
30
- self.linear_layer.weight,
31
- gain=torch.nn.init.calculate_gain(w_init_gain))
32
-
33
- def forward(self, x):
34
- return self.linear_layer(x)
35
-
36
-
37
- class ConvNorm(torch.nn.Module):
38
- def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
39
- padding=None, dilation=1, bias=True, w_init_gain='linear', param=None):
40
- super(ConvNorm, self).__init__()
41
- if padding is None:
42
- assert(kernel_size % 2 == 1)
43
- padding = int(dilation * (kernel_size - 1) / 2)
44
-
45
- self.conv = torch.nn.Conv1d(in_channels, out_channels,
46
- kernel_size=kernel_size, stride=stride,
47
- padding=padding, dilation=dilation,
48
- bias=bias)
49
-
50
- torch.nn.init.xavier_uniform_(
51
- self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain, param=param))
52
-
53
- def forward(self, signal):
54
- conv_signal = self.conv(signal)
55
- return conv_signal
56
-
57
- class CausualConv(nn.Module):
58
- def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=1, dilation=1, bias=True, w_init_gain='linear', param=None):
59
- super(CausualConv, self).__init__()
60
- if padding is None:
61
- assert(kernel_size % 2 == 1)
62
- self.padding = int(dilation * (kernel_size - 1) / 2) * 2
63
- else:
64
- self.padding = padding * 2
65
- self.conv = nn.Conv1d(in_channels, out_channels,
66
- kernel_size=kernel_size, stride=stride,
67
- padding=self.padding,
68
- dilation=dilation,
69
- bias=bias)
70
-
71
- torch.nn.init.xavier_uniform_(
72
- self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain, param=param))
73
-
74
- def forward(self, x):
75
- x = self.conv(x)
76
- x = x[:, :, :-self.padding]
77
- return x
78
-
79
- class CausualBlock(nn.Module):
80
- def __init__(self, hidden_dim, n_conv=3, dropout_p=0.2, activ='lrelu'):
81
- super(CausualBlock, self).__init__()
82
- self.blocks = nn.ModuleList([
83
- self._get_conv(hidden_dim, dilation=3**i, activ=activ, dropout_p=dropout_p)
84
- for i in range(n_conv)])
85
-
86
- def forward(self, x):
87
- for block in self.blocks:
88
- res = x
89
- x = block(x)
90
- x += res
91
- return x
92
-
93
- def _get_conv(self, hidden_dim, dilation, activ='lrelu', dropout_p=0.2):
94
- layers = [
95
- CausualConv(hidden_dim, hidden_dim, kernel_size=3, padding=dilation, dilation=dilation),
96
- _get_activation_fn(activ),
97
- nn.BatchNorm1d(hidden_dim),
98
- nn.Dropout(p=dropout_p),
99
- CausualConv(hidden_dim, hidden_dim, kernel_size=3, padding=1, dilation=1),
100
- _get_activation_fn(activ),
101
- nn.Dropout(p=dropout_p)
102
- ]
103
- return nn.Sequential(*layers)
104
-
105
- class ConvBlock(nn.Module):
106
- def __init__(self, hidden_dim, n_conv=3, dropout_p=0.2, activ='relu'):
107
- super().__init__()
108
- self._n_groups = 8
109
- self.blocks = nn.ModuleList([
110
- self._get_conv(hidden_dim, dilation=3**i, activ=activ, dropout_p=dropout_p)
111
- for i in range(n_conv)])
112
-
113
-
114
- def forward(self, x):
115
- for block in self.blocks:
116
- res = x
117
- x = block(x)
118
- x += res
119
- return x
120
-
121
- def _get_conv(self, hidden_dim, dilation, activ='relu', dropout_p=0.2):
122
- layers = [
123
- ConvNorm(hidden_dim, hidden_dim, kernel_size=3, padding=dilation, dilation=dilation),
124
- _get_activation_fn(activ),
125
- nn.GroupNorm(num_groups=self._n_groups, num_channels=hidden_dim),
126
- nn.Dropout(p=dropout_p),
127
- ConvNorm(hidden_dim, hidden_dim, kernel_size=3, padding=1, dilation=1),
128
- _get_activation_fn(activ),
129
- nn.Dropout(p=dropout_p)
130
- ]
131
- return nn.Sequential(*layers)
132
-
133
- class LocationLayer(nn.Module):
134
- def __init__(self, attention_n_filters, attention_kernel_size,
135
- attention_dim):
136
- super(LocationLayer, self).__init__()
137
- padding = int((attention_kernel_size - 1) / 2)
138
- self.location_conv = ConvNorm(2, attention_n_filters,
139
- kernel_size=attention_kernel_size,
140
- padding=padding, bias=False, stride=1,
141
- dilation=1)
142
- self.location_dense = LinearNorm(attention_n_filters, attention_dim,
143
- bias=False, w_init_gain='tanh')
144
-
145
- def forward(self, attention_weights_cat):
146
- processed_attention = self.location_conv(attention_weights_cat)
147
- processed_attention = processed_attention.transpose(1, 2)
148
- processed_attention = self.location_dense(processed_attention)
149
- return processed_attention
150
-
151
-
152
- class Attention(nn.Module):
153
- def __init__(self, attention_rnn_dim, embedding_dim, attention_dim,
154
- attention_location_n_filters, attention_location_kernel_size):
155
- super(Attention, self).__init__()
156
- self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
157
- bias=False, w_init_gain='tanh')
158
- self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
159
- w_init_gain='tanh')
160
- self.v = LinearNorm(attention_dim, 1, bias=False)
161
- self.location_layer = LocationLayer(attention_location_n_filters,
162
- attention_location_kernel_size,
163
- attention_dim)
164
- self.score_mask_value = -float("inf")
165
-
166
- def get_alignment_energies(self, query, processed_memory,
167
- attention_weights_cat):
168
- """
169
- PARAMS
170
- ------
171
- query: decoder output (batch, n_mel_channels * n_frames_per_step)
172
- processed_memory: processed encoder outputs (B, T_in, attention_dim)
173
- attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)
174
- RETURNS
175
- -------
176
- alignment (batch, max_time)
177
- """
178
-
179
- processed_query = self.query_layer(query.unsqueeze(1))
180
- processed_attention_weights = self.location_layer(attention_weights_cat)
181
- energies = self.v(torch.tanh(
182
- processed_query + processed_attention_weights + processed_memory))
183
-
184
- energies = energies.squeeze(-1)
185
- return energies
186
-
187
- def forward(self, attention_hidden_state, memory, processed_memory,
188
- attention_weights_cat, mask):
189
- """
190
- PARAMS
191
- ------
192
- attention_hidden_state: attention rnn last output
193
- memory: encoder outputs
194
- processed_memory: processed encoder outputs
195
- attention_weights_cat: previous and cumulative attention weights
196
- mask: binary mask for padded data
197
- """
198
- alignment = self.get_alignment_energies(
199
- attention_hidden_state, processed_memory, attention_weights_cat)
200
-
201
- if mask is not None:
202
- alignment.data.masked_fill_(mask, self.score_mask_value)
203
-
204
- attention_weights = F.softmax(alignment, dim=1)
205
- attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
206
- attention_context = attention_context.squeeze(1)
207
-
208
- return attention_context, attention_weights
209
-
210
-
211
- class ForwardAttentionV2(nn.Module):
212
- def __init__(self, attention_rnn_dim, embedding_dim, attention_dim,
213
- attention_location_n_filters, attention_location_kernel_size):
214
- super(ForwardAttentionV2, self).__init__()
215
- self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
216
- bias=False, w_init_gain='tanh')
217
- self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
218
- w_init_gain='tanh')
219
- self.v = LinearNorm(attention_dim, 1, bias=False)
220
- self.location_layer = LocationLayer(attention_location_n_filters,
221
- attention_location_kernel_size,
222
- attention_dim)
223
- self.score_mask_value = -float(1e20)
224
-
225
- def get_alignment_energies(self, query, processed_memory,
226
- attention_weights_cat):
227
- """
228
- PARAMS
229
- ------
230
- query: decoder output (batch, n_mel_channels * n_frames_per_step)
231
- processed_memory: processed encoder outputs (B, T_in, attention_dim)
232
- attention_weights_cat: prev. and cumulative att weights (B, 2, max_time)
233
- RETURNS
234
- -------
235
- alignment (batch, max_time)
236
- """
237
-
238
- processed_query = self.query_layer(query.unsqueeze(1))
239
- processed_attention_weights = self.location_layer(attention_weights_cat)
240
- energies = self.v(torch.tanh(
241
- processed_query + processed_attention_weights + processed_memory))
242
-
243
- energies = energies.squeeze(-1)
244
- return energies
245
-
246
- def forward(self, attention_hidden_state, memory, processed_memory,
247
- attention_weights_cat, mask, log_alpha):
248
- """
249
- PARAMS
250
- ------
251
- attention_hidden_state: attention rnn last output
252
- memory: encoder outputs
253
- processed_memory: processed encoder outputs
254
- attention_weights_cat: previous and cumulative attention weights
255
- mask: binary mask for padded data
256
- """
257
- log_energy = self.get_alignment_energies(
258
- attention_hidden_state, processed_memory, attention_weights_cat)
259
-
260
- #log_energy =
261
-
262
- if mask is not None:
263
- log_energy.data.masked_fill_(mask, self.score_mask_value)
264
-
265
- #attention_weights = F.softmax(alignment, dim=1)
266
-
267
- #content_score = log_energy.unsqueeze(1) #[B, MAX_TIME] -> [B, 1, MAX_TIME]
268
- #log_alpha = log_alpha.unsqueeze(2) #[B, MAX_TIME] -> [B, MAX_TIME, 1]
269
-
270
- #log_total_score = log_alpha + content_score
271
-
272
- #previous_attention_weights = attention_weights_cat[:,0,:]
273
-
274
- log_alpha_shift_padded = []
275
- max_time = log_energy.size(1)
276
- for sft in range(2):
277
- shifted = log_alpha[:,:max_time-sft]
278
- shift_padded = F.pad(shifted, (sft,0), 'constant', self.score_mask_value)
279
- log_alpha_shift_padded.append(shift_padded.unsqueeze(2))
280
-
281
- biased = torch.logsumexp(torch.cat(log_alpha_shift_padded,2), 2)
282
-
283
- log_alpha_new = biased + log_energy
284
-
285
- attention_weights = F.softmax(log_alpha_new, dim=1)
286
-
287
- attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
288
- attention_context = attention_context.squeeze(1)
289
-
290
- return attention_context, attention_weights, log_alpha_new
291
-
292
-
293
- class PhaseShuffle2d(nn.Module):
294
- def __init__(self, n=2):
295
- super(PhaseShuffle2d, self).__init__()
296
- self.n = n
297
- self.random = random.Random(1)
298
-
299
- def forward(self, x, move=None):
300
- # x.size = (B, C, M, L)
301
- if move is None:
302
- move = self.random.randint(-self.n, self.n)
303
-
304
- if move == 0:
305
- return x
306
- else:
307
- left = x[:, :, :, :move]
308
- right = x[:, :, :, move:]
309
- shuffled = torch.cat([right, left], dim=3)
310
- return shuffled
311
-
312
- class PhaseShuffle1d(nn.Module):
313
- def __init__(self, n=2):
314
- super(PhaseShuffle1d, self).__init__()
315
- self.n = n
316
- self.random = random.Random(1)
317
-
318
- def forward(self, x, move=None):
319
- # x.size = (B, C, L)
320
- if move is None:
321
- move = self.random.randint(-self.n, self.n)
322
-
323
- if move == 0:
324
- return x
325
- else:
326
- left = x[:, :, :move]
327
- right = x[:, :, move:]
328
- shuffled = torch.cat([right, left], dim=2)
329
-
330
- return shuffled
331
-
332
- class MFCC(nn.Module):
333
- def __init__(self, n_mfcc=40, n_mels=80):
334
- super(MFCC, self).__init__()
335
- self.n_mfcc = n_mfcc
336
- self.n_mels = n_mels
337
- self.norm = 'ortho'
338
- dct_mat = audio_F.create_dct(self.n_mfcc, self.n_mels, self.norm)
339
- self.register_buffer('dct_mat', dct_mat)
340
-
341
- def forward(self, mel_specgram):
342
- if len(mel_specgram.shape) == 2:
343
- mel_specgram = mel_specgram.unsqueeze(0)
344
- unsqueezed = True
345
- else:
346
- unsqueezed = False
347
- # (channel, n_mels, time).transpose(...) dot (n_mels, n_mfcc)
348
- # -> (channel, time, n_mfcc).transpose(...)
349
- mfcc = torch.matmul(mel_specgram.transpose(1, 2), self.dct_mat).transpose(1, 2)
350
-
351
- # unpack batch
352
- if unsqueezed:
353
- mfcc = mfcc.squeeze(0)
354
- return mfcc
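For orientation, the MFCC module deleted above is essentially a DCT-II projection along the mel axis. A minimal stand-alone sketch of the same mapping (using torchaudio's create_dct, as the module did; the shapes and sample sizes are illustrative, not part of the original file):

import torch
import torchaudio.functional as audio_F

n_mels, n_mfcc, n_frames = 80, 40, 100
dct_mat = audio_F.create_dct(n_mfcc, n_mels, norm="ortho")   # (n_mels, n_mfcc)

mel = torch.randn(2, n_mels, n_frames)                       # (batch, n_mels, time)
# (batch, time, n_mels) @ (n_mels, n_mfcc) -> (batch, time, n_mfcc), then back to (batch, n_mfcc, time)
mfcc = torch.matmul(mel.transpose(1, 2), dct_mat).transpose(1, 2)
print(mfcc.shape)  # torch.Size([2, 40, 100])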
 
modules/length_regulator.py DELETED
@@ -1,141 +0,0 @@
1
- from typing import Tuple
2
- import torch
3
- import torch.nn as nn
4
- from torch.nn import functional as F
5
- from modules.commons import sequence_mask
6
- import numpy as np
7
- from dac.nn.quantize import VectorQuantize
8
-
9
- # f0_bin = 256
10
- f0_max = 1100.0
11
- f0_min = 50.0
12
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
13
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
14
-
15
- def f0_to_coarse(f0, f0_bin):
16
- f0_mel = 1127 * (1 + f0 / 700).log()
17
- a = (f0_bin - 2) / (f0_mel_max - f0_mel_min)
18
- b = f0_mel_min * a - 1.
19
- f0_mel = torch.where(f0_mel > 0, f0_mel * a - b, f0_mel)
20
- # torch.clip_(f0_mel, min=1., max=float(f0_bin - 1))
21
- f0_coarse = torch.round(f0_mel).long()
22
- f0_coarse = f0_coarse * (f0_coarse > 0)
23
- f0_coarse = f0_coarse + ((f0_coarse < 1) * 1)
24
- f0_coarse = f0_coarse * (f0_coarse < f0_bin)
25
- f0_coarse = f0_coarse + ((f0_coarse >= f0_bin) * (f0_bin - 1))
26
- return f0_coarse
27
-
28
- class InterpolateRegulator(nn.Module):
29
- def __init__(
30
- self,
31
- channels: int,
32
- sampling_ratios: Tuple,
33
- is_discrete: bool = False,
34
- in_channels: int = None, # only applies to continuous input
35
- vector_quantize: bool = False, # whether to use vector quantization, only applies to continuous input
36
- codebook_size: int = 1024, # for discrete only
37
- out_channels: int = None,
38
- groups: int = 1,
39
- n_codebooks: int = 1, # number of codebooks
40
- quantizer_dropout: float = 0.0, # dropout for quantizer
41
- f0_condition: bool = False,
42
- n_f0_bins: int = 512,
43
- ):
44
- super().__init__()
45
- self.sampling_ratios = sampling_ratios
46
- out_channels = out_channels or channels
47
- model = nn.ModuleList([])
48
- if len(sampling_ratios) > 0:
49
- self.interpolate = True
50
- for _ in sampling_ratios:
51
- module = nn.Conv1d(channels, channels, 3, 1, 1)
52
- norm = nn.GroupNorm(groups, channels)
53
- act = nn.Mish()
54
- model.extend([module, norm, act])
55
- else:
56
- self.interpolate = False
57
- model.append(
58
- nn.Conv1d(channels, out_channels, 1, 1)
59
- )
60
- self.model = nn.Sequential(*model)
61
- self.embedding = nn.Embedding(codebook_size, channels)
62
- self.is_discrete = is_discrete
63
-
64
- self.mask_token = nn.Parameter(torch.zeros(1, channels))
65
-
66
- self.n_codebooks = n_codebooks
67
- if n_codebooks > 1:
68
- self.extra_codebooks = nn.ModuleList([
69
- nn.Embedding(codebook_size, channels) for _ in range(n_codebooks - 1)
70
- ])
71
- self.extra_codebook_mask_tokens = nn.ParameterList([
72
- nn.Parameter(torch.zeros(1, channels)) for _ in range(n_codebooks - 1)
73
- ])
74
- self.quantizer_dropout = quantizer_dropout
75
-
76
- if f0_condition:
77
- self.f0_embedding = nn.Embedding(n_f0_bins, channels)
78
- self.f0_condition = f0_condition
79
- self.n_f0_bins = n_f0_bins
80
- self.f0_bins = torch.arange(2, 1024, 1024 // n_f0_bins)
81
- self.f0_mask = nn.Parameter(torch.zeros(1, channels))
82
- else:
83
- self.f0_condition = False
84
-
85
- if not is_discrete:
86
- self.content_in_proj = nn.Linear(in_channels, channels)
87
- if vector_quantize:
88
- self.vq = VectorQuantize(channels, codebook_size, 8)
89
-
90
- def forward(self, x, ylens=None, n_quantizers=None, f0=None):
91
- # apply token drop
92
- if self.training:
93
- n_quantizers = torch.ones((x.shape[0],)) * self.n_codebooks
94
- dropout = torch.randint(1, self.n_codebooks + 1, (x.shape[0],))
95
- n_dropout = int(x.shape[0] * self.quantizer_dropout)
96
- n_quantizers[:n_dropout] = dropout[:n_dropout]
97
- n_quantizers = n_quantizers.to(x.device)
98
- # decide whether to drop for each sample in batch
99
- else:
100
- n_quantizers = torch.ones((x.shape[0],), device=x.device) * (self.n_codebooks if n_quantizers is None else n_quantizers)
101
- if self.is_discrete:
102
- if self.n_codebooks > 1:
103
- assert len(x.size()) == 3
104
- x_emb = self.embedding(x[:, 0])
105
- for i, emb in enumerate(self.extra_codebooks):
106
- x_emb = x_emb + (n_quantizers > i+1)[..., None, None] * emb(x[:, i+1])
107
- # add mask token if not using this codebook
108
- # x_emb = x_emb + (n_quantizers <= i+1)[..., None, None] * self.extra_codebook_mask_tokens[i]
109
- x = x_emb
110
- elif self.n_codebooks == 1:
111
- if len(x.size()) == 2:
112
- x = self.embedding(x)
113
- else:
114
- x = self.embedding(x[:, 0])
115
- else:
116
- x = self.content_in_proj(x)
117
- # x in (B, T, D)
118
- mask = sequence_mask(ylens).unsqueeze(-1)
119
- if self.interpolate:
120
- x = F.interpolate(x.transpose(1, 2).contiguous(), size=ylens.max(), mode='nearest')
121
- else:
122
- x = x.transpose(1, 2).contiguous()
123
- mask = mask[:, :x.size(2), :]
124
- ylens = ylens.clamp(max=x.size(2)).long()
125
- if self.f0_condition:
126
- if f0 is None:
127
- x = x + self.f0_mask.unsqueeze(-1)
128
- else:
129
- #quantized_f0 = torch.bucketize(f0, self.f0_bins.to(f0.device)) # (N, T)
130
- quantized_f0 = f0_to_coarse(f0, self.n_f0_bins)
131
- quantized_f0 = quantized_f0.clamp(0, self.n_f0_bins - 1).long()
132
- f0_emb = self.f0_embedding(quantized_f0)
133
- f0_emb = F.interpolate(f0_emb.transpose(1, 2).contiguous(), size=ylens.max(), mode='nearest')
134
- x = x + f0_emb
135
- out = self.model(x).transpose(1, 2).contiguous()
136
- if hasattr(self, 'vq'):
137
- out_q, commitment_loss, codebook_loss, codes, out, = self.vq(out.transpose(1, 2))
138
- out_q = out_q.transpose(1, 2)
139
- return out_q * mask, ylens, codes, commitment_loss, codebook_loss
140
- olens = ylens
141
- return out * mask, olens, None, None, None
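The f0_to_coarse helper in this file buckets F0 values on the mel scale between f0_min = 50 Hz and f0_max = 1100 Hz. A worked scalar sketch of the same mapping, written here purely for illustration (the helper name and the 512-bin setting are assumptions matching the defaults above):

import numpy as np

f0_min, f0_max, f0_bin = 50.0, 1100.0, 512
f0_mel_min = 1127 * np.log(1 + f0_min / 700)   # approx. 77.8
f0_mel_max = 1127 * np.log(1 + f0_max / 700)   # approx. 1064.5

def f0_to_coarse_scalar(f0_hz: float) -> int:
    # Hz -> mel, then rescale the usable mel range linearly onto bins 1 .. f0_bin - 1
    f0_mel = 1127 * np.log(1 + f0_hz / 700)
    a = (f0_bin - 2) / (f0_mel_max - f0_mel_min)
    b = f0_mel_min * a - 1.0
    coarse = int(round(f0_mel * a - b)) if f0_mel > 0 else 0
    return int(np.clip(coarse, 1, f0_bin - 1))

print(f0_to_coarse_scalar(220.0))  # roughly bin 120 with these settings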
 
modules/quantize.py DELETED
@@ -1,229 +0,0 @@
1
- from dac.nn.quantize import ResidualVectorQuantize
2
- from torch import nn
3
- from modules.wavenet import WN
4
- import torch
5
- import torchaudio
6
- import torchaudio.functional as audio_F
7
- import numpy as np
8
- from .bigvgan import *
9
- from torch.nn.utils import weight_norm
10
- from torch import nn, sin, pow
11
- from einops.layers.torch import Rearrange
12
- from dac.model.encodec import SConv1d
13
-
14
- def init_weights(m):
15
- if isinstance(m, nn.Conv1d):
16
- nn.init.trunc_normal_(m.weight, std=0.02)
17
- nn.init.constant_(m.bias, 0)
18
-
19
-
20
- def WNConv1d(*args, **kwargs):
21
- return weight_norm(nn.Conv1d(*args, **kwargs))
22
-
23
-
24
- def WNConvTranspose1d(*args, **kwargs):
25
- return weight_norm(nn.ConvTranspose1d(*args, **kwargs))
26
-
27
- class SnakeBeta(nn.Module):
28
- """
29
- A modified Snake function which uses separate parameters for the magnitude of the periodic components
30
- Shape:
31
- - Input: (B, C, T)
32
- - Output: (B, C, T), same shape as the input
33
- Parameters:
34
- - alpha - trainable parameter that controls frequency
35
- - beta - trainable parameter that controls magnitude
36
- References:
37
- - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
38
- https://arxiv.org/abs/2006.08195
39
- Examples:
40
- >>> a1 = SnakeBeta(256)
41
- >>> x = torch.randn(256)
42
- >>> x = a1(x)
43
- """
44
-
45
- def __init__(
46
- self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False
47
- ):
48
- """
49
- Initialization.
50
- INPUT:
51
- - in_features: shape of the input
52
- - alpha - trainable parameter that controls frequency
53
- - beta - trainable parameter that controls magnitude
54
- alpha is initialized to 1 by default, higher values = higher-frequency.
55
- beta is initialized to 1 by default, higher values = higher-magnitude.
56
- alpha will be trained along with the rest of your model.
57
- """
58
- super(SnakeBeta, self).__init__()
59
- self.in_features = in_features
60
-
61
- # initialize alpha
62
- self.alpha_logscale = alpha_logscale
63
- if self.alpha_logscale: # log scale alphas initialized to zeros
64
- self.alpha = nn.Parameter(torch.zeros(in_features) * alpha)
65
- self.beta = nn.Parameter(torch.zeros(in_features) * alpha)
66
- else: # linear scale alphas initialized to ones
67
- self.alpha = nn.Parameter(torch.ones(in_features) * alpha)
68
- self.beta = nn.Parameter(torch.ones(in_features) * alpha)
69
-
70
- self.alpha.requires_grad = alpha_trainable
71
- self.beta.requires_grad = alpha_trainable
72
-
73
- self.no_div_by_zero = 0.000000001
74
-
75
- def forward(self, x):
76
- """
77
- Forward pass of the function.
78
- Applies the function to the input elementwise.
79
- SnakeBeta := x + 1/b * sin^2 (xa)
80
- """
81
- alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T]
82
- beta = self.beta.unsqueeze(0).unsqueeze(-1)
83
- if self.alpha_logscale:
84
- alpha = torch.exp(alpha)
85
- beta = torch.exp(beta)
86
- x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
87
-
88
- return x
89
-
90
- class ResidualUnit(nn.Module):
91
- def __init__(self, dim: int = 16, dilation: int = 1):
92
- super().__init__()
93
- pad = ((7 - 1) * dilation) // 2
94
- self.block = nn.Sequential(
95
- Activation1d(activation=SnakeBeta(dim, alpha_logscale=True)),
96
- WNConv1d(dim, dim, kernel_size=7, dilation=dilation, padding=pad),
97
- Activation1d(activation=SnakeBeta(dim, alpha_logscale=True)),
98
- WNConv1d(dim, dim, kernel_size=1),
99
- )
100
-
101
- def forward(self, x):
102
- return x + self.block(x)
103
-
104
- class CNNLSTM(nn.Module):
105
- def __init__(self, indim, outdim, head, global_pred=False):
106
- super().__init__()
107
- self.global_pred = global_pred
108
- self.model = nn.Sequential(
109
- ResidualUnit(indim, dilation=1),
110
- ResidualUnit(indim, dilation=2),
111
- ResidualUnit(indim, dilation=3),
112
- Activation1d(activation=SnakeBeta(indim, alpha_logscale=True)),
113
- Rearrange("b c t -> b t c"),
114
- )
115
- self.heads = nn.ModuleList([nn.Linear(indim, outdim) for i in range(head)])
116
-
117
- def forward(self, x):
118
- # x: [B, C, T]
119
- x = self.model(x)
120
- if self.global_pred:
121
- x = torch.mean(x, dim=1, keepdim=False)
122
- outs = [head(x) for head in self.heads]
123
- return outs
124
-
125
- def sequence_mask(length, max_length=None):
126
- if max_length is None:
127
- max_length = length.max()
128
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
129
- return x.unsqueeze(0) < length.unsqueeze(1)
130
- class FAquantizer(nn.Module):
131
- def __init__(self, in_dim=1024,
132
- n_p_codebooks=1,
133
- n_c_codebooks=2,
134
- n_t_codebooks=2,
135
- n_r_codebooks=3,
136
- codebook_size=1024,
137
- codebook_dim=8,
138
- quantizer_dropout=0.5,
139
- causal=False,
140
- separate_prosody_encoder=False,
141
- timbre_norm=False,):
142
- super(FAquantizer, self).__init__()
143
- conv1d_type = SConv1d# if causal else nn.Conv1d
144
- self.prosody_quantizer = ResidualVectorQuantize(
145
- input_dim=in_dim,
146
- n_codebooks=n_p_codebooks,
147
- codebook_size=codebook_size,
148
- codebook_dim=codebook_dim,
149
- quantizer_dropout=quantizer_dropout,
150
- )
151
-
152
- self.content_quantizer = ResidualVectorQuantize(
153
- input_dim=in_dim,
154
- n_codebooks=n_c_codebooks,
155
- codebook_size=codebook_size,
156
- codebook_dim=codebook_dim,
157
- quantizer_dropout=quantizer_dropout,
158
- )
159
-
160
- self.residual_quantizer = ResidualVectorQuantize(
161
- input_dim=in_dim,
162
- n_codebooks=n_r_codebooks,
163
- codebook_size=codebook_size,
164
- codebook_dim=codebook_dim,
165
- quantizer_dropout=quantizer_dropout,
166
- )
167
-
168
- self.melspec_linear = conv1d_type(in_channels=20, out_channels=256, kernel_size=1, causal=causal)
169
- self.melspec_encoder = WN(hidden_channels=256, kernel_size=5, dilation_rate=1, n_layers=8, gin_channels=0, p_dropout=0.2, causal=causal)
170
- self.melspec_linear2 = conv1d_type(in_channels=256, out_channels=1024, kernel_size=1, causal=causal)
171
-
172
- self.prob_random_mask_residual = 0.75
173
-
174
- SPECT_PARAMS = {
175
- "n_fft": 2048,
176
- "win_length": 1200,
177
- "hop_length": 300,
178
- }
179
- MEL_PARAMS = {
180
- "n_mels": 80,
181
- }
182
-
183
- self.to_mel = torchaudio.transforms.MelSpectrogram(
184
- n_mels=MEL_PARAMS["n_mels"], sample_rate=24000, **SPECT_PARAMS
185
- )
186
- self.mel_mean, self.mel_std = -4, 4
187
- self.frame_rate = 24000 / 300
188
- self.hop_length = 300
189
-
190
- def preprocess(self, wave_tensor, n_bins=20):
191
- mel_tensor = self.to_mel(wave_tensor.squeeze(1))
192
- mel_tensor = (torch.log(1e-5 + mel_tensor) - self.mel_mean) / self.mel_std
193
- return mel_tensor[:, :n_bins, :int(wave_tensor.size(-1) / self.hop_length)]
194
-
195
- def forward(self, x, wave_segments):
196
- outs = 0
197
- prosody_feature = self.preprocess(wave_segments)
198
-
199
- f0_input = prosody_feature # (B, T, 20)
200
- f0_input = self.melspec_linear(f0_input)
201
- f0_input = self.melspec_encoder(f0_input, torch.ones(f0_input.shape[0], 1, f0_input.shape[2]).to(
202
- f0_input.device).bool())
203
- f0_input = self.melspec_linear2(f0_input)
204
-
205
- common_min_size = min(f0_input.size(2), x.size(2))
206
- f0_input = f0_input[:, :, :common_min_size]
207
-
208
- x = x[:, :, :common_min_size]
209
-
210
- z_p, codes_p, latents_p, commitment_loss_p, codebook_loss_p = self.prosody_quantizer(
211
- f0_input, 1
212
- )
213
- outs += z_p.detach()
214
-
215
- z_c, codes_c, latents_c, commitment_loss_c, codebook_loss_c = self.content_quantizer(
216
- x, 2
217
- )
218
- outs += z_c.detach()
219
-
220
- residual_feature = x - z_p.detach() - z_c.detach()
221
-
222
- z_r, codes_r, latents_r, commitment_loss_r, codebook_loss_r = self.residual_quantizer(
223
- residual_feature, 3
224
- )
225
-
226
- quantized = [z_p, z_c, z_r]
227
- codes = [codes_p, codes_c, codes_r]
228
-
229
- return quantized, codes
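The SnakeBeta activation used throughout these quantizer blocks computes x + (1/beta) * sin^2(alpha * x) with per-channel alpha and beta (optionally stored in log scale). A minimal functional sketch, separate from the module above:

import torch

def snake_beta(x: torch.Tensor, alpha: torch.Tensor, beta: torch.Tensor, eps: float = 1e-9) -> torch.Tensor:
    # x: (B, C, T); alpha/beta: (C,) broadcast over batch and time
    alpha = alpha.unsqueeze(0).unsqueeze(-1)
    beta = beta.unsqueeze(0).unsqueeze(-1)
    return x + (1.0 / (beta + eps)) * torch.sin(x * alpha).pow(2)

x = torch.randn(2, 4, 16)
out = snake_beta(x, alpha=torch.ones(4), beta=torch.ones(4))
print(out.shape)  # torch.Size([2, 4, 16])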
 
modules/rmvpe.py DELETED
@@ -1,600 +0,0 @@
1
- from io import BytesIO
2
- import os
3
- from typing import List, Optional, Tuple
4
- import numpy as np
5
- import torch
6
-
7
- import torch.nn as nn
8
- import torch.nn.functional as F
9
- from librosa.util import normalize, pad_center, tiny
10
- from scipy.signal import get_window
11
-
12
- import logging
13
-
14
- logger = logging.getLogger(__name__)
15
-
16
-
17
- class STFT(torch.nn.Module):
18
- def __init__(
19
- self, filter_length=1024, hop_length=512, win_length=None, window="hann"
20
- ):
21
- """
22
- This module implements an STFT using 1D convolution and 1D transpose convolutions.
23
- This is a bit tricky, and some cases probably will not work, since matching
24
- the sizes before and after in every overlap-add setup is hard. Right now,
25
- this code should work with hop lengths that are half the filter length (50% overlap
26
- between frames).
27
-
28
- Keyword Arguments:
29
- filter_length {int} -- Length of filters used (default: {1024})
30
- hop_length {int} -- Hop length of STFT (restrict to 50% overlap between frames) (default: {512})
31
- win_length {[type]} -- Length of the window function applied to each frame (if not specified, it
32
- equals the filter length). (default: {None})
33
- window {str} -- Type of window to use (options are bartlett, hann, hamming, blackman, blackmanharris)
34
- (default: {'hann'})
35
- """
36
- super(STFT, self).__init__()
37
- self.filter_length = filter_length
38
- self.hop_length = hop_length
39
- self.win_length = win_length if win_length else filter_length
40
- self.window = window
41
- self.forward_transform = None
42
- self.pad_amount = int(self.filter_length / 2)
43
- fourier_basis = np.fft.fft(np.eye(self.filter_length))
44
-
45
- cutoff = int((self.filter_length / 2 + 1))
46
- fourier_basis = np.vstack(
47
- [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])]
48
- )
49
- forward_basis = torch.FloatTensor(fourier_basis)
50
- inverse_basis = torch.FloatTensor(np.linalg.pinv(fourier_basis))
51
-
52
- assert filter_length >= self.win_length
53
- # get window and zero center pad it to filter_length
54
- fft_window = get_window(window, self.win_length, fftbins=True)
55
- fft_window = pad_center(fft_window, size=filter_length)
56
- fft_window = torch.from_numpy(fft_window).float()
57
-
58
- # window the bases
59
- forward_basis *= fft_window
60
- inverse_basis = (inverse_basis.T * fft_window).T
61
-
62
- self.register_buffer("forward_basis", forward_basis.float())
63
- self.register_buffer("inverse_basis", inverse_basis.float())
64
- self.register_buffer("fft_window", fft_window.float())
65
-
66
- def transform(self, input_data, return_phase=False):
67
- """Take input data (audio) to STFT domain.
68
-
69
- Arguments:
70
- input_data {tensor} -- Tensor of floats, with shape (num_batch, num_samples)
71
-
72
- Returns:
73
- magnitude {tensor} -- Magnitude of STFT with shape (num_batch,
74
- num_frequencies, num_frames)
75
- phase {tensor} -- Phase of STFT with shape (num_batch,
76
- num_frequencies, num_frames)
77
- """
78
- input_data = F.pad(
79
- input_data,
80
- (self.pad_amount, self.pad_amount),
81
- mode="reflect",
82
- )
83
- forward_transform = input_data.unfold(
84
- 1, self.filter_length, self.hop_length
85
- ).permute(0, 2, 1)
86
- forward_transform = torch.matmul(self.forward_basis, forward_transform)
87
- cutoff = int((self.filter_length / 2) + 1)
88
- real_part = forward_transform[:, :cutoff, :]
89
- imag_part = forward_transform[:, cutoff:, :]
90
- magnitude = torch.sqrt(real_part**2 + imag_part**2)
91
- if return_phase:
92
- phase = torch.atan2(imag_part.data, real_part.data)
93
- return magnitude, phase
94
- else:
95
- return magnitude
96
-
97
- def inverse(self, magnitude, phase):
98
- """Call the inverse STFT (iSTFT), given magnitude and phase tensors produced
99
- by the ```transform``` function.
100
-
101
- Arguments:
102
- magnitude {tensor} -- Magnitude of STFT with shape (num_batch,
103
- num_frequencies, num_frames)
104
- phase {tensor} -- Phase of STFT with shape (num_batch,
105
- num_frequencies, num_frames)
106
-
107
- Returns:
108
- inverse_transform {tensor} -- Reconstructed audio given magnitude and phase. Of
109
- shape (num_batch, num_samples)
110
- """
111
- cat = torch.cat(
112
- [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1
113
- )
114
- fold = torch.nn.Fold(
115
- output_size=(1, (cat.size(-1) - 1) * self.hop_length + self.filter_length),
116
- kernel_size=(1, self.filter_length),
117
- stride=(1, self.hop_length),
118
- )
119
- inverse_transform = torch.matmul(self.inverse_basis, cat)
120
- inverse_transform = fold(inverse_transform)[
121
- :, 0, 0, self.pad_amount : -self.pad_amount
122
- ]
123
- window_square_sum = (
124
- self.fft_window.pow(2).repeat(cat.size(-1), 1).T.unsqueeze(0)
125
- )
126
- window_square_sum = fold(window_square_sum)[
127
- :, 0, 0, self.pad_amount : -self.pad_amount
128
- ]
129
- inverse_transform /= window_square_sum
130
- return inverse_transform
131
-
132
- def forward(self, input_data):
133
- """Take input data (audio) to STFT domain and then back to audio.
134
-
135
- Arguments:
136
- input_data {tensor} -- Tensor of floats, with shape (num_batch, num_samples)
137
-
138
- Returns:
139
- reconstruction {tensor} -- Reconstructed audio given magnitude and phase. Of
140
- shape (num_batch, num_samples)
141
- """
142
- self.magnitude, self.phase = self.transform(input_data, return_phase=True)
143
- reconstruction = self.inverse(self.magnitude, self.phase)
144
- return reconstruction
145
-
146
-
147
- from time import time as ttime
148
-
149
-
150
- class BiGRU(nn.Module):
151
- def __init__(self, input_features, hidden_features, num_layers):
152
- super(BiGRU, self).__init__()
153
- self.gru = nn.GRU(
154
- input_features,
155
- hidden_features,
156
- num_layers=num_layers,
157
- batch_first=True,
158
- bidirectional=True,
159
- )
160
-
161
- def forward(self, x):
162
- return self.gru(x)[0]
163
-
164
-
165
- class ConvBlockRes(nn.Module):
166
- def __init__(self, in_channels, out_channels, momentum=0.01):
167
- super(ConvBlockRes, self).__init__()
168
- self.conv = nn.Sequential(
169
- nn.Conv2d(
170
- in_channels=in_channels,
171
- out_channels=out_channels,
172
- kernel_size=(3, 3),
173
- stride=(1, 1),
174
- padding=(1, 1),
175
- bias=False,
176
- ),
177
- nn.BatchNorm2d(out_channels, momentum=momentum),
178
- nn.ReLU(),
179
- nn.Conv2d(
180
- in_channels=out_channels,
181
- out_channels=out_channels,
182
- kernel_size=(3, 3),
183
- stride=(1, 1),
184
- padding=(1, 1),
185
- bias=False,
186
- ),
187
- nn.BatchNorm2d(out_channels, momentum=momentum),
188
- nn.ReLU(),
189
- )
190
- # self.shortcut:Optional[nn.Module] = None
191
- if in_channels != out_channels:
192
- self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1))
193
-
194
- def forward(self, x: torch.Tensor):
195
- if not hasattr(self, "shortcut"):
196
- return self.conv(x) + x
197
- else:
198
- return self.conv(x) + self.shortcut(x)
199
-
200
-
201
- class Encoder(nn.Module):
202
- def __init__(
203
- self,
204
- in_channels,
205
- in_size,
206
- n_encoders,
207
- kernel_size,
208
- n_blocks,
209
- out_channels=16,
210
- momentum=0.01,
211
- ):
212
- super(Encoder, self).__init__()
213
- self.n_encoders = n_encoders
214
- self.bn = nn.BatchNorm2d(in_channels, momentum=momentum)
215
- self.layers = nn.ModuleList()
216
- self.latent_channels = []
217
- for i in range(self.n_encoders):
218
- self.layers.append(
219
- ResEncoderBlock(
220
- in_channels, out_channels, kernel_size, n_blocks, momentum=momentum
221
- )
222
- )
223
- self.latent_channels.append([out_channels, in_size])
224
- in_channels = out_channels
225
- out_channels *= 2
226
- in_size //= 2
227
- self.out_size = in_size
228
- self.out_channel = out_channels
229
-
230
- def forward(self, x: torch.Tensor):
231
- concat_tensors: List[torch.Tensor] = []
232
- x = self.bn(x)
233
- for i, layer in enumerate(self.layers):
234
- t, x = layer(x)
235
- concat_tensors.append(t)
236
- return x, concat_tensors
237
-
238
-
239
- class ResEncoderBlock(nn.Module):
240
- def __init__(
241
- self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
242
- ):
243
- super(ResEncoderBlock, self).__init__()
244
- self.n_blocks = n_blocks
245
- self.conv = nn.ModuleList()
246
- self.conv.append(ConvBlockRes(in_channels, out_channels, momentum))
247
- for i in range(n_blocks - 1):
248
- self.conv.append(ConvBlockRes(out_channels, out_channels, momentum))
249
- self.kernel_size = kernel_size
250
- if self.kernel_size is not None:
251
- self.pool = nn.AvgPool2d(kernel_size=kernel_size)
252
-
253
- def forward(self, x):
254
- for i, conv in enumerate(self.conv):
255
- x = conv(x)
256
- if self.kernel_size is not None:
257
- return x, self.pool(x)
258
- else:
259
- return x
260
-
261
-
262
- class Intermediate(nn.Module): #
263
- def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01):
264
- super(Intermediate, self).__init__()
265
- self.n_inters = n_inters
266
- self.layers = nn.ModuleList()
267
- self.layers.append(
268
- ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum)
269
- )
270
- for i in range(self.n_inters - 1):
271
- self.layers.append(
272
- ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum)
273
- )
274
-
275
- def forward(self, x):
276
- for i, layer in enumerate(self.layers):
277
- x = layer(x)
278
- return x
279
-
280
-
281
- class ResDecoderBlock(nn.Module):
282
- def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01):
283
- super(ResDecoderBlock, self).__init__()
284
- out_padding = (0, 1) if stride == (1, 2) else (1, 1)
285
- self.n_blocks = n_blocks
286
- self.conv1 = nn.Sequential(
287
- nn.ConvTranspose2d(
288
- in_channels=in_channels,
289
- out_channels=out_channels,
290
- kernel_size=(3, 3),
291
- stride=stride,
292
- padding=(1, 1),
293
- output_padding=out_padding,
294
- bias=False,
295
- ),
296
- nn.BatchNorm2d(out_channels, momentum=momentum),
297
- nn.ReLU(),
298
- )
299
- self.conv2 = nn.ModuleList()
300
- self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum))
301
- for i in range(n_blocks - 1):
302
- self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum))
303
-
304
- def forward(self, x, concat_tensor):
305
- x = self.conv1(x)
306
- x = torch.cat((x, concat_tensor), dim=1)
307
- for i, conv2 in enumerate(self.conv2):
308
- x = conv2(x)
309
- return x
310
-
311
-
312
- class Decoder(nn.Module):
313
- def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01):
314
- super(Decoder, self).__init__()
315
- self.layers = nn.ModuleList()
316
- self.n_decoders = n_decoders
317
- for i in range(self.n_decoders):
318
- out_channels = in_channels // 2
319
- self.layers.append(
320
- ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum)
321
- )
322
- in_channels = out_channels
323
-
324
- def forward(self, x: torch.Tensor, concat_tensors: List[torch.Tensor]):
325
- for i, layer in enumerate(self.layers):
326
- x = layer(x, concat_tensors[-1 - i])
327
- return x
328
-
329
-
330
- class DeepUnet(nn.Module):
331
- def __init__(
332
- self,
333
- kernel_size,
334
- n_blocks,
335
- en_de_layers=5,
336
- inter_layers=4,
337
- in_channels=1,
338
- en_out_channels=16,
339
- ):
340
- super(DeepUnet, self).__init__()
341
- self.encoder = Encoder(
342
- in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels
343
- )
344
- self.intermediate = Intermediate(
345
- self.encoder.out_channel // 2,
346
- self.encoder.out_channel,
347
- inter_layers,
348
- n_blocks,
349
- )
350
- self.decoder = Decoder(
351
- self.encoder.out_channel, en_de_layers, kernel_size, n_blocks
352
- )
353
-
354
- def forward(self, x: torch.Tensor) -> torch.Tensor:
355
- x, concat_tensors = self.encoder(x)
356
- x = self.intermediate(x)
357
- x = self.decoder(x, concat_tensors)
358
- return x
359
-
360
-
361
- class E2E(nn.Module):
362
- def __init__(
363
- self,
364
- n_blocks,
365
- n_gru,
366
- kernel_size,
367
- en_de_layers=5,
368
- inter_layers=4,
369
- in_channels=1,
370
- en_out_channels=16,
371
- ):
372
- super(E2E, self).__init__()
373
- self.unet = DeepUnet(
374
- kernel_size,
375
- n_blocks,
376
- en_de_layers,
377
- inter_layers,
378
- in_channels,
379
- en_out_channels,
380
- )
381
- self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1))
382
- if n_gru:
383
- self.fc = nn.Sequential(
384
- BiGRU(3 * 128, 256, n_gru),
385
- nn.Linear(512, 360),
386
- nn.Dropout(0.25),
387
- nn.Sigmoid(),
388
- )
389
- else:
390
- self.fc = nn.Sequential(
391
- nn.Linear(3 * 128, 360), nn.Dropout(0.25), nn.Sigmoid()  # 128 mel bins, 360 pitch classes, matching the rest of this file (nn.N_MELS / nn.N_CLASS do not exist)
392
- )
393
-
394
- def forward(self, mel):
395
- # print(mel.shape)
396
- mel = mel.transpose(-1, -2).unsqueeze(1)
397
- x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2)
398
- x = self.fc(x)
399
- # print(x.shape)
400
- return x
401
-
402
-
403
- from librosa.filters import mel
404
-
405
-
406
- class MelSpectrogram(torch.nn.Module):
407
- def __init__(
408
- self,
409
- is_half,
410
- n_mel_channels,
411
- sampling_rate,
412
- win_length,
413
- hop_length,
414
- n_fft=None,
415
- mel_fmin=0,
416
- mel_fmax=None,
417
- clamp=1e-5,
418
- ):
419
- super().__init__()
420
- n_fft = win_length if n_fft is None else n_fft
421
- self.hann_window = {}
422
- mel_basis = mel(
423
- sr=sampling_rate,
424
- n_fft=n_fft,
425
- n_mels=n_mel_channels,
426
- fmin=mel_fmin,
427
- fmax=mel_fmax,
428
- htk=True,
429
- )
430
- mel_basis = torch.from_numpy(mel_basis).float()
431
- self.register_buffer("mel_basis", mel_basis)
432
- self.n_fft = win_length if n_fft is None else n_fft
433
- self.hop_length = hop_length
434
- self.win_length = win_length
435
- self.sampling_rate = sampling_rate
436
- self.n_mel_channels = n_mel_channels
437
- self.clamp = clamp
438
- self.is_half = is_half
439
-
440
- def forward(self, audio, keyshift=0, speed=1, center=True):
441
- factor = 2 ** (keyshift / 12)
442
- n_fft_new = int(np.round(self.n_fft * factor))
443
- win_length_new = int(np.round(self.win_length * factor))
444
- hop_length_new = int(np.round(self.hop_length * speed))
445
- keyshift_key = str(keyshift) + "_" + str(audio.device)
446
- if keyshift_key not in self.hann_window:
447
- self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to(
448
- audio.device
449
- )
450
- if "privateuseone" in str(audio.device):
451
- if not hasattr(self, "stft"):
452
- self.stft = STFT(
453
- filter_length=n_fft_new,
454
- hop_length=hop_length_new,
455
- win_length=win_length_new,
456
- window="hann",
457
- ).to(audio.device)
458
- magnitude = self.stft.transform(audio)
459
- else:
460
- fft = torch.stft(
461
- audio,
462
- n_fft=n_fft_new,
463
- hop_length=hop_length_new,
464
- win_length=win_length_new,
465
- window=self.hann_window[keyshift_key],
466
- center=center,
467
- return_complex=True,
468
- )
469
- magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2))
470
- if keyshift != 0:
471
- size = self.n_fft // 2 + 1
472
- resize = magnitude.size(1)
473
- if resize < size:
474
- magnitude = F.pad(magnitude, (0, 0, 0, size - resize))
475
- magnitude = magnitude[:, :size, :] * self.win_length / win_length_new
476
- mel_output = torch.matmul(self.mel_basis, magnitude)
477
- if self.is_half == True:
478
- mel_output = mel_output.half()
479
- log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp))
480
- return log_mel_spec
481
-
482
-
483
- class RMVPE:
484
- def __init__(self, model_path: str, is_half, device=None, use_jit=False):
485
- self.resample_kernel = {}
486
- self.resample_kernel = {}
487
- self.is_half = is_half
488
- if device is None:
489
- device = "cuda:0" if torch.cuda.is_available() else "cpu"
490
- self.device = device
491
- self.mel_extractor = MelSpectrogram(
492
- is_half, 128, 16000, 1024, 160, None, 30, 8000
493
- ).to(device)
494
- if "privateuseone" in str(device):
495
- import onnxruntime as ort
496
-
497
- ort_session = ort.InferenceSession(
498
- "%s/rmvpe.onnx" % os.environ["rmvpe_root"],
499
- providers=["DmlExecutionProvider"],
500
- )
501
- self.model = ort_session
502
- else:
503
- if str(self.device) == "cuda":
504
- self.device = torch.device("cuda:0")
505
-
506
- def get_default_model():
507
- model = E2E(4, 1, (2, 2))
508
- ckpt = torch.load(model_path, map_location="cpu")
509
- model.load_state_dict(ckpt)
510
- model.eval()
511
- if is_half:
512
- model = model.half()
513
- else:
514
- model = model.float()
515
- return model
516
-
517
- self.model = get_default_model()
518
-
519
- self.model = self.model.to(device)
520
- cents_mapping = 20 * np.arange(360) + 1997.3794084376191
521
- self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368
522
-
523
- def mel2hidden(self, mel):
524
- with torch.no_grad():
525
- n_frames = mel.shape[-1]
526
- n_pad = 32 * ((n_frames - 1) // 32 + 1) - n_frames
527
- if n_pad > 0:
528
- mel = F.pad(mel, (0, n_pad), mode="constant")
529
- if "privateuseone" in str(self.device):
530
- onnx_input_name = self.model.get_inputs()[0].name
531
- onnx_outputs_names = self.model.get_outputs()[0].name
532
- hidden = self.model.run(
533
- [onnx_outputs_names],
534
- input_feed={onnx_input_name: mel.cpu().numpy()},
535
- )[0]
536
- else:
537
- mel = mel.half() if self.is_half else mel.float()
538
- hidden = self.model(mel)
539
- return hidden[:, :n_frames]
540
-
541
- def decode(self, hidden, thred=0.03):
542
- cents_pred = self.to_local_average_cents(hidden, thred=thred)
543
- f0 = 10 * (2 ** (cents_pred / 1200))
544
- f0[f0 == 10] = 0
545
- # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred])
546
- return f0
547
-
548
- def infer_from_audio(self, audio, thred=0.03):
549
- # torch.cuda.synchronize()
550
- # t0 = ttime()
551
- if not torch.is_tensor(audio):
552
- audio = torch.from_numpy(audio)
553
- mel = self.mel_extractor(
554
- audio.float().to(self.device).unsqueeze(0), center=True
555
- )
556
- # print(123123123,mel.device.type)
557
- # torch.cuda.synchronize()
558
- # t1 = ttime()
559
- hidden = self.mel2hidden(mel)
560
- # torch.cuda.synchronize()
561
- # t2 = ttime()
562
- # print(234234,hidden.device.type)
563
- if "privateuseone" not in str(self.device):
564
- hidden = hidden.squeeze(0).cpu().numpy()
565
- else:
566
- hidden = hidden[0]
567
- if self.is_half == True:
568
- hidden = hidden.astype("float32")
569
-
570
- f0 = self.decode(hidden, thred=thred)
571
- # torch.cuda.synchronize()
572
- # t3 = ttime()
573
- # print("hmvpe:%s\t%s\t%s\t%s"%(t1-t0,t2-t1,t3-t2,t3-t0))
574
- return f0
575
-
576
- def to_local_average_cents(self, salience, thred=0.05):
577
- # t0 = ttime()
578
- center = np.argmax(salience, axis=1)  # (n_frames,) index of the peak bin per frame
579
- salience = np.pad(salience, ((0, 0), (4, 4)))  # (n_frames, 368)
580
- # t1 = ttime()
581
- center += 4
582
- todo_salience = []
583
- todo_cents_mapping = []
584
- starts = center - 4
585
- ends = center + 5
586
- for idx in range(salience.shape[0]):
587
- todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
588
- todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
589
- # t2 = ttime()
590
- todo_salience = np.array(todo_salience)  # (n_frames, 9)
591
- todo_cents_mapping = np.array(todo_cents_mapping)  # (n_frames, 9)
592
- product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
593
- weight_sum = np.sum(todo_salience, 1)  # (n_frames,)
594
- devided = product_sum / weight_sum  # (n_frames,) weighted-average cents
595
- # t3 = ttime()
596
- maxx = np.max(salience, axis=1)  # (n_frames,)
597
- devided[maxx <= thred] = 0
598
- # t4 = ttime()
599
- # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
600
- return devided
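RMVPE's decode step maps the weighted-average pitch estimate from cents back to Hz with f0 = 10 * 2^(cents / 1200), and frames whose estimate stays at the 10 Hz reference (0 cents, i.e. below the salience threshold) are zeroed. A small sketch of just that conversion:

import numpy as np

def cents_to_f0(cents_pred: np.ndarray) -> np.ndarray:
    # 10 Hz is the reference pitch for 0 cents; unvoiced frames (0 cents) map back to 0 Hz
    f0 = 10 * (2 ** (cents_pred / 1200))
    f0[f0 == 10] = 0
    return f0

cents = np.array([0.0, 5700.0, 6000.0])  # one unvoiced frame, two voiced frames
print(cents_to_f0(cents))                # [0., ~269.1, 320.]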
 
modules/wavenet.py DELETED
@@ -1,174 +0,0 @@
1
- import math
2
- import torch
3
- from torch import nn
4
- from torch.nn import functional as F
5
-
6
- from modules.encodec import SConv1d
7
-
8
- from . import commons
9
- LRELU_SLOPE = 0.1
10
-
11
- class LayerNorm(nn.Module):
12
- def __init__(self, channels, eps=1e-5):
13
- super().__init__()
14
- self.channels = channels
15
- self.eps = eps
16
-
17
- self.gamma = nn.Parameter(torch.ones(channels))
18
- self.beta = nn.Parameter(torch.zeros(channels))
19
-
20
- def forward(self, x):
21
- x = x.transpose(1, -1)
22
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
23
- return x.transpose(1, -1)
24
-
25
-
26
- class ConvReluNorm(nn.Module):
27
- def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
28
- super().__init__()
29
- self.in_channels = in_channels
30
- self.hidden_channels = hidden_channels
31
- self.out_channels = out_channels
32
- self.kernel_size = kernel_size
33
- self.n_layers = n_layers
34
- self.p_dropout = p_dropout
35
- assert n_layers > 1, "Number of layers should be larger than 1."
36
-
37
- self.conv_layers = nn.ModuleList()
38
- self.norm_layers = nn.ModuleList()
39
- self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
40
- self.norm_layers.append(LayerNorm(hidden_channels))
41
- self.relu_drop = nn.Sequential(
42
- nn.ReLU(),
43
- nn.Dropout(p_dropout))
44
- for _ in range(n_layers - 1):
45
- self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
46
- self.norm_layers.append(LayerNorm(hidden_channels))
47
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
48
- self.proj.weight.data.zero_()
49
- self.proj.bias.data.zero_()
50
-
51
- def forward(self, x, x_mask):
52
- x_org = x
53
- for i in range(self.n_layers):
54
- x = self.conv_layers[i](x * x_mask)
55
- x = self.norm_layers[i](x)
56
- x = self.relu_drop(x)
57
- x = x_org + self.proj(x)
58
- return x * x_mask
59
-
60
-
61
- class DDSConv(nn.Module):
62
- """
63
- Dialted and Depth-Separable Convolution
64
- """
65
-
66
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
67
- super().__init__()
68
- self.channels = channels
69
- self.kernel_size = kernel_size
70
- self.n_layers = n_layers
71
- self.p_dropout = p_dropout
72
-
73
- self.drop = nn.Dropout(p_dropout)
74
- self.convs_sep = nn.ModuleList()
75
- self.convs_1x1 = nn.ModuleList()
76
- self.norms_1 = nn.ModuleList()
77
- self.norms_2 = nn.ModuleList()
78
- for i in range(n_layers):
79
- dilation = kernel_size ** i
80
- padding = (kernel_size * dilation - dilation) // 2
81
- self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
82
- groups=channels, dilation=dilation, padding=padding
83
- ))
84
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
85
- self.norms_1.append(LayerNorm(channels))
86
- self.norms_2.append(LayerNorm(channels))
87
-
88
- def forward(self, x, x_mask, g=None):
89
- if g is not None:
90
- x = x + g
91
- for i in range(self.n_layers):
92
- y = self.convs_sep[i](x * x_mask)
93
- y = self.norms_1[i](y)
94
- y = F.gelu(y)
95
- y = self.convs_1x1[i](y)
96
- y = self.norms_2[i](y)
97
- y = F.gelu(y)
98
- y = self.drop(y)
99
- x = x + y
100
- return x * x_mask
101
-
102
-
103
- class WN(torch.nn.Module):
104
- def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0, causal=False):
105
- super(WN, self).__init__()
106
- conv1d_type = SConv1d
107
- assert (kernel_size % 2 == 1)
108
- self.hidden_channels = hidden_channels
109
- self.kernel_size = kernel_size,
110
- self.dilation_rate = dilation_rate
111
- self.n_layers = n_layers
112
- self.gin_channels = gin_channels
113
- self.p_dropout = p_dropout
114
-
115
- self.in_layers = torch.nn.ModuleList()
116
- self.res_skip_layers = torch.nn.ModuleList()
117
- self.drop = nn.Dropout(p_dropout)
118
-
119
- if gin_channels != 0:
120
- self.cond_layer = conv1d_type(gin_channels, 2 * hidden_channels * n_layers, 1, norm='weight_norm')
121
-
122
- for i in range(n_layers):
123
- dilation = dilation_rate ** i
124
- padding = int((kernel_size * dilation - dilation) / 2)
125
- in_layer = conv1d_type(hidden_channels, 2 * hidden_channels, kernel_size, dilation=dilation,
126
- padding=padding, norm='weight_norm', causal=causal)
127
- self.in_layers.append(in_layer)
128
-
129
- # last one is not necessary
130
- if i < n_layers - 1:
131
- res_skip_channels = 2 * hidden_channels
132
- else:
133
- res_skip_channels = hidden_channels
134
-
135
- res_skip_layer = conv1d_type(hidden_channels, res_skip_channels, 1, norm='weight_norm', causal=causal)
136
- self.res_skip_layers.append(res_skip_layer)
137
-
138
- def forward(self, x, x_mask, g=None, **kwargs):
139
- output = torch.zeros_like(x)
140
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
141
-
142
- if g is not None:
143
- g = self.cond_layer(g)
144
-
145
- for i in range(self.n_layers):
146
- x_in = self.in_layers[i](x)
147
- if g is not None:
148
- cond_offset = i * 2 * self.hidden_channels
149
- g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
150
- else:
151
- g_l = torch.zeros_like(x_in)
152
-
153
- acts = commons.fused_add_tanh_sigmoid_multiply(
154
- x_in,
155
- g_l,
156
- n_channels_tensor)
157
- acts = self.drop(acts)
158
-
159
- res_skip_acts = self.res_skip_layers[i](acts)
160
- if i < self.n_layers - 1:
161
- res_acts = res_skip_acts[:, :self.hidden_channels, :]
162
- x = (x + res_acts) * x_mask
163
- output = output + res_skip_acts[:, self.hidden_channels:, :]
164
- else:
165
- output = output + res_skip_acts
166
- return output * x_mask
167
-
168
- def remove_weight_norm(self):
169
- if self.gin_channels != 0:
170
- torch.nn.utils.remove_weight_norm(self.cond_layer)
171
- for l in self.in_layers:
172
- torch.nn.utils.remove_weight_norm(l)
173
- for l in self.res_skip_layers:
174
- torch.nn.utils.remove_weight_norm(l)
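The WN block above depends on commons.fused_add_tanh_sigmoid_multiply for its gated activation. Assuming the usual WaveNet-style definition (this is a reference sketch, not the deleted commons implementation), it adds the conditioning, splits the result into two channel halves, and multiplies tanh of one half by sigmoid of the other:

import torch

def fused_add_tanh_sigmoid_multiply(x_in: torch.Tensor, g_l: torch.Tensor, n_channels: torch.IntTensor) -> torch.Tensor:
    # x_in, g_l: (B, 2 * n_channels, T); output: (B, n_channels, T)
    n = int(n_channels[0])
    in_act = x_in + g_l
    t_act = torch.tanh(in_act[:, :n, :])
    s_act = torch.sigmoid(in_act[:, n:, :])
    return t_act * s_act

x_in = torch.randn(2, 8, 16)
acts = fused_add_tanh_sigmoid_multiply(x_in, torch.zeros_like(x_in), torch.IntTensor([4]))
print(acts.shape)  # torch.Size([2, 4, 16])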