duyv committed
Commit 1085335 · 1 Parent(s): 85ea928

Upload 20 files

Vietnam-F-TTS/LOAD/attentions.py ADDED
@@ -0,0 +1,330 @@
+import math
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+# import commons
+from LOAD.commons import convert_pad_shape
+from LOAD.modules import LayerNorm
+
+
+class Encoder(nn.Module):
+    def __init__(
+        self,
+        hidden_channels,
+        filter_channels,
+        n_heads,
+        n_layers,
+        kernel_size=1,
+        p_dropout=0.0,
+        window_size=4,
+        **kwargs
+    ):
+        super().__init__()
+        self.hidden_channels = hidden_channels
+        self.filter_channels = filter_channels
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.window_size = window_size
+
+        self.drop = nn.Dropout(p_dropout)
+        self.attn_layers = nn.ModuleList()
+        self.norm_layers_1 = nn.ModuleList()
+        self.ffn_layers = nn.ModuleList()
+        self.norm_layers_2 = nn.ModuleList()
+        for i in range(self.n_layers):
+            self.attn_layers.append(
+                MultiHeadAttention(
+                    hidden_channels,
+                    hidden_channels,
+                    n_heads,
+                    p_dropout=p_dropout,
+                    window_size=window_size,
+                )
+            )
+            self.norm_layers_1.append(LayerNorm(hidden_channels))
+            self.ffn_layers.append(
+                FFN(
+                    hidden_channels,
+                    hidden_channels,
+                    filter_channels,
+                    kernel_size,
+                    p_dropout=p_dropout,
+                )
+            )
+            self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+    def forward(self, x, x_mask):
+        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+        x = x * x_mask
+        for i in range(self.n_layers):
+            y = self.attn_layers[i](x, x, attn_mask)
+            y = self.drop(y)
+            x = self.norm_layers_1[i](x + y)
+
+            y = self.ffn_layers[i](x, x_mask)
+            y = self.drop(y)
+            x = self.norm_layers_2[i](x + y)
+        x = x * x_mask
+        return x
+
+
+class MultiHeadAttention(nn.Module):
+    def __init__(
+        self,
+        channels,
+        out_channels,
+        n_heads,
+        p_dropout=0.0,
+        window_size=None,
+        heads_share=True,
+        block_length=None,
+        proximal_bias=False,
+        proximal_init=False,
+    ):
+        super().__init__()
+        assert channels % n_heads == 0
+
+        self.channels = channels
+        self.out_channels = out_channels
+        self.n_heads = n_heads
+        self.p_dropout = p_dropout
+        self.window_size = window_size
+        self.heads_share = heads_share
+        self.block_length = block_length
+        self.proximal_bias = proximal_bias
+        self.proximal_init = proximal_init
+        # self.attn = None
+
+        self.k_channels = channels // n_heads
+        self.conv_q = nn.Conv1d(channels, channels, 1)
+        self.conv_k = nn.Conv1d(channels, channels, 1)
+        self.conv_v = nn.Conv1d(channels, channels, 1)
+        self.conv_o = nn.Conv1d(channels, out_channels, 1)
+        self.drop = nn.Dropout(p_dropout)
+
+        if window_size is not None:
+            n_heads_rel = 1 if heads_share else n_heads
+            rel_stddev = self.k_channels**-0.5
+            self.emb_rel_k = nn.Parameter(
+                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
+                * rel_stddev
+            )
+            self.emb_rel_v = nn.Parameter(
+                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
+                * rel_stddev
+            )
+
+        nn.init.xavier_uniform_(self.conv_q.weight)
+        nn.init.xavier_uniform_(self.conv_k.weight)
+        nn.init.xavier_uniform_(self.conv_v.weight)
+        if proximal_init:
+            with torch.no_grad():
+                self.conv_k.weight.copy_(self.conv_q.weight)
+                self.conv_k.bias.copy_(self.conv_q.bias)
+
+    def forward(self, x, c, attn_mask=None):
+        q = self.conv_q(x)
+        k = self.conv_k(c)
+        v = self.conv_v(c)
+
+        x, _ = self.attention(q, k, v, mask=attn_mask)
+
+        x = self.conv_o(x)
+        return x
+
+    def attention(self, query, key, value, mask=None):
+        # reshape [b, d, t] -> [b, n_h, t, d_k]
+        b, d, t_s, t_t = (*key.size(), query.size(2))
+        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+
+        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+        if self.window_size is not None:
+            assert (
+                t_s == t_t
+            ), "Relative attention is only available for self-attention."
+            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
+            rel_logits = self._matmul_with_relative_keys(
+                query / math.sqrt(self.k_channels), key_relative_embeddings
+            )
+            scores_local = self._relative_position_to_absolute_position(rel_logits)
+            scores = scores + scores_local
+        if self.proximal_bias:
+            assert t_s == t_t, "Proximal bias is only available for self-attention."
+            scores = scores + self._attention_bias_proximal(t_s).to(
+                device=scores.device, dtype=scores.dtype
+            )
+        if mask is not None:
+            scores = scores.masked_fill(mask == 0, -1e4)
+            if self.block_length is not None:
+                assert (
+                    t_s == t_t
+                ), "Local attention is only available for self-attention."
+                block_mask = (
+                    torch.ones_like(scores)
+                    .triu(-self.block_length)
+                    .tril(self.block_length)
+                )
+                scores = scores.masked_fill(block_mask == 0, -1e4)
+        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
+        p_attn = self.drop(p_attn)
+        output = torch.matmul(p_attn, value)
+        if self.window_size is not None:
+            relative_weights = self._absolute_position_to_relative_position(p_attn)
+            value_relative_embeddings = self._get_relative_embeddings(
+                self.emb_rel_v, t_s
+            )
+            output = output + self._matmul_with_relative_values(
+                relative_weights, value_relative_embeddings
+            )
+        output = (
+            output.transpose(2, 3).contiguous().view(b, d, t_t)
+        )  # [b, n_h, t_t, d_k] -> [b, d, t_t]
+        return output, p_attn
+
+    def _matmul_with_relative_values(self, x, y):
+        """
+        x: [b, h, l, m]
+        y: [h or 1, m, d]
+        ret: [b, h, l, d]
+        """
+        ret = torch.matmul(x, y.unsqueeze(0))
+        return ret
+
+    def _matmul_with_relative_keys(self, x, y):
+        """
+        x: [b, h, l, d]
+        y: [h or 1, m, d]
+        ret: [b, h, l, m]
+        """
+        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
+        return ret
+
+    def _get_relative_embeddings(self, relative_embeddings, length):
+        max_relative_position = 2 * self.window_size + 1
+        # Pad first before slice to avoid using cond ops.
+        pad_length = max(length - (self.window_size + 1), 0)
+        slice_start_position = max((self.window_size + 1) - length, 0)
+        slice_end_position = slice_start_position + 2 * length - 1
+        if pad_length > 0:
+            padded_relative_embeddings = F.pad(
+                relative_embeddings,
+                convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
+            )
+        else:
+            padded_relative_embeddings = relative_embeddings
+        used_relative_embeddings = padded_relative_embeddings[
+            :, slice_start_position:slice_end_position
+        ]
+        return used_relative_embeddings
+
+    def _relative_position_to_absolute_position(self, x):
+        """
+        x: [b, h, l, 2*l-1]
+        ret: [b, h, l, l]
+        """
+        batch, heads, length, _ = x.size()
+        # Concat columns of pad to shift from relative to absolute indexing.
+        x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
+
+        # Concat extra elements so as to add up to shape (len+1, 2*len-1).
+        x_flat = x.view([batch, heads, length * 2 * length])
+        x_flat = F.pad(
+            x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
+        )
+
+        # Reshape and slice out the padded elements.
+        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
+            :, :, :length, length - 1 :
+        ]
+        return x_final
+
+    def _absolute_position_to_relative_position(self, x):
+        """
+        x: [b, h, l, l]
+        ret: [b, h, l, 2*l-1]
+        """
+        batch, heads, length, _ = x.shape
+        # pad along column
+        x = F.pad(
+            x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
+        )
+        x_flat = x.view([batch, heads, length * length + length * (length - 1)])
+        # add 0's in the beginning that will skew the elements after reshape
+        x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
+        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
+        return x_final
+
+    def _attention_bias_proximal(self, length):
+        """Bias for self-attention to encourage attention to close positions.
+        Args:
+            length: an integer scalar.
+        Returns:
+            a Tensor with shape [1, 1, length, length]
+        """
+        r = torch.arange(length, dtype=torch.float32)
+        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
+        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
+
+
+class FFN(nn.Module):
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        filter_channels,
+        kernel_size,
+        p_dropout=0.0,
+        activation=None,
+        causal=False,
+    ):
+        super().__init__()
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.filter_channels = filter_channels
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.activation = activation
+        self.causal = causal
+
+        if causal:
+            self.padding = self._causal_padding
+        else:
+            self.padding = self._same_padding
+
+        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
+        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
+        self.drop = nn.Dropout(p_dropout)
+
+    def forward(self, x, x_mask):
+        x = self.conv_1(self.padding(x * x_mask))
+        if self.activation == "gelu":
+            x = x * torch.sigmoid(1.702 * x)
+        else:
+            x = torch.relu(x)
+        x = self.drop(x)
+        x = self.conv_2(self.padding(x * x_mask))
+        return x * x_mask
+
+    def _causal_padding(self, x):
+        if self.kernel_size == 1:
+            return x
+        pad_l = self.kernel_size - 1
+        pad_r = 0
+        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+        x = F.pad(x, convert_pad_shape(padding))
+        return x
+
+    def _same_padding(self, x):
+        if self.kernel_size == 1:
+            return x
+        pad_l = (self.kernel_size - 1) // 2
+        pad_r = self.kernel_size // 2
+        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+        x = F.pad(x, convert_pad_shape(padding))
+        return x
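A quick smoke test for this encoder (an editor's sketch, not part of the upload; it assumes the Vietnam-F-TTS directory is the working directory so that the LOAD package is importable, and reuses the hyperparameters from config.json further down):

import torch
from LOAD.attentions import Encoder

# Hyperparameters mirror the "model" section of Vietnam-F-TTS/config.json.
enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2,
              n_layers=6, kernel_size=3, p_dropout=0.1)
x = torch.randn(1, 192, 50)    # [batch, channels, time]
x_mask = torch.ones(1, 1, 50)  # 1 = valid frame, 0 = padding
y = enc(x, x_mask)             # output keeps the input shape: [1, 192, 50]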
Vietnam-F-TTS/LOAD/commons.py ADDED
@@ -0,0 +1,162 @@
+import math
+
+import torch
+from torch.nn import functional as F
+
+
+def init_weights(m, mean=0.0, std=0.01):
+    classname = m.__class__.__name__
+    if classname.find("Conv") != -1:
+        m.weight.data.normal_(mean, std)
+
+
+def get_padding(kernel_size, dilation=1):
+    return int((kernel_size * dilation - dilation) / 2)
+
+
+def convert_pad_shape(pad_shape):
+    l = pad_shape[::-1]
+    pad_shape = [item for sublist in l for item in sublist]
+    return pad_shape
+
+
+def intersperse(lst, item):
+    result = [item] * (len(lst) * 2 + 1)
+    result[1::2] = lst
+    return result
+
+
+def kl_divergence(m_p, logs_p, m_q, logs_q):
+    """KL(P||Q)"""
+    kl = (logs_q - logs_p) - 0.5
+    kl += (
+        0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
+    )
+    return kl
+
+
+def rand_gumbel(shape):
+    """Sample from the Gumbel distribution, protect from overflows."""
+    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
+    return -torch.log(-torch.log(uniform_samples))
+
+
+def rand_gumbel_like(x):
+    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
+    return g
+
+
+def slice_segments(x, ids_str, segment_size=4):
+    ret = torch.zeros_like(x[:, :, :segment_size])
+    for i in range(x.size(0)):
+        idx_str = ids_str[i]
+        idx_end = idx_str + segment_size
+        ret[i] = x[i, :, idx_str:idx_end]
+    return ret
+
+
+def rand_slice_segments(x, x_lengths=None, segment_size=4):
+    b, d, t = x.size()
+    if x_lengths is None:
+        x_lengths = t
+    ids_str_max = x_lengths - segment_size + 1
+    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
+    ret = slice_segments(x, ids_str, segment_size)
+    return ret, ids_str
+
+
+def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
+    position = torch.arange(length, dtype=torch.float)
+    num_timescales = channels // 2
+    log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
+        num_timescales - 1
+    )
+    inv_timescales = min_timescale * torch.exp(
+        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
+    )
+    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
+    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
+    signal = F.pad(signal, [0, 0, 0, channels % 2])
+    signal = signal.view(1, channels, length)
+    return signal
+
+
+def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
+    b, channels, length = x.size()
+    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+    return x + signal.to(dtype=x.dtype, device=x.device)
+
+
+def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
+    b, channels, length = x.size()
+    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
+
+
+def subsequent_mask(length):
+    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
+    return mask
+
+
+@torch.jit.script
+def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+    n_channels_int = n_channels[0]
+    in_act = input_a + input_b
+    t_act = torch.tanh(in_act[:, :n_channels_int, :])
+    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+    acts = t_act * s_act
+    return acts
+
+
+def shift_1d(x):
+    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
+    return x
+
+
+def sequence_mask(length, max_length=None):
+    if max_length is None:
+        max_length = length.max()
+    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
+    return x.unsqueeze(0) < length.unsqueeze(1)
+
+
+def generate_path(duration, mask):
+    """
+    duration: [b, 1, t_x]
+    mask: [b, 1, t_y, t_x]
+    """
+    device = duration.device
+
+    b, _, t_y, t_x = mask.shape
+    cum_duration = torch.cumsum(duration, -1)
+
+    cum_duration_flat = cum_duration.view(b * t_x)
+    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
+    path = path.view(b, t_x, t_y)
+    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
+    path = path.unsqueeze(1).transpose(2, 3) * mask
+    return path
+
+
+def clip_grad_value_(parameters, clip_value, norm_type=2):
+    if isinstance(parameters, torch.Tensor):
+        parameters = [parameters]
+    parameters = list(filter(lambda p: p.grad is not None, parameters))
+    norm_type = float(norm_type)
+    if clip_value is not None:
+        clip_value = float(clip_value)
+
+    total_norm = 0
+    for p in parameters:
+        param_norm = p.grad.data.norm(norm_type)
+        total_norm += param_norm.item() ** norm_type
+        if clip_value is not None:
+            p.grad.data.clamp_(min=-clip_value, max=clip_value)
+    total_norm = total_norm ** (1.0 / norm_type)
+    return total_norm
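Among these helpers, generate_path is the one that ties durations to the decoder: it expands per-token durations into a hard monotonic alignment matrix. A worked example (editor's sketch, same import assumption as above):

import torch
from LOAD.commons import generate_path

duration = torch.tensor([[[2.0, 1.0, 3.0]]])  # [b=1, 1, t_x=3] frames per token
t_y = int(duration.sum())                     # 6 output frames in total
mask = torch.ones(1, 1, t_y, 3)               # [b, 1, t_y, t_x]
path = generate_path(duration, mask)          # [1, 1, 6, 3] one-hot alignment
# path[0, 0] maps frames to tokens: frames 0-1 -> token 0,
# frame 2 -> token 1, frames 3-5 -> token 2.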
Vietnam-F-TTS/LOAD/flow.py ADDED
@@ -0,0 +1,120 @@
+import torch
+from torch import nn
+
+from LOAD.modules import WN
+
+DEFAULT_MIN_BIN_WIDTH = 1e-3
+DEFAULT_MIN_BIN_HEIGHT = 1e-3
+DEFAULT_MIN_DERIVATIVE = 1e-3
+
+
+class ResidualCouplingLayer(nn.Module):
+    def __init__(
+        self,
+        channels,
+        hidden_channels,
+        kernel_size,
+        dilation_rate,
+        n_layers,
+        p_dropout=0,
+        gin_channels=0,
+        mean_only=False,
+    ):
+        assert channels % 2 == 0, "channels should be divisible by 2"
+        super().__init__()
+        self.channels = channels
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.half_channels = channels // 2
+        self.mean_only = mean_only
+
+        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
+        self.enc = WN(
+            hidden_channels,
+            kernel_size,
+            dilation_rate,
+            n_layers,
+            p_dropout=p_dropout,
+            gin_channels=gin_channels,
+        )
+        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
+        self.post.weight.data.zero_()
+        self.post.bias.data.zero_()
+
+    def forward(self, x, x_mask, g=None, reverse=False):
+        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+        h = self.pre(x0) * x_mask
+        h = self.enc(h, x_mask, g=g)
+        stats = self.post(h) * x_mask
+        if not self.mean_only:
+            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
+        else:
+            m = stats
+            logs = torch.zeros_like(m)
+
+        if not reverse:
+            x1 = m + x1 * torch.exp(logs) * x_mask
+            x = torch.cat([x0, x1], 1)
+            logdet = torch.sum(logs, [1, 2])
+            return x, logdet
+        else:
+            x1 = (x1 - m) * torch.exp(-logs) * x_mask
+            x = torch.cat([x0, x1], 1)
+            return x
+
+
+class Flip(nn.Module):
+    def forward(self, x, *args, reverse=False, **kwargs):
+        x = torch.flip(x, [1])
+        if not reverse:
+            logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
+            return x, logdet
+        else:
+            return x
+
+
+class ResidualCouplingBlock(nn.Module):
+    def __init__(
+        self,
+        channels,
+        hidden_channels,
+        kernel_size,
+        dilation_rate,
+        n_layers,
+        n_flows=4,
+        gin_channels=0,
+    ):
+        super().__init__()
+        self.channels = channels
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.n_flows = n_flows
+        self.gin_channels = gin_channels
+
+        self.flows = nn.ModuleList()
+        for i in range(n_flows):
+            self.flows.append(
+                ResidualCouplingLayer(
+                    channels,
+                    hidden_channels,
+                    kernel_size,
+                    dilation_rate,
+                    n_layers,
+                    gin_channels=gin_channels,
+                    mean_only=True,
+                )
+            )
+            self.flows.append(Flip())
+
+    def forward(self, x, x_mask, g=None, reverse=False):
+        if not reverse:
+            for flow in self.flows:
+                x, _ = flow(x, x_mask, g=g, reverse=reverse)
+        else:
+            for flow in reversed(self.flows):
+                x = flow(x, x_mask, g=g, reverse=reverse)
+        return x
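Because every layer in this block is either an affine coupling or a channel flip, the whole stack can be inverted exactly. A round-trip check (editor's sketch; the constructor arguments match how SynthesizerTrn builds its flow in models.py below):

import torch
from LOAD.flow import ResidualCouplingBlock

flow = ResidualCouplingBlock(
    channels=192, hidden_channels=192, kernel_size=5, dilation_rate=2, n_layers=4
).eval()
x = torch.randn(1, 192, 40)
x_mask = torch.ones(1, 1, 40)
with torch.no_grad():
    z = flow(x, x_mask)                    # forward direction
    x_rec = flow(z, x_mask, reverse=True)  # inverse direction
print(torch.allclose(x, x_rec, atol=1e-5))  # True: the block is invertible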
Vietnam-F-TTS/LOAD/models.py ADDED
@@ -0,0 +1,493 @@
+import math
+
+import torch
+from torch import nn
+from torch.nn import Conv1d, Conv2d, ConvTranspose1d
+from torch.nn import functional as F
+from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
+from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
+
+
+from LOAD.attentions import Encoder
+from LOAD.modules import WN, ResBlock1, ResBlock2, LRELU_SLOPE
+
+from LOAD.commons import get_padding, init_weights, sequence_mask, rand_slice_segments
+from LOAD.flow import ResidualCouplingBlock
+
+
+class PriorEncoder(nn.Module):
+    def __init__(
+        self,
+        n_vocab,
+        out_channels,
+        hidden_channels,
+        filter_channels,
+        n_heads,
+        n_layers,
+        kernel_size,
+        p_dropout,
+    ):
+        super().__init__()
+        self.n_vocab = n_vocab
+        self.out_channels = out_channels
+        self.hidden_channels = hidden_channels
+        self.filter_channels = filter_channels
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+
+        self.emb = nn.Embedding(n_vocab, hidden_channels)
+        nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
+        self.pre_attn_encoder = Encoder(
+            hidden_channels,
+            filter_channels,
+            n_heads,
+            n_layers // 2,
+            kernel_size,
+            p_dropout,
+        )
+        self.post_attn_encoder = Encoder(
+            hidden_channels,
+            filter_channels,
+            n_heads,
+            n_layers - n_layers // 2,
+            kernel_size,
+            p_dropout,
+        )
+        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+    def forward(self, x, x_lengths, y_lengths, attn):
+        x = self.emb(x) * math.sqrt(self.hidden_channels)  # [b, t, h]
+        x = torch.transpose(x, 1, -1)  # [b, h, t]
+        x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+        x = self.pre_attn_encoder(x * x_mask, x_mask)
+        y = torch.einsum("bht,blt->bhl", x, attn)
+        y_mask = torch.unsqueeze(sequence_mask(y_lengths, y.size(2)), 1).to(y.dtype)
+        y = self.post_attn_encoder(y * y_mask, y_mask)
+        stats = self.proj(y) * y_mask
+
+        m, logs = torch.split(stats, self.out_channels, dim=1)
+        return y, m, logs, y_mask
+
+
+class PosteriorEncoder(nn.Module):
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        hidden_channels,
+        kernel_size,
+        dilation_rate,
+        n_layers,
+        gin_channels=0,
+    ):
+        super().__init__()
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.gin_channels = gin_channels
+
+        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+        self.enc = WN(
+            hidden_channels,
+            kernel_size,
+            dilation_rate,
+            n_layers,
+            gin_channels=gin_channels,
+        )
+        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+    def forward(self, x, x_lengths, g=None):
+        x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+        x = self.pre(x) * x_mask
+        x = self.enc(x, x_mask, g=g)
+        stats = self.proj(x) * x_mask
+        m, logs = torch.split(stats, self.out_channels, dim=1)
+        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+        return z, m, logs, x_mask
+
+
+class Generator(torch.nn.Module):
+    def __init__(
+        self,
+        initial_channel,
+        resblock,
+        resblock_kernel_sizes,
+        resblock_dilation_sizes,
+        upsample_rates,
+        upsample_initial_channel,
+        upsample_kernel_sizes,
+        gin_channels=0,
+    ):
+        super(Generator, self).__init__()
+        self.num_kernels = len(resblock_kernel_sizes)
+        self.num_upsamples = len(upsample_rates)
+        self.conv_pre = Conv1d(
+            initial_channel, upsample_initial_channel, 7, 1, padding=3
+        )
+
+        resblock = ResBlock1 if resblock == "1" else ResBlock2
+
+        self.ups = nn.ModuleList()
+        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+            self.ups.append(
+                weight_norm(
+                    ConvTranspose1d(
+                        upsample_initial_channel // (2**i),
+                        upsample_initial_channel // (2 ** (i + 1)),
+                        k,
+                        u,
+                        padding=(k - u) // 2,
+                    )
+                )
+            )
+
+        self.resblocks = nn.ModuleList()
+        for i in range(len(self.ups)):
+            ch = upsample_initial_channel // (2 ** (i + 1))
+            for j, (k, d) in enumerate(
+                zip(resblock_kernel_sizes, resblock_dilation_sizes)
+            ):
+                self.resblocks.append(resblock(ch, k, d))
+
+        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+        self.ups.apply(init_weights)
+
+        if gin_channels != 0:
+            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+    def forward(self, x, g=None):
+        x = self.conv_pre(x)
+        if g is not None:
+            x = x + self.cond(g)
+
+        for i in range(self.num_upsamples):
+            x = F.leaky_relu(x, LRELU_SLOPE)
+            x = self.ups[i](x)
+            xs = None
+            for j in range(self.num_kernels):
+                if xs is None:
+                    xs = self.resblocks[i * self.num_kernels + j](x)
+                else:
+                    xs += self.resblocks[i * self.num_kernels + j](x)
+            x = xs / self.num_kernels
+        x = F.leaky_relu(x)
+        x = self.conv_post(x)
+        x = torch.tanh(x)
+
+        return x
+
+    def remove_weight_norm(self):
+        print("Removing weight norm...")
+        for l in self.ups:
+            remove_weight_norm(l)
+        for l in self.resblocks:
+            l.remove_weight_norm()
+
+
+class DiscriminatorP(torch.nn.Module):
+    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+        super(DiscriminatorP, self).__init__()
+        self.period = period
+        self.use_spectral_norm = use_spectral_norm
+        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
+        self.convs = nn.ModuleList(
+            [
+                norm_f(
+                    Conv2d(
+                        1,
+                        32,
+                        (kernel_size, 1),
+                        (stride, 1),
+                        padding=(get_padding(kernel_size, 1), 0),
+                    )
+                ),
+                norm_f(
+                    Conv2d(
+                        32,
+                        128,
+                        (kernel_size, 1),
+                        (stride, 1),
+                        padding=(get_padding(kernel_size, 1), 0),
+                    )
+                ),
+                norm_f(
+                    Conv2d(
+                        128,
+                        512,
+                        (kernel_size, 1),
+                        (stride, 1),
+                        padding=(get_padding(kernel_size, 1), 0),
+                    )
+                ),
+                norm_f(
+                    Conv2d(
+                        512,
+                        1024,
+                        (kernel_size, 1),
+                        (stride, 1),
+                        padding=(get_padding(kernel_size, 1), 0),
+                    )
+                ),
+                norm_f(
+                    Conv2d(
+                        1024,
+                        1024,
+                        (kernel_size, 1),
+                        1,
+                        padding=(get_padding(kernel_size, 1), 0),
+                    )
+                ),
+            ]
+        )
+        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+    def forward(self, x):
+        fmap = []
+
+        # 1d to 2d
+        b, c, t = x.shape
+        if t % self.period != 0:  # pad first
+            n_pad = self.period - (t % self.period)
+            x = F.pad(x, (0, n_pad), "reflect")
+            t = t + n_pad
+        x = x.view(b, c, t // self.period, self.period)
+
+        for l in self.convs:
+            x = l(x)
+            x = F.leaky_relu(x, LRELU_SLOPE)
+            fmap.append(x)
+        x = self.conv_post(x)
+        fmap.append(x)
+        x = torch.flatten(x, 1, -1)
+
+        return x, fmap
+
+
+class DiscriminatorS(torch.nn.Module):
+    def __init__(self, use_spectral_norm=False):
+        super(DiscriminatorS, self).__init__()
+        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
+        self.convs = nn.ModuleList(
+            [
+                norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+                norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+                norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+                norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+                norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+                norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+            ]
+        )
+        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+    def forward(self, x):
+        fmap = []
+
+        for l in self.convs:
+            x = l(x)
+            x = F.leaky_relu(x, LRELU_SLOPE)
+            fmap.append(x)
+        x = self.conv_post(x)
+        fmap.append(x)
+        x = torch.flatten(x, 1, -1)
+
+        return x, fmap
+
+
+class MultiPeriodDiscriminator(torch.nn.Module):
+    def __init__(self, use_spectral_norm=False):
+        super(MultiPeriodDiscriminator, self).__init__()
+        periods = [2, 3, 5, 7, 11]
+
+        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+        discs = discs + [
+            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+        ]
+        self.discriminators = nn.ModuleList(discs)
+
+    def forward(self, y, y_hat):
+        y_d_rs = []
+        y_d_gs = []
+        fmap_rs = []
+        fmap_gs = []
+        for i, d in enumerate(self.discriminators):
+            y_d_r, fmap_r = d(y)
+            y_d_g, fmap_g = d(y_hat)
+            y_d_rs.append(y_d_r)
+            y_d_gs.append(y_d_g)
+            fmap_rs.append(fmap_r)
+            fmap_gs.append(fmap_g)
+
+        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class SynthesizerTrn(nn.Module):
+    """
+    Synthesizer for Training
+    """
+
+    def __init__(
+        self,
+        n_vocab,
+        spec_channels,
+        segment_size,
+        inter_channels,
+        hidden_channels,
+        filter_channels,
+        n_heads,
+        n_layers,
+        kernel_size,
+        p_dropout,
+        resblock,
+        resblock_kernel_sizes,
+        resblock_dilation_sizes,
+        upsample_rates,
+        upsample_initial_channel,
+        upsample_kernel_sizes,
+        n_speakers=0,
+        gin_channels=0,
+        **kwargs
+    ):
+        super().__init__()
+        self.n_vocab = n_vocab
+        self.spec_channels = spec_channels
+        self.inter_channels = inter_channels
+        self.hidden_channels = hidden_channels
+        self.filter_channels = filter_channels
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.resblock = resblock
+        self.resblock_kernel_sizes = resblock_kernel_sizes
+        self.resblock_dilation_sizes = resblock_dilation_sizes
+        self.upsample_rates = upsample_rates
+        self.upsample_initial_channel = upsample_initial_channel
+        self.upsample_kernel_sizes = upsample_kernel_sizes
+        self.segment_size = segment_size
+        self.n_speakers = n_speakers
+        self.gin_channels = gin_channels
+
+        self.enc_p = PriorEncoder(
+            n_vocab,
+            inter_channels,
+            hidden_channels,
+            filter_channels,
+            n_heads,
+            n_layers,
+            kernel_size,
+            p_dropout,
+        )
+        self.dec = Generator(
+            inter_channels,
+            resblock,
+            resblock_kernel_sizes,
+            resblock_dilation_sizes,
+            upsample_rates,
+            upsample_initial_channel,
+            upsample_kernel_sizes,
+            gin_channels=gin_channels,
+        )
+        self.enc_q = PosteriorEncoder(
+            spec_channels,
+            inter_channels,
+            hidden_channels,
+            5,
+            1,
+            16,
+            gin_channels=gin_channels,
+        )
+        self.flow = ResidualCouplingBlock(
+            inter_channels, hidden_channels, 5, 2, 4, gin_channels=gin_channels
+        )
+
+        if n_speakers > 1:
+            self.emb_g = nn.Embedding(n_speakers, gin_channels)
+
+    def forward(self, x, x_lengths, attn, y, y_lengths, sid=None):
+        x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, y_lengths, attn=attn)
+        if self.n_speakers > 0:
+            g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
+        else:
+            g = None
+
+        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+        z_p = self.flow(z, y_mask, g=g)
+        z_slice, ids_slice = rand_slice_segments(
+            z, y_lengths, self.segment_size
+        )
+        o = self.dec(z_slice, g=g)
+        l_length = None
+        return (
+            o,
+            l_length,
+            attn,
+            ids_slice,
+            x_mask,
+            y_mask,
+            (z, z_p, m_p, logs_p, m_q, logs_q),
+        )
+
+    def infer(
+        self,
+        x,
+        x_lengths,
+        y_lengths,
+        attn,
+        sid=None,
+        noise_scale=1,
+        max_len=None,
+    ):
+        x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, y_lengths, attn=attn)
+        if self.n_speakers > 0:
+            g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
+        else:
+            g = None
+
+        y_mask = torch.unsqueeze(sequence_mask(y_lengths, attn.shape[1]), 1).to(
+            x_mask.dtype
+        )
+        z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
+        z = self.flow(z_p, y_mask, g=g, reverse=True)
+        o = self.dec((z * y_mask)[:, :, :max_len], g=g)
+        return o, attn, y_mask, (z, z_p, m_p, logs_p)
+
+
+class DurationNet(torch.nn.Module):
+    def __init__(self, vocab_size: int, dim: int, num_layers=2):
+        super().__init__()
+        self.embed = torch.nn.Embedding(vocab_size, embedding_dim=dim)
+        self.rnn = torch.nn.GRU(
+            dim,
+            dim,
+            num_layers=num_layers,
+            batch_first=True,
+            bidirectional=True,
+            dropout=0.2,
+        )
+        self.proj = torch.nn.Linear(2 * dim, 1)
+
+    def forward(self, token, lengths):
+        x = self.embed(token)
+        lengths = lengths.long().cpu()
+        x = pack_padded_sequence(
+            x, lengths=lengths, batch_first=True, enforce_sorted=False
+        )
+        x, _ = self.rnn(x)
+        x, _ = pad_packed_sequence(x, batch_first=True, total_length=token.shape[1])
+        x = self.proj(x)
+        x = torch.nn.functional.softplus(x)
+        return x
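DurationNet is the piece that predicts a positive duration for each input token. A shape check (editor's sketch; vocab_size, dim and num_layers follow the load_models call in TTS.py below):

import torch
from LOAD.models import DurationNet

net = DurationNet(vocab_size=256, dim=64, num_layers=4).eval()
tokens = torch.randint(0, 256, (2, 30))  # [batch, seq_len] token ids
lengths = torch.tensor([30, 25])         # true lengths before padding
with torch.no_grad():
    d = net(tokens, lengths)             # [2, 30, 1]; softplus keeps durations > 0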
Vietnam-F-TTS/LOAD/modules.py ADDED
@@ -0,0 +1,356 @@
+import torch
+from torch import nn
+from torch.nn import Conv1d
+from torch.nn import functional as F
+from torch.nn.utils import remove_weight_norm, weight_norm
+
+# import commons
+from LOAD.commons import get_padding, init_weights, fused_add_tanh_sigmoid_multiply
+
+LRELU_SLOPE = 0.1
+
+
+class LayerNorm(nn.Module):
+    def __init__(self, channels, eps=1e-5):
+        super().__init__()
+        self.channels = channels
+        self.eps = eps
+
+        self.gamma = nn.Parameter(torch.ones(channels))
+        self.beta = nn.Parameter(torch.zeros(channels))
+
+    def forward(self, x):
+        x = x.transpose(1, -1)
+        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+        return x.transpose(1, -1)
+
+
+class ConvReluNorm(nn.Module):
+    def __init__(
+        self,
+        in_channels,
+        hidden_channels,
+        out_channels,
+        kernel_size,
+        n_layers,
+        p_dropout,
+    ):
+        super().__init__()
+        self.in_channels = in_channels
+        self.hidden_channels = hidden_channels
+        self.out_channels = out_channels
+        self.kernel_size = kernel_size
+        self.n_layers = n_layers
+        self.p_dropout = p_dropout
+        assert n_layers > 1, "Number of layers should be larger than 1."
+
+        self.conv_layers = nn.ModuleList()
+        self.norm_layers = nn.ModuleList()
+        self.conv_layers.append(
+            nn.Conv1d(
+                in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
+            )
+        )
+        self.norm_layers.append(LayerNorm(hidden_channels))
+        self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
+        for _ in range(n_layers - 1):
+            self.conv_layers.append(
+                nn.Conv1d(
+                    hidden_channels,
+                    hidden_channels,
+                    kernel_size,
+                    padding=kernel_size // 2,
+                )
+            )
+            self.norm_layers.append(LayerNorm(hidden_channels))
+        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+        self.proj.weight.data.zero_()
+        self.proj.bias.data.zero_()
+
+    def forward(self, x, x_mask):
+        x_org = x
+        for i in range(self.n_layers):
+            x = self.conv_layers[i](x * x_mask)
+            x = self.norm_layers[i](x)
+            x = self.relu_drop(x)
+        x = x_org + self.proj(x)
+        return x * x_mask
+
+
+class DDSConv(nn.Module):
+    """
+    Dilated and Depth-Separable Convolution
+    """
+
+    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
+        super().__init__()
+        self.channels = channels
+        self.kernel_size = kernel_size
+        self.n_layers = n_layers
+        self.p_dropout = p_dropout
+
+        self.drop = nn.Dropout(p_dropout)
+        self.convs_sep = nn.ModuleList()
+        self.convs_1x1 = nn.ModuleList()
+        self.norms_1 = nn.ModuleList()
+        self.norms_2 = nn.ModuleList()
+        for i in range(n_layers):
+            dilation = kernel_size**i
+            padding = (kernel_size * dilation - dilation) // 2
+            self.convs_sep.append(
+                nn.Conv1d(
+                    channels,
+                    channels,
+                    kernel_size,
+                    groups=channels,
+                    dilation=dilation,
+                    padding=padding,
+                )
+            )
+            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
+            self.norms_1.append(LayerNorm(channels))
+            self.norms_2.append(LayerNorm(channels))
+
+    def forward(self, x, x_mask, g=None):
+        if g is not None:
+            x = x + g
+        for i in range(self.n_layers):
+            y = self.convs_sep[i](x * x_mask)
+            y = self.norms_1[i](y)
+            y = F.gelu(y)
+            y = self.convs_1x1[i](y)
+            y = self.norms_2[i](y)
+            y = F.gelu(y)
+            y = self.drop(y)
+            x = x + y
+        return x * x_mask
+
+
+class WN(torch.nn.Module):
+    def __init__(
+        self,
+        hidden_channels,
+        kernel_size,
+        dilation_rate,
+        n_layers,
+        gin_channels=0,
+        p_dropout=0,
+    ):
+        super(WN, self).__init__()
+        assert kernel_size % 2 == 1
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.gin_channels = gin_channels
+        self.p_dropout = p_dropout
+
+        self.in_layers = torch.nn.ModuleList()
+        self.res_skip_layers = torch.nn.ModuleList()
+        self.drop = nn.Dropout(p_dropout)
+
+        if gin_channels != 0:
+            cond_layer = torch.nn.Conv1d(
+                gin_channels, 2 * hidden_channels * n_layers, 1
+            )
+            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
+
+        for i in range(n_layers):
+            dilation = dilation_rate**i
+            padding = int((kernel_size * dilation - dilation) / 2)
+            in_layer = torch.nn.Conv1d(
+                hidden_channels,
+                2 * hidden_channels,
+                kernel_size,
+                dilation=dilation,
+                padding=padding,
+            )
+            in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
+            self.in_layers.append(in_layer)
+
+            # last one is not necessary
+            if i < n_layers - 1:
+                res_skip_channels = 2 * hidden_channels
+            else:
+                res_skip_channels = hidden_channels
+
+            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
+            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
+            self.res_skip_layers.append(res_skip_layer)
+
+    def forward(self, x, x_mask, g=None, **kwargs):
+        output = torch.zeros_like(x)
+        n_channels_tensor = torch.IntTensor([self.hidden_channels])
+
+        if g is not None:
+            g = self.cond_layer(g)
+
+        for i in range(self.n_layers):
+            x_in = self.in_layers[i](x)
+            if g is not None:
+                cond_offset = i * 2 * self.hidden_channels
+                g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
+            else:
+                g_l = torch.zeros_like(x_in)
+
+            acts = fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
+            acts = self.drop(acts)
+
+            res_skip_acts = self.res_skip_layers[i](acts)
+            if i < self.n_layers - 1:
+                res_acts = res_skip_acts[:, : self.hidden_channels, :]
+                x = (x + res_acts) * x_mask
+                output = output + res_skip_acts[:, self.hidden_channels :, :]
+            else:
+                output = output + res_skip_acts
+        return output * x_mask
+
+    def remove_weight_norm(self):
+        if self.gin_channels != 0:
+            torch.nn.utils.remove_weight_norm(self.cond_layer)
+        for l in self.in_layers:
+            torch.nn.utils.remove_weight_norm(l)
+        for l in self.res_skip_layers:
+            torch.nn.utils.remove_weight_norm(l)
+
+
+class ResBlock1(torch.nn.Module):
+    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
+        super(ResBlock1, self).__init__()
+        self.convs1 = nn.ModuleList(
+            [
+                weight_norm(
+                    Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=dilation[0],
+                        padding=get_padding(kernel_size, dilation[0]),
+                    )
+                ),
+                weight_norm(
+                    Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=dilation[1],
+                        padding=get_padding(kernel_size, dilation[1]),
+                    )
+                ),
+                weight_norm(
+                    Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=dilation[2],
+                        padding=get_padding(kernel_size, dilation[2]),
+                    )
+                ),
+            ]
+        )
+        self.convs1.apply(init_weights)
+
+        self.convs2 = nn.ModuleList(
+            [
+                weight_norm(
+                    Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=1,
+                        padding=get_padding(kernel_size, 1),
+                    )
+                ),
+                weight_norm(
+                    Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=1,
+                        padding=get_padding(kernel_size, 1),
+                    )
+                ),
+                weight_norm(
+                    Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=1,
+                        padding=get_padding(kernel_size, 1),
+                    )
+                ),
+            ]
+        )
+        self.convs2.apply(init_weights)
+
+    def forward(self, x, x_mask=None):
+        for c1, c2 in zip(self.convs1, self.convs2):
+            xt = F.leaky_relu(x, LRELU_SLOPE)
+            if x_mask is not None:
+                xt = xt * x_mask
+            xt = c1(xt)
+            xt = F.leaky_relu(xt, LRELU_SLOPE)
+            if x_mask is not None:
+                xt = xt * x_mask
+            xt = c2(xt)
+            x = xt + x
+        if x_mask is not None:
+            x = x * x_mask
+        return x
+
+    def remove_weight_norm(self):
+        for l in self.convs1:
+            remove_weight_norm(l)
+        for l in self.convs2:
+            remove_weight_norm(l)
+
+
+class ResBlock2(torch.nn.Module):
+    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
+        super(ResBlock2, self).__init__()
+        self.convs = nn.ModuleList(
+            [
+                weight_norm(
+                    Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=dilation[0],
+                        padding=get_padding(kernel_size, dilation[0]),
+                    )
+                ),
+                weight_norm(
+                    Conv1d(
+                        channels,
+                        channels,
+                        kernel_size,
+                        1,
+                        dilation=dilation[1],
+                        padding=get_padding(kernel_size, dilation[1]),
+                    )
+                ),
+            ]
+        )
+        self.convs.apply(init_weights)
+
+    def forward(self, x, x_mask=None):
+        for c in self.convs:
+            xt = F.leaky_relu(x, LRELU_SLOPE)
+            if x_mask is not None:
+                xt = xt * x_mask
+            xt = c(xt)
+            x = xt + x
+        if x_mask is not None:
+            x = x * x_mask
+        return x
+
+    def remove_weight_norm(self):
+        for l in self.convs:
+            remove_weight_norm(l)
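WN is the gated WaveNet-style core shared by the posterior encoder and the coupling layers. A minimal forward pass (editor's sketch; the arguments match the PosteriorEncoder construction in models.py above):

import torch
from LOAD.modules import WN

wn = WN(hidden_channels=192, kernel_size=5, dilation_rate=1, n_layers=16)
x = torch.randn(1, 192, 40)
x_mask = torch.ones(1, 1, 40)
out = wn(x, x_mask)  # gated residual stack; output shape [1, 192, 40]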
Vietnam-F-TTS/TTS.py ADDED
@@ -0,0 +1,232 @@
+import torch
+import json
+import re
+import unicodedata
+import numpy as np
+import regex
+
+from types import SimpleNamespace
+from LOAD.models import DurationNet, SynthesizerTrn
+
+import scipy.io.wavfile as wav
+
+
+config_file = "config.json"
+duration_model_path = "duration_model.pth"
+lightspeed_model_path = "gen_630k.pth"
+phone_set_file = "phone_set.json"
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+with open(config_file, "rb") as f:
+    hps = json.load(f, object_hook=lambda x: SimpleNamespace(**x))
+
+# Load phone set json file
+with open(phone_set_file, "r") as f:
+    phone_set = json.load(f)
+
+assert phone_set[0][1:-1] == "SEP"
+assert "sil" in phone_set
+sil_idx = phone_set.index("sil")
+
+space_re = regex.compile(r"\s+")
+number_re = regex.compile("([0-9]+)")
+digits = ["không", "một", "hai", "ba", "bốn", "năm", "sáu", "bảy", "tám", "chín"]
+num_re = regex.compile(r"([0-9.,]*[0-9])")
+alphabet = "aàáảãạăằắẳẵặâầấẩẫậeèéẻẽẹêềếểễệiìíỉĩịoòóỏõọôồốổỗộơờớởỡợuùúủũụưừứửữựyỳýỷỹỵbcdđghklmnpqrstvx"
+keep_text_and_num_re = regex.compile(rf"[^\s{alphabet}.,0-9]")
+keep_text_re = regex.compile(rf"[^\s{alphabet}]")
+
+
+def read_number(num: str) -> str:
+    if len(num) == 1:
+        return digits[int(num)]
+    elif len(num) == 2 and num.isdigit():
+        n = int(num)
+        end = digits[n % 10]
+        if n == 10:
+            return "mười"
+        if n % 10 == 5:
+            end = "lăm"
+        if n % 10 == 0:
+            return digits[n // 10] + " mươi"
+        elif n < 20:
+            return "mười " + end
+        else:
+            if n % 10 == 1:
+                end = "mốt"
+            return digits[n // 10] + " mươi " + end
+    elif len(num) == 3 and num.isdigit():
+        n = int(num)
+        if n % 100 == 0:
+            return digits[n // 100] + " trăm"
+        elif num[1] == "0":
+            return digits[n // 100] + " trăm lẻ " + digits[n % 100]
+        else:
+            return digits[n // 100] + " trăm " + read_number(num[1:])
+    elif len(num) >= 4 and len(num) <= 6 and num.isdigit():
+        n = int(num)
+        n1 = n // 1000
+        return read_number(str(n1)) + " ngàn " + read_number(num[-3:])
+    elif "," in num:
+        n1, n2 = num.split(",")
+        return read_number(n1) + " phẩy " + read_number(n2)
+    elif "." in num:
+        parts = num.split(".")
+        if len(parts) == 2:
+            if parts[1] == "000":
+                return read_number(parts[0]) + " ngàn"
+            elif parts[1].startswith("00"):
+                end = digits[int(parts[1][2:])]
+                return read_number(parts[0]) + " ngàn lẻ " + end
+            else:
+                return read_number(parts[0]) + " ngàn " + read_number(parts[1])
+        elif len(parts) == 3:
+            return (
+                read_number(parts[0])
+                + " triệu "
+                + read_number(parts[1])
+                + " ngàn "
+                + read_number(parts[2])
+            )
+    return num
+
+
+def text_to_phone_idx(text):
+    # lowercase
+    text = text.lower()
+    # unicode normalize
+    text = unicodedata.normalize("NFKC", text)
+    text = text.replace(".", " . ")
+    text = text.replace(",", " , ")
+    text = text.replace(";", " ; ")
+    text = text.replace(":", " : ")
+    text = text.replace("!", " ! ")
+    text = text.replace("?", " ? ")
+    text = text.replace("(", " ( ")
+
+    text = num_re.sub(r" \1 ", text)
+    words = text.split()
+    words = [read_number(w) if num_re.fullmatch(w) else w for w in words]
+    text = " ".join(words)
+
+    # remove redundant spaces
+    text = re.sub(r"\s+", " ", text)
+    # remove leading and trailing spaces
+    text = text.strip()
+    # convert words to phone indices
+    tokens = []
+    for c in text:
+        # if c is a punctuation mark, add the <sil> phone
+        if c in ":,.!?;(":
+            tokens.append(sil_idx)
+        elif c in phone_set:
+            tokens.append(phone_set.index(c))
+        elif c == " ":
+            # add the <sep> phone
+            tokens.append(0)
+    if tokens[0] != sil_idx:
+        # insert the <sil> phone at the beginning
+        tokens = [sil_idx, 0] + tokens
+    if tokens[-1] != sil_idx:
+        tokens = tokens + [0, sil_idx]
+    return tokens
+
+
+def text_to_speech(duration_net, generator, text):
+    # prevent too long text
+    # if len(text) > 500:
+    #     text = text[:500]
+
+    phone_idx = text_to_phone_idx(text)
+    batch = {
+        "phone_idx": np.array([phone_idx]),
+        "phone_length": np.array([len(phone_idx)]),
+    }
+
+    # predict phoneme duration
+    phone_length = torch.from_numpy(batch["phone_length"].copy()).long().to(device)
+    phone_idx = torch.from_numpy(batch["phone_idx"].copy()).long().to(device)
+    with torch.inference_mode():
+        phone_duration = duration_net(phone_idx, phone_length)[:, :, 0] * 1000
+    phone_duration = torch.where(
+        phone_idx == sil_idx, torch.clamp_min(phone_duration, 200), phone_duration
+    )
+    phone_duration = torch.where(phone_idx == 0, 0, phone_duration)
+
+    # generate waveform
+    end_time = torch.cumsum(phone_duration, dim=-1)
+    start_time = end_time - phone_duration
+    start_frame = start_time / 1000 * hps.data.sampling_rate / hps.data.hop_length
+    end_frame = end_time / 1000 * hps.data.sampling_rate / hps.data.hop_length
+    spec_length = end_frame.max(dim=-1).values
+    pos = torch.arange(0, spec_length.item(), device=device)
+    attn = torch.logical_and(
+        pos[None, :, None] >= start_frame[:, None, :],
+        pos[None, :, None] < end_frame[:, None, :],
+    ).float()
+    with torch.inference_mode():
+        y_hat = generator.infer(
+            phone_idx, phone_length, spec_length, attn, max_len=None, noise_scale=0.0
+        )[0]
+    wave = y_hat[0, 0].data.cpu().numpy()
+    return (wave * (2**15)).astype(np.int16)
+
+
+def load_models():
+    duration_net = DurationNet(hps.data.vocab_size, 64, 4).to(device)
+    duration_net.load_state_dict(torch.load(duration_model_path, map_location=device))
+    duration_net = duration_net.eval()
+    generator = SynthesizerTrn(
+        hps.data.vocab_size,
+        hps.data.filter_length // 2 + 1,
+        hps.train.segment_size // hps.data.hop_length,
+        **vars(hps.model),
+    ).to(device)
+    del generator.enc_q
+    ckpt = torch.load(lightspeed_model_path, map_location=device)
+    params = {}
+    for k, v in ckpt["net_g"].items():
+        k = k[7:] if k.startswith("module.") else k
+        params[k] = v
+    generator.load_state_dict(params, strict=False)
+    del ckpt, params
+    generator = generator.eval()
+    return duration_net, generator
+
+
+def speak(text):
+    # Load the duration predictor and the waveform generator
+    duration_net, generator = load_models()
+    paragraphs = text.split("\n")
+    clips = []  # list of synthesized audio clips
+    max_chunk_length = 400  # maximum number of characters in each chunk
+
+    for paragraph in paragraphs:
+        paragraph = paragraph.strip()
+        if paragraph == "":
+            continue
+        # Split the paragraph into chunks of at most max_chunk_length characters
+        chunks = [
+            paragraph[i : i + max_chunk_length]
+            for i in range(0, len(paragraph), max_chunk_length)
+        ]
+        for chunk in chunks:
+            clips.append(text_to_speech(duration_net, generator, chunk))
+        # A short silence could be appended between paragraphs here:
+        # clips.append(silence)
+
+    # Concatenate all audio clips into one waveform
+    y = np.concatenate(clips)
+    return hps.data.sampling_rate, y
+
+
+def textToWav(text, outWAV):
+    sampling_rate, audio = speak(text)
+
+    # Save the audio data to a WAV file
+    wav.write(outWAV, sampling_rate, audio)
+
+
+textToWav('bây giờ là mấy giờ', 'test.wav')
Vietnam-F-TTS/config.json ADDED
@@ -0,0 +1,72 @@
+{
+  "train": {
+    "learning_rate": 2e-4,
+    "betas": [
+      0.8,
+      0.99
+    ],
+    "eps": 1e-9,
+    "lr_decay": 0.999875,
+    "segment_size": 8192,
+    "c_mel": 45,
+    "c_kl": 1.0
+  },
+  "data": {
+    "vocab_size": 256,
+    "max_wav_value": 32768.0,
+    "sampling_rate": 16000,
+    "filter_length": 1024,
+    "hop_length": 256,
+    "win_length": 1024,
+    "n_mel_channels": 80,
+    "mel_fmin": 0.0,
+    "mel_fmax": null
+  },
+  "model": {
+    "inter_channels": 192,
+    "hidden_channels": 192,
+    "filter_channels": 768,
+    "n_heads": 2,
+    "n_layers": 6,
+    "kernel_size": 3,
+    "p_dropout": 0.1,
+    "resblock": "1",
+    "resblock_kernel_sizes": [
+      3,
+      7,
+      11
+    ],
+    "resblock_dilation_sizes": [
+      [
+        1,
+        3,
+        5
+      ],
+      [
+        1,
+        3,
+        5
+      ],
+      [
+        1,
+        3,
+        5
+      ]
+    ],
+    "upsample_rates": [
+      8,
+      8,
+      2,
+      2
+    ],
+    "upsample_initial_channel": 512,
+    "upsample_kernel_sizes": [
+      16,
+      16,
+      4,
+      4
+    ],
+    "n_layers_q": 3,
+    "use_spectral_norm": false
+  }
+}
Vietnam-F-TTS/duration_model.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e86ab30448a328933b112e5ed6c4c22d7f05f1673528e61d340c98a9cc899eb
+size 1164051
Vietnam-F-TTS/gen_210k.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6f8485f44f492262492231e90633fead68b5db3f65bd1a73621d618a6f3a173
+size 111280752
Vietnam-F-TTS/gen_543k.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e11e4b12e3e9a67ad23ec0cfafbcbd8e810f83d55fb6c38f61a6e9886c94cc5
+size 111280752
Vietnam-F-TTS/gen_630k.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93fd8e41a8138978e387db561e32ad6ca0798cb09d44aefa239add5ac47e13a6
+size 111280317
Vietnam-F-TTS/phone_set.json ADDED
@@ -0,0 +1 @@
+["[SEP]", "a", "b", "c", "d", "e", "g", "h", "i", "k", "l", "m", "n", "o", "p", "q", "r", "s", "sil", "spn", "t", "u", "v", "x", "y", "\u00e0", "\u00e1", "\u00e2", "\u00e3", "\u00e8", "\u00e9", "\u00ea", "\u00ec", "\u00ed", "\u00f2", "\u00f3", "\u00f4", "\u00f5", "\u00f9", "\u00fa", "\u00fd", "\u0103", "\u0111", "\u0129", "\u0169", "\u01a1", "\u01b0", "\u1ea1", "\u1ea3", "\u1ea5", "\u1ea7", "\u1ea9", "\u1eab", "\u1ead", "\u1eaf", "\u1eb1", "\u1eb3", "\u1eb5", "\u1eb7", "\u1eb9", "\u1ebb", "\u1ebd", "\u1ebf", "\u1ec1", "\u1ec3", "\u1ec5", "\u1ec7", "\u1ec9", "\u1ecb", "\u1ecd", "\u1ecf", "\u1ed1", "\u1ed3", "\u1ed5", "\u1ed7", "\u1ed9", "\u1edb", "\u1edd", "\u1edf", "\u1ee1", "\u1ee3", "\u1ee5", "\u1ee7", "\u1ee9", "\u1eeb", "\u1eed", "\u1eef", "\u1ef1", "\u1ef3", "\u1ef5", "\u1ef7", "\u1ef9"]
Vietnam-F-TTS/requirements.txt ADDED
@@ -0,0 +1,79 @@
+# requirements.txt
+
+# python 3.9
+
+aiofiles==23.2.1
+altair==5.3.0
+annotated-types==0.6.0
+anyio==4.3.0
+attrs==23.2.0
+certifi==2024.2.2
+charset-normalizer==3.3.2
+click==8.1.7
+colorama==0.4.6
+contourpy==1.2.1
+cycler==0.12.1
+exceptiongroup==1.2.0
+fastapi==0.110.1
+ffmpy==0.3.2
+filelock==3.13.3
+fonttools==4.50.0
+fsspec==2024.3.1
+gradio==4.25.0
+gradio_client==0.15.0
+h11==0.14.0
+httpcore==1.0.5
+httpx==0.27.0
+huggingface-hub==0.22.2
+idna==3.6
+importlib_resources==6.4.0
+Jinja2==3.1.3
+jsonschema==4.21.1
+jsonschema-specifications==2023.12.1
+kiwisolver==1.4.5
+markdown-it-py==3.0.0
+MarkupSafe==2.1.5
+matplotlib==3.8.3
+mdurl==0.1.2
+mpmath==1.3.0
+networkx==3.2.1
+numpy==1.26.4
+orjson==3.10.0
+packaging==24.0
+pandas==2.2.1
+pillow==10.3.0
+pydantic==2.6.4
+pydantic_core==2.16.3
+pydub==0.25.1
+Pygments==2.17.2
+pyparsing==3.1.2
+python-dateutil==2.9.0.post0
+python-multipart==0.0.9
+pytz==2024.1
+PyYAML==6.0.1
+referencing==0.34.0
+regex==2023.12.25
+requests==2.31.0
+rich==13.7.1
+rpds-py==0.18.0
+ruff==0.3.5
+scipy==1.13.0
+semantic-version==2.10.0
+shellingham==1.5.4
+six==1.16.0
+sniffio==1.3.1
+starlette==0.37.2
+sympy==1.12
+tomlkit==0.12.0
+toolz==0.12.1
+torch==2.2.2
+tqdm==4.66.2
+typer==0.12.0
+typer-cli==0.12.0
+typer-slim==0.12.0
+typing_extensions==4.10.0
+tzdata==2024.1
+urllib3==2.2.1
+uvicorn==0.29.0
+websockets==11.0.3
+zipp==3.18.1
Vietnam-F-TTS/test.wav ADDED
Binary file (59.4 kB).
 
mms-tts-vie/config.json ADDED
@@ -0,0 +1,82 @@
+{
+  "activation_dropout": 0.1,
+  "architectures": [
+    "VitsModel"
+  ],
+  "attention_dropout": 0.1,
+  "depth_separable_channels": 2,
+  "depth_separable_num_layers": 3,
+  "duration_predictor_dropout": 0.5,
+  "duration_predictor_filter_channels": 256,
+  "duration_predictor_flow_bins": 10,
+  "duration_predictor_kernel_size": 3,
+  "duration_predictor_num_flows": 4,
+  "duration_predictor_tail_bound": 5.0,
+  "ffn_dim": 768,
+  "ffn_kernel_size": 3,
+  "flow_size": 192,
+  "hidden_act": "relu",
+  "hidden_dropout": 0.1,
+  "hidden_size": 192,
+  "initializer_range": 0.02,
+  "layer_norm_eps": 1e-05,
+  "layerdrop": 0.1,
+  "leaky_relu_slope": 0.1,
+  "model_type": "vits",
+  "noise_scale": 0.667,
+  "noise_scale_duration": 0.8,
+  "num_attention_heads": 2,
+  "num_hidden_layers": 6,
+  "num_speakers": 1,
+  "posterior_encoder_num_wavenet_layers": 16,
+  "prior_encoder_num_flows": 4,
+  "prior_encoder_num_wavenet_layers": 4,
+  "resblock_dilation_sizes": [
+    [
+      1,
+      3,
+      5
+    ],
+    [
+      1,
+      3,
+      5
+    ],
+    [
+      1,
+      3,
+      5
+    ]
+  ],
+  "resblock_kernel_sizes": [
+    3,
+    7,
+    11
+  ],
+  "sampling_rate": 16000,
+  "speaker_embedding_size": 0,
+  "speaking_rate": 1.0,
+  "spectrogram_bins": 513,
+  "torch_dtype": "float32",
+  "transformers_version": "4.33.0.dev0",
+  "upsample_initial_channel": 512,
+  "upsample_kernel_sizes": [
+    16,
+    16,
+    4,
+    4
+  ],
+  "upsample_rates": [
+    8,
+    8,
+    2,
+    2
+  ],
+  "use_bias": true,
+  "use_stochastic_duration_prediction": true,
+  "vocab_size": 95,
+  "wavenet_dilation_rate": 1,
+  "wavenet_dropout": 0.0,
+  "wavenet_kernel_size": 5,
+  "window_size": 4
+}
mms-tts-vie/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55ded90c3e57dc2814fa2cdfe3f9e7a5c28e1223b06c0a260a4495b080762ffd
+size 145271288
mms-tts-vie/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aab7d240fb0b6c83474a15affcb70194742af8dbbf79083deb6684e162ff0cb5
+size 145432498
mms-tts-vie/special_tokens_map.json ADDED
@@ -0,0 +1,4 @@
+{
+  "pad_token": "ụ",
+  "unk_token": "<unk>"
+}
mms-tts-vie/tokenizer_config.json ADDED
@@ -0,0 +1,12 @@
+{
+  "add_blank": true,
+  "clean_up_tokenization_spaces": true,
+  "is_uroman": false,
+  "language": "vie",
+  "model_max_length": 1000000000000000019884624838656,
+  "normalize": true,
+  "pad_token": "ụ",
+  "phonemize": false,
+  "tokenizer_class": "VitsTokenizer",
+  "unk_token": "<unk>"
+}
mms-tts-vie/vocab.json ADDED
@@ -0,0 +1,97 @@
+{
+  " ": 84,
+  "'": 44,
+  "-": 94,
+  "2": 52,
+  "_": 17,
+  "a": 29,
+  "b": 88,
+  "c": 13,
+  "d": 63,
+  "e": 54,
+  "g": 21,
+  "h": 85,
+  "i": 30,
+  "k": 79,
+  "l": 82,
+  "m": 68,
+  "n": 90,
+  "o": 31,
+  "p": 78,
+  "q": 47,
+  "r": 92,
+  "s": 2,
+  "t": 80,
+  "u": 8,
+  "v": 14,
+  "x": 1,
+  "y": 75,
+  "à": 35,
+  "á": 77,
+  "â": 12,
+  "ã": 51,
+  "è": 3,
+  "é": 58,
+  "ê": 91,
+  "ì": 4,
+  "í": 74,
+  "ò": 45,
+  "ó": 56,
+  "ô": 28,
+  "õ": 25,
+  "ù": 38,
+  "ú": 76,
+  "ý": 37,
+  "ă": 89,
+  "đ": 55,
+  "ĩ": 23,
+  "ũ": 70,
+  "ơ": 7,
+  "ư": 9,
+  "ạ": 22,
+  "ả": 24,
+  "ấ": 81,
+  "ầ": 57,
+  "ẩ": 49,
+  "ẫ": 67,
+  "ậ": 87,
+  "ắ": 65,
+  "ằ": 10,
+  "ẳ": 27,
+  "ẵ": 42,
+  "ặ": 5,
+  "ẹ": 72,
+  "ẻ": 20,
+  "ẽ": 66,
+  "ế": 60,
+  "ề": 40,
+  "ể": 69,
+  "ễ": 41,
+  "ệ": 15,
+  "ỉ": 71,
+  "ị": 53,
+  "ọ": 48,
+  "ỏ": 43,
+  "ố": 46,
+  "ồ": 16,
+  "ổ": 34,
+  "ỗ": 73,
+  "ộ": 19,
+  "ớ": 59,
+  "ờ": 36,
+  "ở": 83,
+  "ỡ": 26,
+  "ợ": 93,
+  "ụ": 0,
+  "ủ": 61,
+  "ứ": 6,
+  "ừ": 32,
+  "ử": 62,
+  "ữ": 64,
+  "ự": 50,
+  "ỳ": 11,
+  "ỵ": 18,
+  "ỷ": 86,
+  "ỹ": 33,
+  "–": 39
+}