Staticaliza committed on
Commit f9d08c4
1 Parent(s): c8535ea

Create model/cfm.py

Files changed (1)
  1. model/cfm.py +279 -0
model/cfm.py ADDED
@@ -0,0 +1,279 @@
"""
ein notation:
b - batch
n - sequence
nt - text sequence
nw - raw wave length
d - dimension
"""

from __future__ import annotations
from typing import Callable
from random import random

import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence

from torchdiffeq import odeint

from einops import rearrange

from model.modules import MelSpec

from model.utils import (
    default, exists,
    list_str_to_idx, list_str_to_tensor,
    lens_to_mask, mask_from_frac_lengths,
)


class CFM(nn.Module):
    def __init__(
        self,
        transformer: nn.Module,
        sigma = 0.,
        odeint_kwargs: dict = dict(
            # atol = 1e-5,
            # rtol = 1e-5,
            method = 'euler'  # 'midpoint'
        ),
        audio_drop_prob = 0.3,
        cond_drop_prob = 0.2,
        num_channels = None,
        mel_spec_module: nn.Module | None = None,
        mel_spec_kwargs: dict = dict(),
        frac_lengths_mask: tuple[float, float] = (0.7, 1.),
        vocab_char_map: dict[str, int] | None = None
    ):
        super().__init__()

        self.frac_lengths_mask = frac_lengths_mask

        # mel spec
        self.mel_spec = default(mel_spec_module, MelSpec(**mel_spec_kwargs))
        num_channels = default(num_channels, self.mel_spec.n_mel_channels)
        self.num_channels = num_channels

        # classifier-free guidance
        self.audio_drop_prob = audio_drop_prob
        self.cond_drop_prob = cond_drop_prob

        # transformer
        self.transformer = transformer
        dim = transformer.dim
        self.dim = dim

        # conditional flow related
        self.sigma = sigma

        # sampling related
        self.odeint_kwargs = odeint_kwargs

        # vocab map for tokenization
        self.vocab_char_map = vocab_char_map

    @property
    def device(self):
        return next(self.parameters()).device

    @torch.no_grad()
    def sample(
        self,
        cond: float['b n d'] | float['b nw'],
        text: int['b nt'] | list[str],
        duration: int | int['b'],
        *,
        lens: int['b'] | None = None,
        steps = 32,
        cfg_strength = 1.,
        sway_sampling_coef = None,
        seed: int | None = None,
        max_duration = 4096,
        vocoder: Callable[[float['b d n']], float['b nw']] | None = None,
        no_ref_audio = False,
        duplicate_test = False,
        t_inter = 0.1,
        edit_mask = None,
    ):
        self.eval()

        # raw wave

        if cond.ndim == 2:
            cond = self.mel_spec(cond)
            cond = rearrange(cond, 'b d n -> b n d')
            assert cond.shape[-1] == self.num_channels

        batch, cond_seq_len, device = *cond.shape[:2], cond.device
        if not exists(lens):
            lens = torch.full((batch,), cond_seq_len, device = device, dtype = torch.long)

        # text

        if isinstance(text, list):
            if exists(self.vocab_char_map):
                text = list_str_to_idx(text, self.vocab_char_map).to(device)
            else:
                text = list_str_to_tensor(text).to(device)
            assert text.shape[0] == batch

        if exists(text):
            text_lens = (text != -1).sum(dim = -1)
            lens = torch.maximum(text_lens, lens)  # make sure lengths are at least those of the text characters

        # duration

        cond_mask = lens_to_mask(lens)
        if edit_mask is not None:
            cond_mask = cond_mask & edit_mask

        if isinstance(duration, int):
            duration = torch.full((batch,), duration, device = device, dtype = torch.long)

        duration = torch.maximum(lens + 1, duration)  # just add one token so something is generated
        duration = duration.clamp(max = max_duration)
        max_duration = duration.amax()

        # duplicate test corner for inner time step observation
        if duplicate_test:
            test_cond = F.pad(cond, (0, 0, cond_seq_len, max_duration - 2 * cond_seq_len), value = 0.)

        cond = F.pad(cond, (0, 0, 0, max_duration - cond_seq_len), value = 0.)
        cond_mask = F.pad(cond_mask, (0, max_duration - cond_mask.shape[-1]), value = False)
        cond_mask = rearrange(cond_mask, '... -> ... 1')
        step_cond = torch.where(cond_mask, cond, torch.zeros_like(cond))  # allows direct control (cutting cond audio) via the lens passed in

        if batch > 1:
            mask = lens_to_mask(duration)
        else:  # save memory and speed up, as single-sample inference currently needs no mask
            mask = None

        # test for no ref audio
        if no_ref_audio:
            cond = torch.zeros_like(cond)

        # neural ode

        def fn(t, x):
            # at each step, conditioning is fixed
            # step_cond = torch.where(cond_mask, cond, torch.zeros_like(cond))

            # predict flow
            pred = self.transformer(x = x, cond = step_cond, text = text, time = t, mask = mask, drop_audio_cond = False, drop_text = False)
            if cfg_strength < 1e-5:
                return pred

            null_pred = self.transformer(x = x, cond = step_cond, text = text, time = t, mask = mask, drop_audio_cond = True, drop_text = True)
            return pred + (pred - null_pred) * cfg_strength
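
        # note: this is a classifier-free-guidance style extrapolation,
        # pred + w * (pred - null_pred), where the null prediction drops both
        # the audio condition and the text; with w = 0 it reduces to the plain
        # conditional prediction, matching the early return above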

        # noise input
        # seed per sample so that batched inference matches single-sample inference regardless of batch size
        # (some small difference may remain, possibly due to convolutional layers)
        y0 = []
        for dur in duration:
            if exists(seed):
                torch.manual_seed(seed)
            y0.append(torch.randn(dur, self.num_channels, device = self.device))
        y0 = pad_sequence(y0, padding_value = 0, batch_first = True)

        t_start = 0

        # duplicate test corner for inner time step observation
        if duplicate_test:
            t_start = t_inter
            y0 = (1 - t_start) * y0 + t_start * test_cond
            steps = int(steps * (1 - t_start))

        t = torch.linspace(t_start, 1, steps, device = self.device)
        if sway_sampling_coef is not None:
            t = t + sway_sampling_coef * (torch.cos(torch.pi / 2 * t) - 1 + t)
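        # the sway term remaps the uniform time grid while keeping the endpoints
        # fixed (cos(pi/2 * t) - 1 + t vanishes at both t = 0 and t = 1); a
        # negative coefficient shifts points toward t = 0, concentrating ODE
        # steps early in the trajectory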

        trajectory = odeint(fn, y0, t, **self.odeint_kwargs)

        sampled = trajectory[-1]
        out = sampled
        out = torch.where(cond_mask, cond, out)

        if exists(vocoder):
            out = rearrange(out, 'b n d -> b d n')
            out = vocoder(out)

        return out, trajectory

    def forward(
        self,
        inp: float['b n d'] | float['b nw'],  # mel or raw wave
        text: int['b nt'] | list[str],
        *,
        lens: int['b'] | None = None,
        noise_scheduler: str | None = None,
    ):
        # handle raw wave
        if inp.ndim == 2:
            inp = self.mel_spec(inp)
            inp = rearrange(inp, 'b d n -> b n d')
            assert inp.shape[-1] == self.num_channels

        batch, seq_len, dtype, device, σ1 = *inp.shape[:2], inp.dtype, self.device, self.sigma

        # handle text as string
        if isinstance(text, list):
            if exists(self.vocab_char_map):
                text = list_str_to_idx(text, self.vocab_char_map).to(device)
            else:
                text = list_str_to_tensor(text).to(device)
            assert text.shape[0] == batch

        # lens and mask
        if not exists(lens):
            lens = torch.full((batch,), seq_len, device = device)

        mask = lens_to_mask(lens, length = seq_len)  # redundant here, as collate_fn pads to the max length in the batch

        # get a random span to mask out for training conditionally
        frac_lengths = torch.zeros((batch,), device = self.device).float().uniform_(*self.frac_lengths_mask)
        rand_span_mask = mask_from_frac_lengths(lens, frac_lengths)

        if exists(mask):
            rand_span_mask &= mask

        # mel is x1
        x1 = inp

        # x0 is gaussian noise
        x0 = torch.randn_like(x1)

        # time step
        time = torch.rand((batch,), dtype = dtype, device = self.device)
        # TODO. noise_scheduler

        # sample xt (φ_t(x) in the paper)
        t = rearrange(time, 'b -> b 1 1')
        φ = (1 - t) * x0 + t * x1
        flow = x1 - x0
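        # conditional flow matching with a linear (optimal-transport) path:
        # x_t = (1 - t) * x0 + t * x1, so the target velocity dx_t/dt = x1 - x0;
        # σ1 would set a minimum noise level on the path but is unused here with sigma = 0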

        # only predict what is within the random mask span for infilling
        cond = torch.where(
            rand_span_mask[..., None],
            torch.zeros_like(x1), x1
        )

        # transformer and cfg training with a drop rate
        drop_audio_cond = random() < self.audio_drop_prob  # p_drop in voicebox paper
        if random() < self.cond_drop_prob:  # p_uncond in voicebox paper
            drop_audio_cond = True
            drop_text = True
        else:
            drop_text = False
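        # net effect: with probability cond_drop_prob both audio and text
        # conditions are dropped (the fully unconditional case used as
        # null_pred at sampling time); otherwise only the audio condition is
        # dropped, with probability audio_drop_prob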

        # to rigorously mask out padding, record lengths in collate_fn in dataset.py and pass them in here
        # adding a mask uses more memory, so the batch-sampler threshold would also need scaling down for long sequences
        pred = self.transformer(x = φ, cond = cond, text = text, time = time, drop_audio_cond = drop_audio_cond, drop_text = drop_text)

        # flow matching loss
        loss = F.mse_loss(pred, flow, reduction = 'none')
        loss = loss[rand_span_mask]
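        # boolean indexing keeps only positions inside the infill span, so the
        # mean below is taken over masked frames only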

        return loss.mean(), cond, pred
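
For orientation, a minimal usage sketch of the class above; `my_transformer`, `vocab`, `wave`, and `ref_mel` are hypothetical stand-ins, not part of this commit. Any nn.Module exposing a `.dim` attribute and accepting the keyword arguments used in the calls above (x, cond, text, time, mask, drop_audio_cond, drop_text) should work:

    import torch
    from model.cfm import CFM

    # hypothetical backbone and vocab map
    cfm = CFM(transformer = my_transformer, vocab_char_map = vocab)

    # training step: raw wave (b, nw) or mel (b, n, d) plus transcripts
    loss, cond, pred = cfm(wave, text = ["hello world"])
    loss.backward()

    # inference: reference mel + text, total duration given in mel frames
    out, trajectory = cfm.sample(ref_mel, text = ["hello world"], duration = 500, steps = 32)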