Plachta committed
Commit ed52a29 · 1 Parent(s): 517dfba

Delete train_ms.py

Files changed (1)
  1. train_ms.py +0 -306
train_ms.py DELETED
@@ -1,306 +0,0 @@
- import os
- import json
- import argparse
- import itertools
- import math
- import torch
- from torch import nn, optim
- from torch.nn import functional as F
- from torch.utils.data import DataLoader
- from torch.utils.tensorboard import SummaryWriter
- import torch.multiprocessing as mp
- import torch.distributed as dist
- from torch.nn.parallel import DistributedDataParallel as DDP
- from torch.cuda.amp import autocast, GradScaler
- from tqdm import tqdm
-
- import librosa
- import logging
-
- logging.getLogger('numba').setLevel(logging.WARNING)
-
- import commons
- import utils
- from data_utils import (
-   TextAudioSpeakerLoader,
-   TextAudioSpeakerCollate,
-   DistributedBucketSampler
- )
- from models import (
-   SynthesizerTrn,
-   MultiPeriodDiscriminator,
- )
- from losses import (
-   generator_loss,
-   discriminator_loss,
-   feature_loss,
-   kl_loss
- )
- from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
- from text.symbols import symbols
-
-
- torch.backends.cudnn.benchmark = True
- global_step = 0
-
-
- def main():
-   """Assume Single Node Multi GPUs Training Only"""
-   assert torch.cuda.is_available(), "CPU training is not allowed."
-
-   n_gpus = torch.cuda.device_count()
-   os.environ['MASTER_ADDR'] = 'localhost'
-   os.environ['MASTER_PORT'] = '8000'
-
-   hps = utils.get_hparams()
-   mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
-
-
- def run(rank, n_gpus, hps):
-   global global_step
-   if rank == 0:
-     logger = utils.get_logger(hps.model_dir)
-     logger.info(hps)
-     utils.check_git_hash(hps.model_dir)
-     writer = SummaryWriter(log_dir=hps.model_dir)
-     writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
-
-   dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
-   torch.manual_seed(hps.train.seed)
-   torch.cuda.set_device(rank)
-
-   train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
-   train_sampler = DistributedBucketSampler(
-       train_dataset,
-       hps.train.batch_size,
-       [32,300,400,500,600,700,800,900,1000],
-       num_replicas=n_gpus,
-       rank=rank,
-       shuffle=True)
-   collate_fn = TextAudioSpeakerCollate()
-   train_loader = DataLoader(train_dataset, num_workers=0, shuffle=False, pin_memory=True,
-       collate_fn=collate_fn, batch_sampler=train_sampler)
-   if rank == 0:
-     eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
-     eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False,
-         batch_size=hps.train.batch_size, pin_memory=True,
-         drop_last=False, collate_fn=collate_fn)
-
-   net_g = SynthesizerTrn(
-       len(symbols),
-       hps.data.filter_length // 2 + 1,
-       hps.train.segment_size // hps.data.hop_length,
-       n_speakers=hps.data.n_speakers,
-       **hps.model).cuda(rank)
-   net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
-   optim_g = torch.optim.AdamW(
-       net_g.parameters(),
-       hps.train.learning_rate,
-       betas=hps.train.betas,
-       eps=hps.train.eps)
-   optim_d = torch.optim.AdamW(
-       net_d.parameters(),
-       hps.train.learning_rate,
-       betas=hps.train.betas,
-       eps=hps.train.eps)
-   net_g = DDP(net_g, device_ids=[rank])
-   net_d = DDP(net_d, device_ids=[rank])
-
-   try:
-     _, _, _, epoch_str = utils.load_checkpoint(hps.model_dir.strip("./drive/MyDrive\\"), net_g, None)
-     _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d)
-     global_step = (epoch_str - 1) * len(train_loader)
-   except:
-     epoch_str = 1
-     global_step = 0
-
-   scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
-   scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
-
-   scaler = GradScaler(enabled=hps.train.fp16_run)
-
-   for epoch in range(epoch_str, hps.train.epochs + 1):
-     if rank==0:
-       train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
-     else:
-       train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None)
-     scheduler_g.step()
-     scheduler_d.step()
-
-
- def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
-   net_g, net_d = nets
-   optim_g, optim_d = optims
-   scheduler_g, scheduler_d = schedulers
-   train_loader, eval_loader = loaders
-   if writers is not None:
-     writer, writer_eval = writers
-
-   train_loader.batch_sampler.set_epoch(epoch)
-   global global_step
-
-   net_g.train()
-   net_d.train()
-   for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers) in enumerate(tqdm(train_loader)):
-     x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
-     spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
-     y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
-     speakers = speakers.cuda(rank, non_blocking=True)
-
-     with autocast(enabled=hps.train.fp16_run):
-       y_hat, l_length, attn, ids_slice, x_mask, z_mask,\
-       (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths, speakers)
-
-       mel = spec_to_mel_torch(
-           spec,
-           hps.data.filter_length,
-           hps.data.n_mel_channels,
-           hps.data.sampling_rate,
-           hps.data.mel_fmin,
-           hps.data.mel_fmax)
-       y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
-       y_hat_mel = mel_spectrogram_torch(
-           y_hat.squeeze(1),
-           hps.data.filter_length,
-           hps.data.n_mel_channels,
-           hps.data.sampling_rate,
-           hps.data.hop_length,
-           hps.data.win_length,
-           hps.data.mel_fmin,
-           hps.data.mel_fmax
-       )
-
-       y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
-
-       # Discriminator
-       y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
-       with autocast(enabled=False):
-         loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
-         loss_disc_all = loss_disc
-     optim_d.zero_grad()
-     scaler.scale(loss_disc_all).backward()
-     scaler.unscale_(optim_d)
-     grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
-     scaler.step(optim_d)
-
-     with autocast(enabled=hps.train.fp16_run):
-       # Generator
-       y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
-       with autocast(enabled=False):
-         loss_dur = torch.sum(l_length.float())
-         loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
-         loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
-
-         loss_fm = feature_loss(fmap_r, fmap_g)
-         loss_gen, losses_gen = generator_loss(y_d_hat_g)
-         loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
-     optim_g.zero_grad()
-     scaler.scale(loss_gen_all).backward()
-     scaler.unscale_(optim_g)
-     grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
-     scaler.step(optim_g)
-     scaler.update()
-
-     if rank==0:
-       if global_step % hps.train.log_interval == 0:
-         lr = optim_g.param_groups[0]['lr']
-         losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
-         logger.info('Train Epoch: {} [{:.0f}%]'.format(
-           epoch,
-           100. * batch_idx / len(train_loader)))
-         logger.info([x.item() for x in losses] + [global_step, lr])
-
-         scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
-         scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl})
-
-         scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
-         scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
-         scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
-         image_dict = {
-             "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
-             "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
-             "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
-             "all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy())
-         }
-         utils.summarize(
-           writer=writer,
-           global_step=global_step,
-           images=image_dict,
-           scalars=scalar_dict)
-
-       if global_step % hps.train.eval_interval == 0:
-         evaluate(hps, net_g, eval_loader, writer_eval)
-         utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
-         utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
-         old_g=os.path.join(hps.model_dir, "G_{}.pth".format(global_step-2000))
-         old_d=os.path.join(hps.model_dir, "D_{}.pth".format(global_step-2000))
-         if os.path.exists(old_g):
-           os.remove(old_g)
-         if os.path.exists(old_d):
-           os.remove(old_d)
-     global_step += 1
-
-   if rank == 0:
-     logger.info('====> Epoch: {}'.format(epoch))
-
-
- def evaluate(hps, generator, eval_loader, writer_eval):
-   generator.eval()
-   with torch.no_grad():
-     for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers) in enumerate(eval_loader):
-       x, x_lengths = x.cuda(0), x_lengths.cuda(0)
-       spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0)
-       y, y_lengths = y.cuda(0), y_lengths.cuda(0)
-       speakers = speakers.cuda(0)
-
-       # remove else
-       x = x[:1]
-       x_lengths = x_lengths[:1]
-       spec = spec[:1]
-       spec_lengths = spec_lengths[:1]
-       y = y[:1]
-       y_lengths = y_lengths[:1]
-       speakers = speakers[:1]
-       break
-     y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, max_len=1000)
-     y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length
-
-     mel = spec_to_mel_torch(
-         spec,
-         hps.data.filter_length,
-         hps.data.n_mel_channels,
-         hps.data.sampling_rate,
-         hps.data.mel_fmin,
-         hps.data.mel_fmax)
-     y_hat_mel = mel_spectrogram_torch(
-         y_hat.squeeze(1).float(),
-         hps.data.filter_length,
-         hps.data.n_mel_channels,
-         hps.data.sampling_rate,
-         hps.data.hop_length,
-         hps.data.win_length,
-         hps.data.mel_fmin,
-         hps.data.mel_fmax
-     )
-   image_dict = {
-     "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
-   }
-   audio_dict = {
-     "gen/audio": y_hat[0,:,:y_hat_lengths[0]]
-   }
-   if global_step == 0:
-     image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
-     audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]})
-
-   utils.summarize(
-     writer=writer_eval,
-     global_step=global_step,
-     images=image_dict,
-     audios=audio_dict,
-     audio_sampling_rate=hps.data.sampling_rate
-   )
-   generator.train()
-
-
- if __name__ == "__main__":
-   main()