Plachta committed on
Commit 2e73ae9 · 1 Parent(s): ed52a29

Delete train.py

Files changed (1)
  1. train.py +0 -301
train.py DELETED
@@ -1,301 +0,0 @@
- import os
- import json
- import argparse
- import itertools
- import math
- import torch
- from torch import nn, optim
- from torch.nn import functional as F
- from torch.utils.data import DataLoader
- from torch.utils.tensorboard import SummaryWriter
- import torch.multiprocessing as mp
- import torch.distributed as dist
- from torch.nn.parallel import DistributedDataParallel as DDP
- from torch.cuda.amp import autocast, GradScaler
-
- import librosa
- import logging
-
- logging.getLogger('numba').setLevel(logging.WARNING)
-
- import commons
- import utils
- from data_utils import (
-     TextAudioLoader,
-     TextAudioCollate,
-     DistributedBucketSampler
- )
- from models import (
-     SynthesizerTrn,
-     MultiPeriodDiscriminator,
- )
- from losses import (
-     generator_loss,
-     discriminator_loss,
-     feature_loss,
-     kl_loss
- )
- from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
- from text.symbols import symbols
-
-
- torch.backends.cudnn.benchmark = True
- global_step = 0
-
-
- def main():
-     """Assume Single Node Multi GPUs Training Only"""
-     assert torch.cuda.is_available(), "CPU training is not allowed."
-
-     n_gpus = torch.cuda.device_count()
-     os.environ['MASTER_ADDR'] = 'localhost'
-     os.environ['MASTER_PORT'] = '8000'
-
-     hps = utils.get_hparams()
-     mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
-
-
- def run(rank, n_gpus, hps):
-     global global_step
-     if rank == 0:
-         logger = utils.get_logger(hps.model_dir)
-         logger.info(hps)
-         utils.check_git_hash(hps.model_dir)
-         writer = SummaryWriter(log_dir=hps.model_dir)
-         writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
-
-     dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
-     torch.manual_seed(hps.train.seed)
-     torch.cuda.set_device(rank)
-
-     train_dataset = TextAudioLoader(hps.data.training_files, hps.data)
-     train_sampler = DistributedBucketSampler(
-         train_dataset,
-         hps.train.batch_size,
-         [32,300,400,500,600,700,800,900,1000],
-         num_replicas=n_gpus,
-         rank=rank,
-         shuffle=True)
-     collate_fn = TextAudioCollate()
-     train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True,
-                               collate_fn=collate_fn, batch_sampler=train_sampler)
-     if rank == 0:
-         eval_dataset = TextAudioLoader(hps.data.validation_files, hps.data)
-         eval_loader = DataLoader(eval_dataset, num_workers=8, shuffle=False,
-                                  batch_size=hps.train.batch_size, pin_memory=True,
-                                  drop_last=False, collate_fn=collate_fn)
-
-     net_g = SynthesizerTrn(
-         len(symbols),
-         hps.data.filter_length // 2 + 1,
-         hps.train.segment_size // hps.data.hop_length,
-         **hps.model).cuda(rank)
-     net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
-     optim_g = torch.optim.AdamW(
-         net_g.parameters(),
-         hps.train.learning_rate,
-         betas=hps.train.betas,
-         eps=hps.train.eps)
-     optim_d = torch.optim.AdamW(
-         net_d.parameters(),
-         hps.train.learning_rate,
-         betas=hps.train.betas,
-         eps=hps.train.eps)
-     net_g = DDP(net_g, device_ids=[rank])
-     net_d = DDP(net_d, device_ids=[rank])
-
-     try:
-         _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g)
-         _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d)
-         global_step = (epoch_str - 1) * len(train_loader)
-     except:
-         epoch_str = 1
-         global_step = 0
-
-     scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
-     scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
-
-     scaler = GradScaler(enabled=hps.train.fp16_run)
-
-     for epoch in range(epoch_str, hps.train.epochs + 1):
-         if rank==0:
-             train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
-         else:
-             train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None)
-         scheduler_g.step()
-         scheduler_d.step()
-
-
- def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
-     net_g, net_d = nets
-     optim_g, optim_d = optims
-     scheduler_g, scheduler_d = schedulers
-     train_loader, eval_loader = loaders
-     if writers is not None:
-         writer, writer_eval = writers
-
-     train_loader.batch_sampler.set_epoch(epoch)
-     global global_step
-
-     net_g.train()
-     net_d.train()
-     for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(train_loader):
-         x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
-         spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
-         y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
-
-         with autocast(enabled=hps.train.fp16_run):
-             y_hat, l_length, attn, ids_slice, x_mask, z_mask,\
-             (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths)
-
-             mel = spec_to_mel_torch(
-                 spec,
-                 hps.data.filter_length,
-                 hps.data.n_mel_channels,
-                 hps.data.sampling_rate,
-                 hps.data.mel_fmin,
-                 hps.data.mel_fmax)
-             y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
-             y_hat_mel = mel_spectrogram_torch(
-                 y_hat.squeeze(1),
-                 hps.data.filter_length,
-                 hps.data.n_mel_channels,
-                 hps.data.sampling_rate,
-                 hps.data.hop_length,
-                 hps.data.win_length,
-                 hps.data.mel_fmin,
-                 hps.data.mel_fmax
-             )
-
-             y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
-
-             # Discriminator
-             y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
-             with autocast(enabled=False):
-                 loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
-                 loss_disc_all = loss_disc
-         optim_d.zero_grad()
-         scaler.scale(loss_disc_all).backward()
-         scaler.unscale_(optim_d)
-         grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
-         scaler.step(optim_d)
-
-         with autocast(enabled=hps.train.fp16_run):
-             # Generator
-             y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
-             with autocast(enabled=False):
-                 loss_dur = torch.sum(l_length.float())
-                 loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
-                 loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
-
-                 loss_fm = feature_loss(fmap_r, fmap_g)
-                 loss_gen, losses_gen = generator_loss(y_d_hat_g)
-                 loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
-         optim_g.zero_grad()
-         scaler.scale(loss_gen_all).backward()
-         scaler.unscale_(optim_g)
-         grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
-         scaler.step(optim_g)
-         scaler.update()
-
-         if rank==0:
-             if global_step % hps.train.log_interval == 0:
-                 lr = optim_g.param_groups[0]['lr']
-                 losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
-                 logger.info('Train Epoch: {} [{:.0f}%]'.format(
-                     epoch,
-                     100. * batch_idx / len(train_loader)))
-                 logger.info([x.item() for x in losses] + [global_step, lr])
-
-                 scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
-                 scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl})
-
-                 scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
-                 scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
-                 scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
-                 image_dict = {
-                     "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
-                     "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
-                     "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
-                     "all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy())
-                 }
-                 utils.summarize(
-                     writer=writer,
-                     global_step=global_step,
-                     images=image_dict,
-                     scalars=scalar_dict)
-
-             if global_step % hps.train.eval_interval == 0:
-                 evaluate(hps, net_g, eval_loader, writer_eval)
-                 utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
-                 utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
-                 old_g = os.path.join(hps.model_dir, "G_{}.pth".format(global_step-2000))
-                 old_d = os.path.join(hps.model_dir, "D_{}.pth".format(global_step-2000))
-                 if os.path.exists(old_g):
-                     os.remove(old_g)
-                 if os.path.exists(old_d):
-                     os.remove(old_d)
-         global_step += 1
-
-     if rank == 0:
-         logger.info('====> Epoch: {}'.format(epoch))
-
-
- def evaluate(hps, generator, eval_loader, writer_eval):
-     generator.eval()
-     with torch.no_grad():
-         for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(eval_loader):
-             x, x_lengths = x.cuda(0), x_lengths.cuda(0)
-             spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0)
-             y, y_lengths = y.cuda(0), y_lengths.cuda(0)
-
-             # remove else
-             x = x[:1]
-             x_lengths = x_lengths[:1]
-             spec = spec[:1]
-             spec_lengths = spec_lengths[:1]
-             y = y[:1]
-             y_lengths = y_lengths[:1]
-             break
-         y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, max_len=1000)
-         y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length
-
-         mel = spec_to_mel_torch(
-             spec,
-             hps.data.filter_length,
-             hps.data.n_mel_channels,
-             hps.data.sampling_rate,
-             hps.data.mel_fmin,
-             hps.data.mel_fmax)
-         y_hat_mel = mel_spectrogram_torch(
-             y_hat.squeeze(1).float(),
-             hps.data.filter_length,
-             hps.data.n_mel_channels,
-             hps.data.sampling_rate,
-             hps.data.hop_length,
-             hps.data.win_length,
-             hps.data.mel_fmin,
-             hps.data.mel_fmax
-         )
-     image_dict = {
-         "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
-     }
-     audio_dict = {
-         "gen/audio": y_hat[0,:,:y_hat_lengths[0]]
-     }
-     if global_step == 0:
-         image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
-         audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]})
-
-     utils.summarize(
-         writer=writer_eval,
-         global_step=global_step,
-         images=image_dict,
-         audios=audio_dict,
-         audio_sampling_rate=hps.data.sampling_rate
-     )
-     generator.train()
-
-
- if __name__ == "__main__":
-     main()