# This is the hyperparameter configuration file for Parallel WaveGAN.
# Please make sure this is adjusted for the LJSpeech dataset. If you want to
# apply it to another dataset, you might need to carefully change some parameters.
# This configuration performs 400k iters.
# Original: https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/egs/ljspeech/voc1/conf/parallel_wavegan.v1.yaml
###########################################################
# FEATURE EXTRACTION SETTING #
###########################################################
sampling_rate: 22050 # Sampling rate.
hop_size: 256 # Hop size.
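# Note: with sampling_rate 22050 and hop_size 256, one frame covers
# 256 / 22050 ~= 11.6 ms of audio, i.e. ~86 frames per second.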
format: "npy"
###########################################################
# GENERATOR NETWORK ARCHITECTURE SETTING #
###########################################################
model_type: "parallel_wavegan_generator"
parallel_wavegan_generator_params:
    out_channels: 1           # Number of output channels.
    kernel_size: 3            # Kernel size of dilated convolution.
    n_layers: 30              # Number of residual block layers.
    stacks: 3                 # Number of stacks, i.e., dilation cycles.
    residual_channels: 64     # Number of channels in residual conv.
    gate_channels: 128        # Number of channels in gated conv.
    skip_channels: 64         # Number of channels in skip conv.
    aux_channels: 80          # Number of channels for auxiliary feature conv.
                              # Must be the same as num_mels.
    aux_context_window: 2     # Context window size for auxiliary feature.
                              # If set to 2, previous 2 and future 2 frames will be considered.
    dropout: 0.0              # Dropout rate. 0.0 means no dropout applied.
    upsample_params:          # Upsampling network parameters.
        upsample_scales: [4, 4, 4, 4]    # Upsampling scales. Product of these must be the same as hop size.
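# Sanity check on the generator settings above: prod(upsample_scales)
# = 4 * 4 * 4 * 4 = 256 = hop_size, so each mel frame is upsampled to exactly
# one hop of waveform samples. With n_layers 30 and stacks 3, each dilation
# cycle has 30 / 3 = 10 layers; assuming the usual WaveNet-style dilation
# doubling (1, 2, 4, ..., 512 per cycle), this gives a large receptive field
# despite the small kernel size.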
###########################################################
# DISCRIMINATOR NETWORK ARCHITECTURE SETTING #
###########################################################
parallel_wavegan_discriminator_params:
    out_channels: 1           # Number of output channels.
    kernel_size: 3            # Kernel size of conv layers.
    n_layers: 10              # Number of conv layers.
    conv_channels: 64         # Number of channels in conv layers.
    use_bias: true            # Whether to use bias parameter in conv.
    nonlinear_activation: "LeakyReLU"    # Nonlinear function after each conv.
    nonlinear_activation_params:         # Nonlinear function parameters.
        alpha: 0.2                       # Alpha in LeakyReLU.
###########################################################
# STFT LOSS SETTING #
###########################################################
stft_loss_params:
    fft_lengths: [1024, 2048, 512]     # List of FFT sizes for STFT-based loss.
    frame_steps: [120, 240, 50]        # List of hop sizes for STFT-based loss.
    frame_lengths: [600, 1200, 240]    # List of window lengths for STFT-based loss.
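# These three resolutions implement the multi-resolution STFT loss from the
# Parallel WaveGAN paper: for each (fft_length, frame_step, frame_length)
# triple, a spectral-convergence term and a log-STFT-magnitude term are
# computed between generated and ground-truth audio, then averaged over the
# three resolutions.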
###########################################################
# ADVERSARIAL LOSS SETTING #
###########################################################
lambda_adv: 4.0 # Loss balancing coefficient.
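# Following the Parallel WaveGAN formulation, the total generator loss is
# roughly L_G = L_multi_stft + lambda_adv * L_adv once adversarial training
# starts.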
###########################################################
# DATA LOADER SETTING #
###########################################################
batch_size: 6                   # Batch size for each GPU, assuming gradient_accumulation_steps == 1.
batch_max_steps: 25600          # Length of each audio in batch for training. Make sure divisible by hop_size.
batch_max_steps_valid: 81920    # Length of each audio for validation. Make sure divisible by hop_size.
remove_short_samples: true      # Whether to remove samples whose length is less than batch_max_steps.
allow_cache: true               # Whether to allow caching in the dataset. If true, it requires CPU memory.
is_shuffle: true                # Whether to shuffle the dataset after each epoch.
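# Sanity check: batch_max_steps 25600 / hop_size 256 = 100 mel frames per
# training clip (~1.16 s at 22050 Hz); batch_max_steps_valid 81920 / 256
# = 320 frames (~3.7 s).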
###########################################################
# OPTIMIZER & SCHEDULER SETTING #
###########################################################
generator_optimizer_params:
    lr_fn: "ExponentialDecay"
    lr_params:
        initial_learning_rate: 0.0005
        decay_steps: 200000
        decay_rate: 0.5

discriminator_optimizer_params:
    lr_fn: "ExponentialDecay"
    lr_params:
        initial_learning_rate: 0.0005
        decay_steps: 200000
        decay_rate: 0.5
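# Assuming lr_fn maps to tf.keras.optimizers.schedules.ExponentialDecay, the
# learning rate follows lr(step) = 0.0005 * 0.5^(step / 200000): it halves
# every 200k steps and reaches 0.000125 at train_max_steps (400k).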
gradient_accumulation_steps: 1
###########################################################
# INTERVAL SETTING #
###########################################################
discriminator_train_start_steps: 100000 # Step at which discriminator training begins.
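# Before discriminator_train_start_steps, the generator is optimized with the
# multi-resolution STFT loss only; the adversarial term is added afterwards,
# following the original Parallel WaveGAN training schedule.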
train_max_steps: 400000 # Number of training steps.
save_interval_steps: 5000 # Interval steps to save checkpoint.
eval_interval_steps: 2000 # Interval steps to evaluate the network.
log_interval_steps: 200 # Interval steps to record the training log.
###########################################################
# OTHER SETTING #
###########################################################
num_save_intermediate_results: 1 # Number of batches to be saved as intermediate results.