# @package _group_
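# Pretraining configuration for w2v2_base_dutch (Jakob Poncelet): a wav2vec 2.0
# base model pretrained on Dutch speech with fairseq / Hydra.

# common: mixed precision and logging. memory_efficient_fp16 keeps parameters
# and optimizer state in fp16 instead of maintaining fp32 master copies,
# saving memory at some cost to numerical stability.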
common:
  memory_efficient_fp16: true
  log_format: json
  log_interval: 100
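# checkpoint: save every 10k updates, keep only the most recent interval
# checkpoint, and skip per-epoch checkpoints.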
checkpoint:
  save_interval_updates: 10000
  keep_interval_updates: 1
  no_epoch_checkpoints: true
  save_dir: /esat/spchtemp/scratch/jponcele/selfsupervised_exps/result/pretrain_w2v2_cgn-unsup-VW_base
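# task: raw-audio pretraining on the CGN + unsupervised VW manifest. Long
# utterances are cropped to 250k samples (~15.6 s, assuming 16 kHz audio) and
# clips under 4k samples (~0.25 s) are dropped; inputs are normalized to zero
# mean and unit variance. `segments` is not a stock fairseq option and
# presumably comes from the author's fork.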
task:
  _name: audio_pretraining
  data: /users/spraak/jponcele/BenchmarkingSS/data/cgn_unsup_VW_w2v2
  max_sample_size: 250000
  min_sample_size: 4000
  segments: true
  normalize: true
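# dataset: batch size is capped by audio length (1.4M samples, ~87.5 s at
# 16 kHz, per GPU) rather than by a fixed utterance count; inputs outside the
# size limits are skipped instead of raising an error, and the `test` split
# doubles as the validation set.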
dataset:
  num_workers: 6
  #batch_size: 4
  max_tokens: 1400000
  skip_invalid_size_inputs_valid_test: true
  valid_subset: test
  data_buffer_size: 1 #2
  required_batch_size_multiple: 1 #default=8
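# distributed_training: a single GPU with fairseq's legacy DDP backend; the
# large update_freq under optimization compensates for the small world size.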
distributed_training:
  distributed_world_size: 1
  ddp_backend: legacy_ddp
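# criterion: contrastive InfoNCE loss over quantized targets. loss_weights
# scales the criterion's two auxiliary losses (codebook diversity penalty 0.1,
# feature L2 penalty 10); codebook perplexities and the Gumbel temperature are
# logged alongside the loss.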
criterion:
  _name: wav2vec
  infonce: true
  log_keys: ["prob_perplexity","code_perplexity","temp"]
  loss_weights: [0.1, 10]
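# optimization: 400k updates at a peak lr of 5e-4. update_freq 32 accumulates
# gradients over 32 batches per update, so the effective batch is roughly
# 32 x 1.4M samples (~47 minutes of audio) despite the single GPU.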
optimization:
  max_update: 400000
  lr: [0.0005]
  update_freq: [32]
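# optimizer: Adam with the betas, epsilon and weight decay of the original
# wav2vec 2.0 base recipe.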
optimizer:
  _name: adam
  adam_betas: (0.9,0.98)
  adam_eps: 1e-06
  weight_decay: 0.01
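# lr_scheduler: linear warmup over the first 50k updates, then polynomial
# (by default linear) decay towards zero at max_update.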
lr_scheduler:
  _name: polynomial_decay
  warmup_updates: 50000
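# model: wav2vec 2.0 base with Gumbel-softmax quantized targets and 256-dim
# final projections for the contrastive loss. encoder_layerdrop randomly
# drops transformer layers during training; feature_grad_mult scales the
# gradient into the convolutional feature encoder by 0.1.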
model:
  _name: wav2vec2
  quantize_targets: true
  final_dim: 256
  encoder_layerdrop: 0.05
  dropout_input: 0.1
  dropout_features: 0.1
  feature_grad_mult: 0.1
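
# Usage sketch (assuming a standard fairseq install; the config directory
# below is hypothetical):
#   fairseq-hydra-train --config-dir /path/to/config/dir \
#       --config-name pretraining_config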