procit009 committed on
Commit 84e273d · verified · 1 Parent(s): e542492

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. added_tokens.json +3 -0
  2. checkpoint-1000/model.safetensors +3 -0
  3. checkpoint-1000/model_1.safetensors +3 -0
  4. checkpoint-1000/optimizer.bin +3 -0
  5. checkpoint-1000/optimizer_1.bin +3 -0
  6. checkpoint-1000/random_states_0.pkl +3 -0
  7. checkpoint-1000/scaler.pt +3 -0
  8. checkpoint-1000/scheduler.bin +3 -0
  9. checkpoint-1000/scheduler_1.bin +3 -0
  10. checkpoint-1500/model.safetensors +3 -0
  11. checkpoint-1500/model_1.safetensors +3 -0
  12. checkpoint-1500/optimizer.bin +3 -0
  13. checkpoint-1500/optimizer_1.bin +3 -0
  14. checkpoint-500/model.safetensors +3 -0
  15. checkpoint-500/model_1.safetensors +3 -0
  16. checkpoint-500/optimizer.bin +3 -0
  17. checkpoint-500/optimizer_1.bin +3 -0
  18. checkpoint-500/random_states_0.pkl +3 -0
  19. checkpoint-500/scaler.pt +3 -0
  20. checkpoint-500/scheduler.bin +3 -0
  21. checkpoint-500/scheduler_1.bin +3 -0
  22. config.json +107 -0
  23. preprocessor_config.json +11 -0
  24. special_tokens_map.json +16 -0
  25. tmp/vits_dutch_finetuned/runs/Dec23_16-29-10_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734950738.482024/events.out.tfevents.1734950738.DESKTOP-7MSM0GK.6356.1 +3 -0
  26. tmp/vits_dutch_finetuned/runs/Dec23_16-29-10_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734950738.484406/hparams.yml +144 -0
  27. tmp/vits_dutch_finetuned/runs/Dec23_16-29-10_DESKTOP-7MSM0GK/vits_dutch_common_voice/events.out.tfevents.1734950736.DESKTOP-7MSM0GK.6356.0 +3 -0
  28. tmp/vits_dutch_finetuned/runs/Dec23_16-36-09_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734951083.7071934/events.out.tfevents.1734951083.DESKTOP-7MSM0GK.7854.1 +3 -0
  29. tmp/vits_dutch_finetuned/runs/Dec23_16-36-09_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734951083.7096193/hparams.yml +144 -0
  30. tmp/vits_dutch_finetuned/runs/Dec23_16-36-09_DESKTOP-7MSM0GK/vits_dutch_common_voice/events.out.tfevents.1734951081.DESKTOP-7MSM0GK.7854.0 +3 -0
  31. tmp/vits_dutch_finetuned/runs/Dec23_16-42-27_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734951460.5623682/events.out.tfevents.1734951460.DESKTOP-7MSM0GK.9106.1 +3 -0
  32. tmp/vits_dutch_finetuned/runs/Dec23_16-42-27_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734951460.5648153/hparams.yml +144 -0
  33. tmp/vits_dutch_finetuned/runs/Dec23_16-42-27_DESKTOP-7MSM0GK/vits_dutch_common_voice/events.out.tfevents.1734951458.DESKTOP-7MSM0GK.9106.0 +3 -0
  34. tmp/vits_dutch_finetuned/runs/Dec23_16-47-31_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734951765.4133904/events.out.tfevents.1734951765.DESKTOP-7MSM0GK.10133.1 +3 -0
  35. tmp/vits_dutch_finetuned/runs/Dec23_16-47-31_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734951765.4160218/hparams.yml +144 -0
  36. tmp/vits_dutch_finetuned/runs/Dec23_16-47-31_DESKTOP-7MSM0GK/vits_dutch_common_voice/events.out.tfevents.1734951763.DESKTOP-7MSM0GK.10133.0 +3 -0
  37. tmp/vits_dutch_finetuned/runs/Dec23_16-54-15_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734952169.0003452/events.out.tfevents.1734952169.DESKTOP-7MSM0GK.11439.1 +3 -0
  38. tmp/vits_dutch_finetuned/runs/Dec23_16-54-15_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734952169.0047798/hparams.yml +144 -0
  39. tmp/vits_dutch_finetuned/runs/Dec23_16-54-15_DESKTOP-7MSM0GK/vits_dutch_common_voice/events.out.tfevents.1734952167.DESKTOP-7MSM0GK.11439.0 +3 -0
  40. tmp/vits_dutch_finetuned/runs/Dec23_16-58-47_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734952442.0954733/events.out.tfevents.1734952442.DESKTOP-7MSM0GK.12358.1 +3 -0
  41. tmp/vits_dutch_finetuned/runs/Dec23_16-58-47_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734952442.0982804/hparams.yml +144 -0
  42. tmp/vits_dutch_finetuned/runs/Dec23_16-58-47_DESKTOP-7MSM0GK/vits_dutch_common_voice/events.out.tfevents.1734952440.DESKTOP-7MSM0GK.12358.0 +3 -0
  43. tmp/vits_dutch_finetuned/runs/Dec23_17-01-28_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734952601.7539113/events.out.tfevents.1734952601.DESKTOP-7MSM0GK.12954.1 +3 -0
  44. tmp/vits_dutch_finetuned/runs/Dec23_17-01-28_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734952601.7567434/hparams.yml +144 -0
  45. tmp/vits_dutch_finetuned/runs/Dec23_17-01-28_DESKTOP-7MSM0GK/vits_dutch_common_voice/events.out.tfevents.1734952600.DESKTOP-7MSM0GK.12954.0 +3 -0
  46. tmp/vits_dutch_finetuned/runs/Dec23_17-04-42_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734952795.457109/events.out.tfevents.1734952795.DESKTOP-7MSM0GK.13651.1 +3 -0
  47. tmp/vits_dutch_finetuned/runs/Dec23_17-04-42_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734952795.4594233/hparams.yml +144 -0
  48. tmp/vits_dutch_finetuned/runs/Dec23_17-04-42_DESKTOP-7MSM0GK/vits_dutch_common_voice/events.out.tfevents.1734952793.DESKTOP-7MSM0GK.13651.0 +3 -0
  49. tmp/vits_dutch_finetuned/runs/Dec23_17-10-29_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734953143.6386623/events.out.tfevents.1734953143.DESKTOP-7MSM0GK.14803.1 +3 -0
  50. tmp/vits_dutch_finetuned/runs/Dec23_17-10-29_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734953143.6412678/hparams.yml +144 -0
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+ "<unk>": 41
+ }
checkpoint-1000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec1efe7ce7f2903e64605b4b599906223e8b7713ab3be3fe87d7605a6688664a
+ size 145285216
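The checkpoint entries in this commit are Git LFS pointer files: the diff shows only the pointer (spec version, object hash, size), while the actual tensors live in LFS storage. A minimal sketch of materialising them locally with `huggingface_hub` (the repo id below is a placeholder, not taken from this page):

```python
from huggingface_hub import snapshot_download

# Placeholder repo id; snapshot_download resolves the LFS pointers into real files.
local_dir = snapshot_download(
    repo_id="<user>/vits_dutch_finetuned_common_voice",
    allow_patterns=["checkpoint-1000/*"],  # fetch just this checkpoint
)
print(local_dir)
```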
checkpoint-1000/model_1.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be72e84e6ac6a1276047fdda45297e1981f61600e9789328f24ac26d73cc6af0
+ size 187000136
checkpoint-1000/optimizer.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b47bf00fc7c1f729d8f7126c9267e03bc365458789cff82947a20c797e7b3bb
+ size 291076988
checkpoint-1000/optimizer_1.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f97a74c7b09b4794800f7b37933ecfee18460a6eb8f8de2e5afffb0104e334c
+ size 374071772
checkpoint-1000/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f9b353fef84c5a17fea698dbcb77cfd29da1f0242320bc5396c270cd1b6cfd7
+ size 14344
checkpoint-1000/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5931c4c95b122777cb75e67faf86734964d0090d07e56573125fb98262dfd8a1
+ size 988
checkpoint-1000/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48317425250665991367c963b89f6bc4c7b95085fd9e4f300b478992811a60dd
+ size 1000
checkpoint-1000/scheduler_1.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d760dfa29eeb19db8f0764f54ae021476f73f89df07740ffb76d432e8d7a0d13
+ size 1008
checkpoint-1500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3c06ddcc36546ee32ca2e2e9a27802310ecbb9ffcf13a9de1dd766a7304a065
+ size 75497472
checkpoint-1500/model_1.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c7ca818d52585efc164c74225b299563db05b979bdb53eb7094ef3fcc73097f
+ size 75497472
checkpoint-1500/optimizer.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c24fb666045f6ad4884ae8ae649988f54decc6386d89a9493bf9c8405c7c0990
+ size 75497472
checkpoint-1500/optimizer_1.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f5dddfc394e0df8a4691bb0a0a112746bdc7566dbc92ec2877e6ff465345858
+ size 3891200
checkpoint-500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e31d878e1578d43bf58cd1d03f0804aa36c43d59cca2513ba4684e41357b77b1
+ size 145285216
checkpoint-500/model_1.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f53ea5966e38bcdea173d994868780eb2b8f73563b592dba996758fcc62cd512
+ size 187000136
checkpoint-500/optimizer.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad989ec50b8441007500056b70710f9b5aaacb8137fc080ec86bb6e4717f7c96
+ size 291076988
checkpoint-500/optimizer_1.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9ed274e25a4692c52f569e9aa7d8c75431e07a6a86921b28eb1cbcdcbc1a068
+ size 374071772
checkpoint-500/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:879c5c51860fcdc6f3b957c918f3332fc6fab07e4ee8ea8602e5fdb2242bff61
+ size 14472
checkpoint-500/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a28d7e8ef253574a0430c266cf3fedf03a9ba35a20a729c89db80bb4000c645
+ size 988
checkpoint-500/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18b5b7844bb293da8a22e1427b8cb72f66a3c9b80dc809accd22b46471453b91
+ size 1000
checkpoint-500/scheduler_1.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0fa99258f9dfae9103928a006d7f7fa822e4b82290c1dc17989521373313941f
+ size 1008
config.json ADDED
@@ -0,0 +1,107 @@
+ {
+ "activation_dropout": 0.1,
+ "architectures": [
+ "VitsModelForPreTraining"
+ ],
+ "attention_dropout": 0.1,
+ "depth_separable_channels": 2,
+ "depth_separable_num_layers": 3,
+ "discriminator_kernel_size": 5,
+ "discriminator_period_channels": [
+ 1,
+ 32,
+ 128,
+ 512,
+ 1024
+ ],
+ "discriminator_periods": [
+ 2,
+ 3,
+ 5,
+ 7,
+ 11
+ ],
+ "discriminator_scale_channels": [
+ 1,
+ 16,
+ 64,
+ 256,
+ 1024
+ ],
+ "discriminator_stride": 3,
+ "duration_predictor_dropout": 0.5,
+ "duration_predictor_filter_channels": 256,
+ "duration_predictor_flow_bins": 10,
+ "duration_predictor_kernel_size": 3,
+ "duration_predictor_num_flows": 4,
+ "duration_predictor_tail_bound": 5.0,
+ "ffn_dim": 768,
+ "ffn_kernel_size": 3,
+ "flow_size": 192,
+ "hidden_act": "relu",
+ "hidden_dropout": 0.1,
+ "hidden_size": 192,
+ "hop_length": 256,
+ "initializer_range": 0.02,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.1,
+ "leaky_relu_slope": 0.1,
+ "model_type": "vits",
+ "noise_scale": 0.667,
+ "noise_scale_duration": 0.8,
+ "num_attention_heads": 2,
+ "num_hidden_layers": 6,
+ "num_speakers": 1,
+ "posterior_encoder_num_wavenet_layers": 16,
+ "prior_encoder_num_flows": 4,
+ "prior_encoder_num_wavenet_layers": 4,
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "sampling_rate": 16000,
+ "segment_size": 8192,
+ "speaker_embedding_size": 0,
+ "speaking_rate": 1.0,
+ "spectrogram_bins": 513,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2
+ ],
+ "use_bias": true,
+ "use_stochastic_duration_prediction": true,
+ "vocab_size": 41,
+ "wavenet_dilation_rate": 1,
+ "wavenet_dropout": 0.0,
+ "wavenet_kernel_size": 5,
+ "window_size": 4
+ }
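For orientation, the config above is a standard Transformers VITS configuration (16 kHz output, 41-symbol vocabulary, 6 encoder layers). Below is a hedged sketch of inspecting it and running plain inference with the stock `transformers` classes; the local path is a placeholder, and since `VitsModelForPreTraining` under `architectures` comes from the VITS fine-tuning script, loading the checkpoint as `VitsModel` may discard discriminator weights:

```python
import torch
from transformers import VitsConfig, VitsModel, VitsTokenizer

checkpoint = "./vits_dutch_finetuned"  # placeholder: local clone of this repository

config = VitsConfig.from_pretrained(checkpoint)   # parses the config.json shown above
print(config.sampling_rate, config.vocab_size)    # 16000 41

tokenizer = VitsTokenizer.from_pretrained(checkpoint)
model = VitsModel.from_pretrained(checkpoint)     # generator only; discriminator weights are ignored

inputs = tokenizer("hallo wereld", return_tensors="pt")
with torch.no_grad():
    waveform = model(**inputs).waveform           # audio at config.sampling_rate
```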
preprocessor_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+ "feature_extractor_type": "VitsFeatureExtractor",
+ "feature_size": 80,
+ "hop_length": 256,
+ "max_wav_value": 32768.0,
+ "n_fft": 1024,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "return_attention_mask": false,
+ "sampling_rate": 16000
+ }
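The preprocessor above describes the spectrogram front end used during fine-tuning: 16 kHz audio, a 1024-point FFT with hop length 256, and presumably an 80-bin mel filterbank for the mel loss. A rough illustration of what those STFT parameters imply, not the fine-tuning script's exact code:

```python
import torch

# Values from preprocessor_config.json above.
sampling_rate, n_fft, hop_length = 16000, 1024, 256

waveform = torch.randn(sampling_rate)  # 1 second of dummy audio standing in for real speech
spec = torch.stft(
    waveform,
    n_fft=n_fft,
    hop_length=hop_length,
    window=torch.hann_window(n_fft),
    return_complex=True,
).abs()
print(spec.shape)  # (n_fft // 2 + 1, frames) = (513, ...), matching "spectrogram_bins" in config.json
```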
special_tokens_map.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "pad_token": {
+ "content": "g",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tmp/vits_dutch_finetuned/runs/Dec23_16-29-10_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734950738.482024/events.out.tfevents.1734950738.DESKTOP-7MSM0GK.6356.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6954d410020baf4d78fe8e52d6a94d08d0865a56ae8c8109a4cd62aca7a04ea1
+ size 7964
tmp/vits_dutch_finetuned/runs/Dec23_16-29-10_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734950738.484406/hparams.yml ADDED
@@ -0,0 +1,144 @@
+ accelerator_config: '{''split_batches'': False, ''dispatch_batches'': None, ''even_batches'':
+ True, ''use_seedable_sampler'': True, ''non_blocking'': False, ''gradient_accumulation_kwargs'':
+ None}'
+ adafactor: false
+ adam_beta1: 0.9
+ adam_beta2: 0.999
+ adam_epsilon: 1.0e-08
+ auto_find_batch_size: false
+ average_tokens_across_devices: false
+ batch_eval_metrics: false
+ bf16: false
+ bf16_full_eval: false
+ data_seed: None
+ dataloader_drop_last: false
+ dataloader_num_workers: 0
+ dataloader_persistent_workers: false
+ dataloader_pin_memory: true
+ dataloader_prefetch_factor: None
+ ddp_backend: None
+ ddp_broadcast_buffers: None
+ ddp_bucket_cap_mb: None
+ ddp_find_unused_parameters: None
+ ddp_timeout: 1800
+ debug: '[]'
+ deepspeed: None
+ disable_tqdm: false
+ dispatch_batches: None
+ do_eval: true
+ do_predict: false
+ do_step_schedule_per_epoch: true
+ do_train: true
+ eval_accumulation_steps: None
+ eval_batch_size: 4
+ eval_delay: 0
+ eval_do_concat_batches: true
+ eval_on_start: false
+ eval_steps: 25
+ eval_strategy: 'no'
+ eval_use_gather_object: false
+ evaluation_strategy: None
+ fp16: true
+ fp16_backend: auto
+ fp16_full_eval: false
+ fp16_opt_level: O1
+ fsdp: '[]'
+ fsdp_config: '{''min_num_params'': 0, ''xla'': False, ''xla_fsdp_v2'': False, ''xla_fsdp_grad_ckpt'':
+ False}'
+ fsdp_min_num_params: 0
+ fsdp_transformer_layer_cls_to_wrap: None
+ full_determinism: false
+ gradient_accumulation_steps: 2
+ gradient_checkpointing: false
+ gradient_checkpointing_kwargs: None
+ greater_is_better: None
+ group_by_length: false
+ half_precision_backend: auto
+ hub_always_push: false
+ hub_model_id: vits_dutch_finetuned_common_voice
+ hub_private_repo: None
+ hub_strategy: every_save
+ hub_token: <HUB_TOKEN>
+ ignore_data_skip: false
+ include_for_metrics: '[]'
+ include_inputs_for_metrics: false
+ include_num_input_tokens_seen: false
+ include_tokens_per_second: false
+ jit_mode_eval: false
+ label_names: None
+ label_smoothing_factor: 0.0
+ learning_rate: 3.0e-05
+ length_column_name: length
+ load_best_model_at_end: false
+ local_rank: 0
+ log_level: passive
+ log_level_replica: warning
+ log_on_each_node: true
+ logging_dir: ./tmp/vits_dutch_finetuned/runs/Dec23_16-29-10_DESKTOP-7MSM0GK
+ logging_first_step: false
+ logging_nan_inf_filter: true
+ logging_steps: 500
+ logging_strategy: steps
+ lr_decay: 0.999875
+ lr_scheduler_kwargs: '{}'
+ lr_scheduler_type: linear
+ max_grad_norm: 1.0
+ max_steps: 1700
+ metric_for_best_model: None
+ mp_parameters: ''
+ neftune_noise_alpha: None
+ no_cuda: false
+ num_train_epochs: 50
+ optim: adamw_torch
+ optim_args: None
+ optim_target_modules: None
+ output_dir: ./tmp/vits_dutch_finetuned
+ overwrite_output_dir: true
+ past_index: -1
+ per_device_eval_batch_size: 4
+ per_device_train_batch_size: 4
+ per_gpu_eval_batch_size: None
+ per_gpu_train_batch_size: None
+ prediction_loss_only: false
+ push_to_hub: true
+ push_to_hub_model_id: None
+ push_to_hub_organization: None
+ push_to_hub_token: <PUSH_TO_HUB_TOKEN>
+ ray_scope: last
+ remove_unused_columns: true
+ report_to: '[''tensorboard'', ''wandb'']'
+ restore_callback_states_from_checkpoint: false
+ resume_from_checkpoint: None
+ run_name: ./tmp/vits_dutch_finetuned
+ save_on_each_node: false
+ save_only_model: false
+ save_safetensors: true
+ save_steps: 500
+ save_strategy: steps
+ save_total_limit: None
+ seed: 123
+ skip_memory_metrics: true
+ split_batches: None
+ tf32: None
+ torch_compile: false
+ torch_compile_backend: None
+ torch_compile_mode: None
+ torch_empty_cache_steps: None
+ torchdynamo: None
+ tpu_metrics_debug: false
+ tpu_num_cores: None
+ train_batch_size: 4
+ use_cpu: false
+ use_ipex: false
+ use_legacy_prediction_loop: false
+ use_liger_kernel: false
+ use_mps_device: false
+ warmup_ratio: 0.01
+ warmup_steps: 0
+ weight_decay: 0.0
+ weight_disc: 3
+ weight_duration: 1
+ weight_fmaps: 1
+ weight_gen: 1
+ weight_kl: 1.0
+ weight_mel: 30
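The dump above comes from the fine-tuning script's training arguments. As a hedged sketch, the standard fields map onto `transformers.TrainingArguments` roughly as below; `lr_decay`, `do_step_schedule_per_epoch`, and the `weight_*` loss weights are extensions of the VITS fine-tuning script rather than stock `TrainingArguments` options:

```python
from transformers import TrainingArguments

# Core settings copied from the hparams.yml dump above (illustrative, not the exact script).
args = TrainingArguments(
    output_dir="./tmp/vits_dutch_finetuned",
    per_device_train_batch_size=4,
    gradient_accumulation_steps=2,   # effective batch of 8 samples per optimizer step
    learning_rate=3e-5,
    max_steps=1700,
    warmup_ratio=0.01,               # roughly 17 warmup steps out of 1700
    fp16=True,
    seed=123,
    save_steps=500,
    logging_steps=500,
    push_to_hub=True,
    hub_model_id="vits_dutch_finetuned_common_voice",
    report_to=["tensorboard", "wandb"],
)

# VITS-specific loss weights from the same dump, applied by the fine-tuning loop itself.
loss_weights = {"mel": 30, "kl": 1.0, "duration": 1, "disc": 3, "gen": 1, "fmaps": 1}
```

The remaining run directories below repeat the same hyperparameters; only `logging_dir` changes from run to run.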
tmp/vits_dutch_finetuned/runs/Dec23_16-29-10_DESKTOP-7MSM0GK/vits_dutch_common_voice/events.out.tfevents.1734950736.DESKTOP-7MSM0GK.6356.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f89819fbff8f43da0779bcbeb87e48f5081891ff14e5c41669da8987261c49f
+ size 13388
tmp/vits_dutch_finetuned/runs/Dec23_16-36-09_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734951083.7071934/events.out.tfevents.1734951083.DESKTOP-7MSM0GK.7854.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d7f456077df3f695f22edb1ed81b11d011ee1903ccf25bffc2e38379fe33eaa
+ size 7964
tmp/vits_dutch_finetuned/runs/Dec23_16-36-09_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734951083.7096193/hparams.yml ADDED
@@ -0,0 +1,144 @@
+ accelerator_config: '{''split_batches'': False, ''dispatch_batches'': None, ''even_batches'':
+ True, ''use_seedable_sampler'': True, ''non_blocking'': False, ''gradient_accumulation_kwargs'':
+ None}'
+ adafactor: false
+ adam_beta1: 0.9
+ adam_beta2: 0.999
+ adam_epsilon: 1.0e-08
+ auto_find_batch_size: false
+ average_tokens_across_devices: false
+ batch_eval_metrics: false
+ bf16: false
+ bf16_full_eval: false
+ data_seed: None
+ dataloader_drop_last: false
+ dataloader_num_workers: 0
+ dataloader_persistent_workers: false
+ dataloader_pin_memory: true
+ dataloader_prefetch_factor: None
+ ddp_backend: None
+ ddp_broadcast_buffers: None
+ ddp_bucket_cap_mb: None
+ ddp_find_unused_parameters: None
+ ddp_timeout: 1800
+ debug: '[]'
+ deepspeed: None
+ disable_tqdm: false
+ dispatch_batches: None
+ do_eval: true
+ do_predict: false
+ do_step_schedule_per_epoch: true
+ do_train: true
+ eval_accumulation_steps: None
+ eval_batch_size: 4
+ eval_delay: 0
+ eval_do_concat_batches: true
+ eval_on_start: false
+ eval_steps: 25
+ eval_strategy: 'no'
+ eval_use_gather_object: false
+ evaluation_strategy: None
+ fp16: true
+ fp16_backend: auto
+ fp16_full_eval: false
+ fp16_opt_level: O1
+ fsdp: '[]'
+ fsdp_config: '{''min_num_params'': 0, ''xla'': False, ''xla_fsdp_v2'': False, ''xla_fsdp_grad_ckpt'':
+ False}'
+ fsdp_min_num_params: 0
+ fsdp_transformer_layer_cls_to_wrap: None
+ full_determinism: false
+ gradient_accumulation_steps: 2
+ gradient_checkpointing: false
+ gradient_checkpointing_kwargs: None
+ greater_is_better: None
+ group_by_length: false
+ half_precision_backend: auto
+ hub_always_push: false
+ hub_model_id: vits_dutch_finetuned_common_voice
+ hub_private_repo: None
+ hub_strategy: every_save
+ hub_token: <HUB_TOKEN>
+ ignore_data_skip: false
+ include_for_metrics: '[]'
+ include_inputs_for_metrics: false
+ include_num_input_tokens_seen: false
+ include_tokens_per_second: false
+ jit_mode_eval: false
+ label_names: None
+ label_smoothing_factor: 0.0
+ learning_rate: 3.0e-05
+ length_column_name: length
+ load_best_model_at_end: false
+ local_rank: 0
+ log_level: passive
+ log_level_replica: warning
+ log_on_each_node: true
+ logging_dir: ./tmp/vits_dutch_finetuned/runs/Dec23_16-36-09_DESKTOP-7MSM0GK
+ logging_first_step: false
+ logging_nan_inf_filter: true
+ logging_steps: 500
+ logging_strategy: steps
+ lr_decay: 0.999875
+ lr_scheduler_kwargs: '{}'
+ lr_scheduler_type: linear
+ max_grad_norm: 1.0
+ max_steps: 1700
+ metric_for_best_model: None
+ mp_parameters: ''
+ neftune_noise_alpha: None
+ no_cuda: false
+ num_train_epochs: 50
+ optim: adamw_torch
+ optim_args: None
+ optim_target_modules: None
+ output_dir: ./tmp/vits_dutch_finetuned
+ overwrite_output_dir: true
+ past_index: -1
+ per_device_eval_batch_size: 4
+ per_device_train_batch_size: 4
+ per_gpu_eval_batch_size: None
+ per_gpu_train_batch_size: None
+ prediction_loss_only: false
+ push_to_hub: true
+ push_to_hub_model_id: None
+ push_to_hub_organization: None
+ push_to_hub_token: <PUSH_TO_HUB_TOKEN>
+ ray_scope: last
+ remove_unused_columns: true
+ report_to: '[''tensorboard'', ''wandb'']'
+ restore_callback_states_from_checkpoint: false
+ resume_from_checkpoint: None
+ run_name: ./tmp/vits_dutch_finetuned
+ save_on_each_node: false
+ save_only_model: false
+ save_safetensors: true
+ save_steps: 500
+ save_strategy: steps
+ save_total_limit: None
+ seed: 123
+ skip_memory_metrics: true
+ split_batches: None
+ tf32: None
+ torch_compile: false
+ torch_compile_backend: None
+ torch_compile_mode: None
+ torch_empty_cache_steps: None
+ torchdynamo: None
+ tpu_metrics_debug: false
+ tpu_num_cores: None
+ train_batch_size: 4
+ use_cpu: false
+ use_ipex: false
+ use_legacy_prediction_loop: false
+ use_liger_kernel: false
+ use_mps_device: false
+ warmup_ratio: 0.01
+ warmup_steps: 0
+ weight_decay: 0.0
+ weight_disc: 3
+ weight_duration: 1
+ weight_fmaps: 1
+ weight_gen: 1
+ weight_kl: 1.0
+ weight_mel: 30
tmp/vits_dutch_finetuned/runs/Dec23_16-36-09_DESKTOP-7MSM0GK/vits_dutch_common_voice/events.out.tfevents.1734951081.DESKTOP-7MSM0GK.7854.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82c21b3222fcec15bb3e843bb0cfccce888ba6c471f62d71d931f25ae842ec2f
+ size 13388
tmp/vits_dutch_finetuned/runs/Dec23_16-42-27_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734951460.5623682/events.out.tfevents.1734951460.DESKTOP-7MSM0GK.9106.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c9d91021a49efdf36452226aea3eb6bece77a1be01f1da48c1bfe4222fc1f62
+ size 7964
tmp/vits_dutch_finetuned/runs/Dec23_16-42-27_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734951460.5648153/hparams.yml ADDED
@@ -0,0 +1,144 @@
+ accelerator_config: '{''split_batches'': False, ''dispatch_batches'': None, ''even_batches'':
+ True, ''use_seedable_sampler'': True, ''non_blocking'': False, ''gradient_accumulation_kwargs'':
+ None}'
+ adafactor: false
+ adam_beta1: 0.9
+ adam_beta2: 0.999
+ adam_epsilon: 1.0e-08
+ auto_find_batch_size: false
+ average_tokens_across_devices: false
+ batch_eval_metrics: false
+ bf16: false
+ bf16_full_eval: false
+ data_seed: None
+ dataloader_drop_last: false
+ dataloader_num_workers: 0
+ dataloader_persistent_workers: false
+ dataloader_pin_memory: true
+ dataloader_prefetch_factor: None
+ ddp_backend: None
+ ddp_broadcast_buffers: None
+ ddp_bucket_cap_mb: None
+ ddp_find_unused_parameters: None
+ ddp_timeout: 1800
+ debug: '[]'
+ deepspeed: None
+ disable_tqdm: false
+ dispatch_batches: None
+ do_eval: true
+ do_predict: false
+ do_step_schedule_per_epoch: true
+ do_train: true
+ eval_accumulation_steps: None
+ eval_batch_size: 4
+ eval_delay: 0
+ eval_do_concat_batches: true
+ eval_on_start: false
+ eval_steps: 25
+ eval_strategy: 'no'
+ eval_use_gather_object: false
+ evaluation_strategy: None
+ fp16: true
+ fp16_backend: auto
+ fp16_full_eval: false
+ fp16_opt_level: O1
+ fsdp: '[]'
+ fsdp_config: '{''min_num_params'': 0, ''xla'': False, ''xla_fsdp_v2'': False, ''xla_fsdp_grad_ckpt'':
+ False}'
+ fsdp_min_num_params: 0
+ fsdp_transformer_layer_cls_to_wrap: None
+ full_determinism: false
+ gradient_accumulation_steps: 2
+ gradient_checkpointing: false
+ gradient_checkpointing_kwargs: None
+ greater_is_better: None
+ group_by_length: false
+ half_precision_backend: auto
+ hub_always_push: false
+ hub_model_id: vits_dutch_finetuned_common_voice
+ hub_private_repo: None
+ hub_strategy: every_save
+ hub_token: <HUB_TOKEN>
+ ignore_data_skip: false
+ include_for_metrics: '[]'
+ include_inputs_for_metrics: false
+ include_num_input_tokens_seen: false
+ include_tokens_per_second: false
+ jit_mode_eval: false
+ label_names: None
+ label_smoothing_factor: 0.0
+ learning_rate: 3.0e-05
+ length_column_name: length
+ load_best_model_at_end: false
+ local_rank: 0
+ log_level: passive
+ log_level_replica: warning
+ log_on_each_node: true
+ logging_dir: ./tmp/vits_dutch_finetuned/runs/Dec23_16-42-27_DESKTOP-7MSM0GK
+ logging_first_step: false
+ logging_nan_inf_filter: true
+ logging_steps: 500
+ logging_strategy: steps
+ lr_decay: 0.999875
+ lr_scheduler_kwargs: '{}'
+ lr_scheduler_type: linear
+ max_grad_norm: 1.0
+ max_steps: 1700
+ metric_for_best_model: None
+ mp_parameters: ''
+ neftune_noise_alpha: None
+ no_cuda: false
+ num_train_epochs: 50
+ optim: adamw_torch
+ optim_args: None
+ optim_target_modules: None
+ output_dir: ./tmp/vits_dutch_finetuned
+ overwrite_output_dir: true
+ past_index: -1
+ per_device_eval_batch_size: 4
+ per_device_train_batch_size: 4
+ per_gpu_eval_batch_size: None
+ per_gpu_train_batch_size: None
+ prediction_loss_only: false
+ push_to_hub: true
+ push_to_hub_model_id: None
+ push_to_hub_organization: None
+ push_to_hub_token: <PUSH_TO_HUB_TOKEN>
+ ray_scope: last
+ remove_unused_columns: true
+ report_to: '[''tensorboard'', ''wandb'']'
+ restore_callback_states_from_checkpoint: false
+ resume_from_checkpoint: None
+ run_name: ./tmp/vits_dutch_finetuned
+ save_on_each_node: false
+ save_only_model: false
+ save_safetensors: true
+ save_steps: 500
+ save_strategy: steps
+ save_total_limit: None
+ seed: 123
+ skip_memory_metrics: true
+ split_batches: None
+ tf32: None
+ torch_compile: false
+ torch_compile_backend: None
+ torch_compile_mode: None
+ torch_empty_cache_steps: None
+ torchdynamo: None
+ tpu_metrics_debug: false
+ tpu_num_cores: None
+ train_batch_size: 4
+ use_cpu: false
+ use_ipex: false
+ use_legacy_prediction_loop: false
+ use_liger_kernel: false
+ use_mps_device: false
+ warmup_ratio: 0.01
+ warmup_steps: 0
+ weight_decay: 0.0
+ weight_disc: 3
+ weight_duration: 1
+ weight_fmaps: 1
+ weight_gen: 1
+ weight_kl: 1.0
+ weight_mel: 30
tmp/vits_dutch_finetuned/runs/Dec23_16-42-27_DESKTOP-7MSM0GK/vits_dutch_common_voice/events.out.tfevents.1734951458.DESKTOP-7MSM0GK.9106.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d80b0838aeb4af09998a923b2cd19830c0cc91df81db5b6ab8d1c3bcda55633
+ size 13388
tmp/vits_dutch_finetuned/runs/Dec23_16-47-31_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734951765.4133904/events.out.tfevents.1734951765.DESKTOP-7MSM0GK.10133.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58f44a83759c84137d5abd0ef8632a561f445e0289e8b1b96859aaf675841de5
+ size 7964
tmp/vits_dutch_finetuned/runs/Dec23_16-47-31_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734951765.4160218/hparams.yml ADDED
@@ -0,0 +1,144 @@
+ accelerator_config: '{''split_batches'': False, ''dispatch_batches'': None, ''even_batches'':
+ True, ''use_seedable_sampler'': True, ''non_blocking'': False, ''gradient_accumulation_kwargs'':
+ None}'
+ adafactor: false
+ adam_beta1: 0.9
+ adam_beta2: 0.999
+ adam_epsilon: 1.0e-08
+ auto_find_batch_size: false
+ average_tokens_across_devices: false
+ batch_eval_metrics: false
+ bf16: false
+ bf16_full_eval: false
+ data_seed: None
+ dataloader_drop_last: false
+ dataloader_num_workers: 0
+ dataloader_persistent_workers: false
+ dataloader_pin_memory: true
+ dataloader_prefetch_factor: None
+ ddp_backend: None
+ ddp_broadcast_buffers: None
+ ddp_bucket_cap_mb: None
+ ddp_find_unused_parameters: None
+ ddp_timeout: 1800
+ debug: '[]'
+ deepspeed: None
+ disable_tqdm: false
+ dispatch_batches: None
+ do_eval: true
+ do_predict: false
+ do_step_schedule_per_epoch: true
+ do_train: true
+ eval_accumulation_steps: None
+ eval_batch_size: 4
+ eval_delay: 0
+ eval_do_concat_batches: true
+ eval_on_start: false
+ eval_steps: 25
+ eval_strategy: 'no'
+ eval_use_gather_object: false
+ evaluation_strategy: None
+ fp16: true
+ fp16_backend: auto
+ fp16_full_eval: false
+ fp16_opt_level: O1
+ fsdp: '[]'
+ fsdp_config: '{''min_num_params'': 0, ''xla'': False, ''xla_fsdp_v2'': False, ''xla_fsdp_grad_ckpt'':
+ False}'
+ fsdp_min_num_params: 0
+ fsdp_transformer_layer_cls_to_wrap: None
+ full_determinism: false
+ gradient_accumulation_steps: 2
+ gradient_checkpointing: false
+ gradient_checkpointing_kwargs: None
+ greater_is_better: None
+ group_by_length: false
+ half_precision_backend: auto
+ hub_always_push: false
+ hub_model_id: vits_dutch_finetuned_common_voice
+ hub_private_repo: None
+ hub_strategy: every_save
+ hub_token: <HUB_TOKEN>
+ ignore_data_skip: false
+ include_for_metrics: '[]'
+ include_inputs_for_metrics: false
+ include_num_input_tokens_seen: false
+ include_tokens_per_second: false
+ jit_mode_eval: false
+ label_names: None
+ label_smoothing_factor: 0.0
+ learning_rate: 3.0e-05
+ length_column_name: length
+ load_best_model_at_end: false
+ local_rank: 0
+ log_level: passive
+ log_level_replica: warning
+ log_on_each_node: true
+ logging_dir: ./tmp/vits_dutch_finetuned/runs/Dec23_16-47-31_DESKTOP-7MSM0GK
+ logging_first_step: false
+ logging_nan_inf_filter: true
+ logging_steps: 500
+ logging_strategy: steps
+ lr_decay: 0.999875
+ lr_scheduler_kwargs: '{}'
+ lr_scheduler_type: linear
+ max_grad_norm: 1.0
+ max_steps: 1700
+ metric_for_best_model: None
+ mp_parameters: ''
+ neftune_noise_alpha: None
+ no_cuda: false
+ num_train_epochs: 50
+ optim: adamw_torch
+ optim_args: None
+ optim_target_modules: None
+ output_dir: ./tmp/vits_dutch_finetuned
+ overwrite_output_dir: true
+ past_index: -1
+ per_device_eval_batch_size: 4
+ per_device_train_batch_size: 4
+ per_gpu_eval_batch_size: None
+ per_gpu_train_batch_size: None
+ prediction_loss_only: false
+ push_to_hub: true
+ push_to_hub_model_id: None
+ push_to_hub_organization: None
+ push_to_hub_token: <PUSH_TO_HUB_TOKEN>
+ ray_scope: last
+ remove_unused_columns: true
+ report_to: '[''tensorboard'', ''wandb'']'
+ restore_callback_states_from_checkpoint: false
+ resume_from_checkpoint: None
+ run_name: ./tmp/vits_dutch_finetuned
+ save_on_each_node: false
+ save_only_model: false
+ save_safetensors: true
+ save_steps: 500
+ save_strategy: steps
+ save_total_limit: None
+ seed: 123
+ skip_memory_metrics: true
+ split_batches: None
+ tf32: None
+ torch_compile: false
+ torch_compile_backend: None
+ torch_compile_mode: None
+ torch_empty_cache_steps: None
+ torchdynamo: None
+ tpu_metrics_debug: false
+ tpu_num_cores: None
+ train_batch_size: 4
+ use_cpu: false
+ use_ipex: false
+ use_legacy_prediction_loop: false
+ use_liger_kernel: false
+ use_mps_device: false
+ warmup_ratio: 0.01
+ warmup_steps: 0
+ weight_decay: 0.0
+ weight_disc: 3
+ weight_duration: 1
+ weight_fmaps: 1
+ weight_gen: 1
+ weight_kl: 1.0
+ weight_mel: 30
tmp/vits_dutch_finetuned/runs/Dec23_16-47-31_DESKTOP-7MSM0GK/vits_dutch_common_voice/events.out.tfevents.1734951763.DESKTOP-7MSM0GK.10133.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d79565b5ef5e40572b331708cb5ec191d9a6ea8b3df49e725a9ea377e589bc6
+ size 13388
tmp/vits_dutch_finetuned/runs/Dec23_16-54-15_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734952169.0003452/events.out.tfevents.1734952169.DESKTOP-7MSM0GK.11439.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:067bd34b7ec523f9338bdfeb0bab3bf28cf8b6f896d7ed7f18e3474e33e3850c
+ size 7964
tmp/vits_dutch_finetuned/runs/Dec23_16-54-15_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734952169.0047798/hparams.yml ADDED
@@ -0,0 +1,144 @@
+ accelerator_config: '{''split_batches'': False, ''dispatch_batches'': None, ''even_batches'':
+ True, ''use_seedable_sampler'': True, ''non_blocking'': False, ''gradient_accumulation_kwargs'':
+ None}'
+ adafactor: false
+ adam_beta1: 0.9
+ adam_beta2: 0.999
+ adam_epsilon: 1.0e-08
+ auto_find_batch_size: false
+ average_tokens_across_devices: false
+ batch_eval_metrics: false
+ bf16: false
+ bf16_full_eval: false
+ data_seed: None
+ dataloader_drop_last: false
+ dataloader_num_workers: 0
+ dataloader_persistent_workers: false
+ dataloader_pin_memory: true
+ dataloader_prefetch_factor: None
+ ddp_backend: None
+ ddp_broadcast_buffers: None
+ ddp_bucket_cap_mb: None
+ ddp_find_unused_parameters: None
+ ddp_timeout: 1800
+ debug: '[]'
+ deepspeed: None
+ disable_tqdm: false
+ dispatch_batches: None
+ do_eval: true
+ do_predict: false
+ do_step_schedule_per_epoch: true
+ do_train: true
+ eval_accumulation_steps: None
+ eval_batch_size: 4
+ eval_delay: 0
+ eval_do_concat_batches: true
+ eval_on_start: false
+ eval_steps: 25
+ eval_strategy: 'no'
+ eval_use_gather_object: false
+ evaluation_strategy: None
+ fp16: true
+ fp16_backend: auto
+ fp16_full_eval: false
+ fp16_opt_level: O1
+ fsdp: '[]'
+ fsdp_config: '{''min_num_params'': 0, ''xla'': False, ''xla_fsdp_v2'': False, ''xla_fsdp_grad_ckpt'':
+ False}'
+ fsdp_min_num_params: 0
+ fsdp_transformer_layer_cls_to_wrap: None
+ full_determinism: false
+ gradient_accumulation_steps: 2
+ gradient_checkpointing: false
+ gradient_checkpointing_kwargs: None
+ greater_is_better: None
+ group_by_length: false
+ half_precision_backend: auto
+ hub_always_push: false
+ hub_model_id: vits_dutch_finetuned_common_voice
+ hub_private_repo: None
+ hub_strategy: every_save
+ hub_token: <HUB_TOKEN>
+ ignore_data_skip: false
+ include_for_metrics: '[]'
+ include_inputs_for_metrics: false
+ include_num_input_tokens_seen: false
+ include_tokens_per_second: false
+ jit_mode_eval: false
+ label_names: None
+ label_smoothing_factor: 0.0
+ learning_rate: 3.0e-05
+ length_column_name: length
+ load_best_model_at_end: false
+ local_rank: 0
+ log_level: passive
+ log_level_replica: warning
+ log_on_each_node: true
+ logging_dir: ./tmp/vits_dutch_finetuned/runs/Dec23_16-54-15_DESKTOP-7MSM0GK
+ logging_first_step: false
+ logging_nan_inf_filter: true
+ logging_steps: 500
+ logging_strategy: steps
+ lr_decay: 0.999875
+ lr_scheduler_kwargs: '{}'
+ lr_scheduler_type: linear
+ max_grad_norm: 1.0
+ max_steps: 1700
+ metric_for_best_model: None
+ mp_parameters: ''
+ neftune_noise_alpha: None
+ no_cuda: false
+ num_train_epochs: 50
+ optim: adamw_torch
+ optim_args: None
+ optim_target_modules: None
+ output_dir: ./tmp/vits_dutch_finetuned
+ overwrite_output_dir: true
+ past_index: -1
+ per_device_eval_batch_size: 4
+ per_device_train_batch_size: 4
+ per_gpu_eval_batch_size: None
+ per_gpu_train_batch_size: None
+ prediction_loss_only: false
+ push_to_hub: true
+ push_to_hub_model_id: None
+ push_to_hub_organization: None
+ push_to_hub_token: <PUSH_TO_HUB_TOKEN>
+ ray_scope: last
+ remove_unused_columns: true
+ report_to: '[''tensorboard'', ''wandb'']'
+ restore_callback_states_from_checkpoint: false
+ resume_from_checkpoint: None
+ run_name: ./tmp/vits_dutch_finetuned
+ save_on_each_node: false
+ save_only_model: false
+ save_safetensors: true
+ save_steps: 500
+ save_strategy: steps
+ save_total_limit: None
+ seed: 123
+ skip_memory_metrics: true
+ split_batches: None
+ tf32: None
+ torch_compile: false
+ torch_compile_backend: None
+ torch_compile_mode: None
+ torch_empty_cache_steps: None
+ torchdynamo: None
+ tpu_metrics_debug: false
+ tpu_num_cores: None
+ train_batch_size: 4
+ use_cpu: false
+ use_ipex: false
+ use_legacy_prediction_loop: false
+ use_liger_kernel: false
+ use_mps_device: false
+ warmup_ratio: 0.01
+ warmup_steps: 0
+ weight_decay: 0.0
+ weight_disc: 3
+ weight_duration: 1
+ weight_fmaps: 1
+ weight_gen: 1
+ weight_kl: 1.0
+ weight_mel: 30
tmp/vits_dutch_finetuned/runs/Dec23_16-54-15_DESKTOP-7MSM0GK/vits_dutch_common_voice/events.out.tfevents.1734952167.DESKTOP-7MSM0GK.11439.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1fb8d50a8694df388f915c356fad84f2704216d8884f14fba067fb06b195aeb6
+ size 13388
tmp/vits_dutch_finetuned/runs/Dec23_16-58-47_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734952442.0954733/events.out.tfevents.1734952442.DESKTOP-7MSM0GK.12358.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36765fb0cfb66c74ddb4be9d08da38a589ab86ad7b29c4fb488b87cdd5a7a547
+ size 7964
tmp/vits_dutch_finetuned/runs/Dec23_16-58-47_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734952442.0982804/hparams.yml ADDED
@@ -0,0 +1,144 @@
+ accelerator_config: '{''split_batches'': False, ''dispatch_batches'': None, ''even_batches'':
+ True, ''use_seedable_sampler'': True, ''non_blocking'': False, ''gradient_accumulation_kwargs'':
+ None}'
+ adafactor: false
+ adam_beta1: 0.9
+ adam_beta2: 0.999
+ adam_epsilon: 1.0e-08
+ auto_find_batch_size: false
+ average_tokens_across_devices: false
+ batch_eval_metrics: false
+ bf16: false
+ bf16_full_eval: false
+ data_seed: None
+ dataloader_drop_last: false
+ dataloader_num_workers: 0
+ dataloader_persistent_workers: false
+ dataloader_pin_memory: true
+ dataloader_prefetch_factor: None
+ ddp_backend: None
+ ddp_broadcast_buffers: None
+ ddp_bucket_cap_mb: None
+ ddp_find_unused_parameters: None
+ ddp_timeout: 1800
+ debug: '[]'
+ deepspeed: None
+ disable_tqdm: false
+ dispatch_batches: None
+ do_eval: true
+ do_predict: false
+ do_step_schedule_per_epoch: true
+ do_train: true
+ eval_accumulation_steps: None
+ eval_batch_size: 4
+ eval_delay: 0
+ eval_do_concat_batches: true
+ eval_on_start: false
+ eval_steps: 25
+ eval_strategy: 'no'
+ eval_use_gather_object: false
+ evaluation_strategy: None
+ fp16: true
+ fp16_backend: auto
+ fp16_full_eval: false
+ fp16_opt_level: O1
+ fsdp: '[]'
+ fsdp_config: '{''min_num_params'': 0, ''xla'': False, ''xla_fsdp_v2'': False, ''xla_fsdp_grad_ckpt'':
+ False}'
+ fsdp_min_num_params: 0
+ fsdp_transformer_layer_cls_to_wrap: None
+ full_determinism: false
+ gradient_accumulation_steps: 2
+ gradient_checkpointing: false
+ gradient_checkpointing_kwargs: None
+ greater_is_better: None
+ group_by_length: false
+ half_precision_backend: auto
+ hub_always_push: false
+ hub_model_id: vits_dutch_finetuned_common_voice
+ hub_private_repo: None
+ hub_strategy: every_save
+ hub_token: <HUB_TOKEN>
+ ignore_data_skip: false
+ include_for_metrics: '[]'
+ include_inputs_for_metrics: false
+ include_num_input_tokens_seen: false
+ include_tokens_per_second: false
+ jit_mode_eval: false
+ label_names: None
+ label_smoothing_factor: 0.0
+ learning_rate: 3.0e-05
+ length_column_name: length
+ load_best_model_at_end: false
+ local_rank: 0
+ log_level: passive
+ log_level_replica: warning
+ log_on_each_node: true
+ logging_dir: ./tmp/vits_dutch_finetuned/runs/Dec23_16-58-47_DESKTOP-7MSM0GK
+ logging_first_step: false
+ logging_nan_inf_filter: true
+ logging_steps: 500
+ logging_strategy: steps
+ lr_decay: 0.999875
+ lr_scheduler_kwargs: '{}'
+ lr_scheduler_type: linear
+ max_grad_norm: 1.0
+ max_steps: 1700
+ metric_for_best_model: None
+ mp_parameters: ''
+ neftune_noise_alpha: None
+ no_cuda: false
+ num_train_epochs: 50
+ optim: adamw_torch
+ optim_args: None
+ optim_target_modules: None
+ output_dir: ./tmp/vits_dutch_finetuned
+ overwrite_output_dir: true
+ past_index: -1
+ per_device_eval_batch_size: 4
+ per_device_train_batch_size: 4
+ per_gpu_eval_batch_size: None
+ per_gpu_train_batch_size: None
+ prediction_loss_only: false
+ push_to_hub: true
+ push_to_hub_model_id: None
+ push_to_hub_organization: None
+ push_to_hub_token: <PUSH_TO_HUB_TOKEN>
+ ray_scope: last
+ remove_unused_columns: true
+ report_to: '[''tensorboard'', ''wandb'']'
+ restore_callback_states_from_checkpoint: false
+ resume_from_checkpoint: None
+ run_name: ./tmp/vits_dutch_finetuned
+ save_on_each_node: false
+ save_only_model: false
+ save_safetensors: true
+ save_steps: 500
+ save_strategy: steps
+ save_total_limit: None
+ seed: 123
+ skip_memory_metrics: true
+ split_batches: None
+ tf32: None
+ torch_compile: false
+ torch_compile_backend: None
+ torch_compile_mode: None
+ torch_empty_cache_steps: None
+ torchdynamo: None
+ tpu_metrics_debug: false
+ tpu_num_cores: None
+ train_batch_size: 4
+ use_cpu: false
+ use_ipex: false
+ use_legacy_prediction_loop: false
+ use_liger_kernel: false
+ use_mps_device: false
+ warmup_ratio: 0.01
+ warmup_steps: 0
+ weight_decay: 0.0
+ weight_disc: 3
+ weight_duration: 1
+ weight_fmaps: 1
+ weight_gen: 1
+ weight_kl: 1.0
+ weight_mel: 30
tmp/vits_dutch_finetuned/runs/Dec23_16-58-47_DESKTOP-7MSM0GK/vits_dutch_common_voice/events.out.tfevents.1734952440.DESKTOP-7MSM0GK.12358.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d694c71b22ae1f253725f1de86757adba98a934e7f89c046497553ecd46666a7
+ size 13388
tmp/vits_dutch_finetuned/runs/Dec23_17-01-28_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734952601.7539113/events.out.tfevents.1734952601.DESKTOP-7MSM0GK.12954.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:798cb43370f60286c01729f547f673d6bad734e8e053ebf4e6574b808ab8416c
+ size 7964
tmp/vits_dutch_finetuned/runs/Dec23_17-01-28_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734952601.7567434/hparams.yml ADDED
@@ -0,0 +1,144 @@
+ accelerator_config: '{''split_batches'': False, ''dispatch_batches'': None, ''even_batches'':
+ True, ''use_seedable_sampler'': True, ''non_blocking'': False, ''gradient_accumulation_kwargs'':
+ None}'
+ adafactor: false
+ adam_beta1: 0.9
+ adam_beta2: 0.999
+ adam_epsilon: 1.0e-08
+ auto_find_batch_size: false
+ average_tokens_across_devices: false
+ batch_eval_metrics: false
+ bf16: false
+ bf16_full_eval: false
+ data_seed: None
+ dataloader_drop_last: false
+ dataloader_num_workers: 0
+ dataloader_persistent_workers: false
+ dataloader_pin_memory: true
+ dataloader_prefetch_factor: None
+ ddp_backend: None
+ ddp_broadcast_buffers: None
+ ddp_bucket_cap_mb: None
+ ddp_find_unused_parameters: None
+ ddp_timeout: 1800
+ debug: '[]'
+ deepspeed: None
+ disable_tqdm: false
+ dispatch_batches: None
+ do_eval: true
+ do_predict: false
+ do_step_schedule_per_epoch: true
+ do_train: true
+ eval_accumulation_steps: None
+ eval_batch_size: 4
+ eval_delay: 0
+ eval_do_concat_batches: true
+ eval_on_start: false
+ eval_steps: 25
+ eval_strategy: 'no'
+ eval_use_gather_object: false
+ evaluation_strategy: None
+ fp16: true
+ fp16_backend: auto
+ fp16_full_eval: false
+ fp16_opt_level: O1
+ fsdp: '[]'
+ fsdp_config: '{''min_num_params'': 0, ''xla'': False, ''xla_fsdp_v2'': False, ''xla_fsdp_grad_ckpt'':
+ False}'
+ fsdp_min_num_params: 0
+ fsdp_transformer_layer_cls_to_wrap: None
+ full_determinism: false
+ gradient_accumulation_steps: 2
+ gradient_checkpointing: false
+ gradient_checkpointing_kwargs: None
+ greater_is_better: None
+ group_by_length: false
+ half_precision_backend: auto
+ hub_always_push: false
+ hub_model_id: vits_dutch_finetuned_common_voice
+ hub_private_repo: None
+ hub_strategy: every_save
+ hub_token: <HUB_TOKEN>
+ ignore_data_skip: false
+ include_for_metrics: '[]'
+ include_inputs_for_metrics: false
+ include_num_input_tokens_seen: false
+ include_tokens_per_second: false
+ jit_mode_eval: false
+ label_names: None
+ label_smoothing_factor: 0.0
+ learning_rate: 3.0e-05
+ length_column_name: length
+ load_best_model_at_end: false
+ local_rank: 0
+ log_level: passive
+ log_level_replica: warning
+ log_on_each_node: true
+ logging_dir: ./tmp/vits_dutch_finetuned/runs/Dec23_17-01-28_DESKTOP-7MSM0GK
+ logging_first_step: false
+ logging_nan_inf_filter: true
+ logging_steps: 500
+ logging_strategy: steps
+ lr_decay: 0.999875
+ lr_scheduler_kwargs: '{}'
+ lr_scheduler_type: linear
+ max_grad_norm: 1.0
+ max_steps: 1700
+ metric_for_best_model: None
+ mp_parameters: ''
+ neftune_noise_alpha: None
+ no_cuda: false
+ num_train_epochs: 50
+ optim: adamw_torch
+ optim_args: None
+ optim_target_modules: None
+ output_dir: ./tmp/vits_dutch_finetuned
+ overwrite_output_dir: true
+ past_index: -1
+ per_device_eval_batch_size: 4
+ per_device_train_batch_size: 4
+ per_gpu_eval_batch_size: None
+ per_gpu_train_batch_size: None
+ prediction_loss_only: false
+ push_to_hub: true
+ push_to_hub_model_id: None
+ push_to_hub_organization: None
+ push_to_hub_token: <PUSH_TO_HUB_TOKEN>
+ ray_scope: last
+ remove_unused_columns: true
+ report_to: '[''tensorboard'', ''wandb'']'
+ restore_callback_states_from_checkpoint: false
+ resume_from_checkpoint: None
+ run_name: ./tmp/vits_dutch_finetuned
+ save_on_each_node: false
+ save_only_model: false
+ save_safetensors: true
+ save_steps: 500
+ save_strategy: steps
+ save_total_limit: None
+ seed: 123
+ skip_memory_metrics: true
+ split_batches: None
+ tf32: None
+ torch_compile: false
+ torch_compile_backend: None
+ torch_compile_mode: None
+ torch_empty_cache_steps: None
+ torchdynamo: None
+ tpu_metrics_debug: false
+ tpu_num_cores: None
+ train_batch_size: 4
+ use_cpu: false
+ use_ipex: false
+ use_legacy_prediction_loop: false
+ use_liger_kernel: false
+ use_mps_device: false
+ warmup_ratio: 0.01
+ warmup_steps: 0
+ weight_decay: 0.0
+ weight_disc: 3
+ weight_duration: 1
+ weight_fmaps: 1
+ weight_gen: 1
+ weight_kl: 1.0
+ weight_mel: 30
tmp/vits_dutch_finetuned/runs/Dec23_17-01-28_DESKTOP-7MSM0GK/vits_dutch_common_voice/events.out.tfevents.1734952600.DESKTOP-7MSM0GK.12954.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e60334992af300424a1b6b60a9a12da20bbd8ed400157e4b7a095ce9be56830
+ size 13388
tmp/vits_dutch_finetuned/runs/Dec23_17-04-42_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734952795.457109/events.out.tfevents.1734952795.DESKTOP-7MSM0GK.13651.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bf7fc2be7b031f052457b7dc56ab507e38f8cb68e1d361efa16761ae53a8a4d
+ size 7964
tmp/vits_dutch_finetuned/runs/Dec23_17-04-42_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734952795.4594233/hparams.yml ADDED
@@ -0,0 +1,144 @@
+ accelerator_config: '{''split_batches'': False, ''dispatch_batches'': None, ''even_batches'':
+ True, ''use_seedable_sampler'': True, ''non_blocking'': False, ''gradient_accumulation_kwargs'':
+ None}'
+ adafactor: false
+ adam_beta1: 0.9
+ adam_beta2: 0.999
+ adam_epsilon: 1.0e-08
+ auto_find_batch_size: false
+ average_tokens_across_devices: false
+ batch_eval_metrics: false
+ bf16: false
+ bf16_full_eval: false
+ data_seed: None
+ dataloader_drop_last: false
+ dataloader_num_workers: 0
+ dataloader_persistent_workers: false
+ dataloader_pin_memory: true
+ dataloader_prefetch_factor: None
+ ddp_backend: None
+ ddp_broadcast_buffers: None
+ ddp_bucket_cap_mb: None
+ ddp_find_unused_parameters: None
+ ddp_timeout: 1800
+ debug: '[]'
+ deepspeed: None
+ disable_tqdm: false
+ dispatch_batches: None
+ do_eval: true
+ do_predict: false
+ do_step_schedule_per_epoch: true
+ do_train: true
+ eval_accumulation_steps: None
+ eval_batch_size: 4
+ eval_delay: 0
+ eval_do_concat_batches: true
+ eval_on_start: false
+ eval_steps: 25
+ eval_strategy: 'no'
+ eval_use_gather_object: false
+ evaluation_strategy: None
+ fp16: true
+ fp16_backend: auto
+ fp16_full_eval: false
+ fp16_opt_level: O1
+ fsdp: '[]'
+ fsdp_config: '{''min_num_params'': 0, ''xla'': False, ''xla_fsdp_v2'': False, ''xla_fsdp_grad_ckpt'':
+ False}'
+ fsdp_min_num_params: 0
+ fsdp_transformer_layer_cls_to_wrap: None
+ full_determinism: false
+ gradient_accumulation_steps: 2
+ gradient_checkpointing: false
+ gradient_checkpointing_kwargs: None
+ greater_is_better: None
+ group_by_length: false
+ half_precision_backend: auto
+ hub_always_push: false
+ hub_model_id: vits_dutch_finetuned_common_voice
+ hub_private_repo: None
+ hub_strategy: every_save
+ hub_token: <HUB_TOKEN>
+ ignore_data_skip: false
+ include_for_metrics: '[]'
+ include_inputs_for_metrics: false
+ include_num_input_tokens_seen: false
+ include_tokens_per_second: false
+ jit_mode_eval: false
+ label_names: None
+ label_smoothing_factor: 0.0
+ learning_rate: 3.0e-05
+ length_column_name: length
+ load_best_model_at_end: false
+ local_rank: 0
+ log_level: passive
+ log_level_replica: warning
+ log_on_each_node: true
+ logging_dir: ./tmp/vits_dutch_finetuned/runs/Dec23_17-04-42_DESKTOP-7MSM0GK
+ logging_first_step: false
+ logging_nan_inf_filter: true
+ logging_steps: 500
+ logging_strategy: steps
+ lr_decay: 0.999875
+ lr_scheduler_kwargs: '{}'
+ lr_scheduler_type: linear
+ max_grad_norm: 1.0
+ max_steps: 1700
+ metric_for_best_model: None
+ mp_parameters: ''
+ neftune_noise_alpha: None
+ no_cuda: false
+ num_train_epochs: 50
+ optim: adamw_torch
+ optim_args: None
+ optim_target_modules: None
+ output_dir: ./tmp/vits_dutch_finetuned
+ overwrite_output_dir: true
+ past_index: -1
+ per_device_eval_batch_size: 4
+ per_device_train_batch_size: 4
+ per_gpu_eval_batch_size: None
+ per_gpu_train_batch_size: None
+ prediction_loss_only: false
+ push_to_hub: true
+ push_to_hub_model_id: None
+ push_to_hub_organization: None
+ push_to_hub_token: <PUSH_TO_HUB_TOKEN>
+ ray_scope: last
+ remove_unused_columns: true
+ report_to: '[''tensorboard'', ''wandb'']'
+ restore_callback_states_from_checkpoint: false
+ resume_from_checkpoint: None
+ run_name: ./tmp/vits_dutch_finetuned
+ save_on_each_node: false
+ save_only_model: false
+ save_safetensors: true
+ save_steps: 500
+ save_strategy: steps
+ save_total_limit: None
+ seed: 123
+ skip_memory_metrics: true
+ split_batches: None
+ tf32: None
+ torch_compile: false
+ torch_compile_backend: None
+ torch_compile_mode: None
+ torch_empty_cache_steps: None
+ torchdynamo: None
+ tpu_metrics_debug: false
+ tpu_num_cores: None
+ train_batch_size: 4
+ use_cpu: false
+ use_ipex: false
+ use_legacy_prediction_loop: false
+ use_liger_kernel: false
+ use_mps_device: false
+ warmup_ratio: 0.01
+ warmup_steps: 0
+ weight_decay: 0.0
+ weight_disc: 3
+ weight_duration: 1
+ weight_fmaps: 1
+ weight_gen: 1
+ weight_kl: 1.0
+ weight_mel: 30
tmp/vits_dutch_finetuned/runs/Dec23_17-04-42_DESKTOP-7MSM0GK/vits_dutch_common_voice/events.out.tfevents.1734952793.DESKTOP-7MSM0GK.13651.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9b9ae09f1c0e5db3b17c1dacfdfb9f391d98863bdc60ed145e86447d4de51a6
+ size 13388
tmp/vits_dutch_finetuned/runs/Dec23_17-10-29_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734953143.6386623/events.out.tfevents.1734953143.DESKTOP-7MSM0GK.14803.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6056da2dcade4f231a13501c2d478ae8fdc1e144971ff6b38ec3178b1c32822
+ size 7964
tmp/vits_dutch_finetuned/runs/Dec23_17-10-29_DESKTOP-7MSM0GK/vits_dutch_common_voice/1734953143.6412678/hparams.yml ADDED
@@ -0,0 +1,144 @@
+ accelerator_config: '{''split_batches'': False, ''dispatch_batches'': None, ''even_batches'':
+ True, ''use_seedable_sampler'': True, ''non_blocking'': False, ''gradient_accumulation_kwargs'':
+ None}'
+ adafactor: false
+ adam_beta1: 0.9
+ adam_beta2: 0.999
+ adam_epsilon: 1.0e-08
+ auto_find_batch_size: false
+ average_tokens_across_devices: false
+ batch_eval_metrics: false
+ bf16: false
+ bf16_full_eval: false
+ data_seed: None
+ dataloader_drop_last: false
+ dataloader_num_workers: 0
+ dataloader_persistent_workers: false
+ dataloader_pin_memory: true
+ dataloader_prefetch_factor: None
+ ddp_backend: None
+ ddp_broadcast_buffers: None
+ ddp_bucket_cap_mb: None
+ ddp_find_unused_parameters: None
+ ddp_timeout: 1800
+ debug: '[]'
+ deepspeed: None
+ disable_tqdm: false
+ dispatch_batches: None
+ do_eval: true
+ do_predict: false
+ do_step_schedule_per_epoch: true
+ do_train: true
+ eval_accumulation_steps: None
+ eval_batch_size: 4
+ eval_delay: 0
+ eval_do_concat_batches: true
+ eval_on_start: false
+ eval_steps: 25
+ eval_strategy: 'no'
+ eval_use_gather_object: false
+ evaluation_strategy: None
+ fp16: true
+ fp16_backend: auto
+ fp16_full_eval: false
+ fp16_opt_level: O1
+ fsdp: '[]'
+ fsdp_config: '{''min_num_params'': 0, ''xla'': False, ''xla_fsdp_v2'': False, ''xla_fsdp_grad_ckpt'':
+ False}'
+ fsdp_min_num_params: 0
+ fsdp_transformer_layer_cls_to_wrap: None
+ full_determinism: false
+ gradient_accumulation_steps: 2
+ gradient_checkpointing: false
+ gradient_checkpointing_kwargs: None
+ greater_is_better: None
+ group_by_length: false
+ half_precision_backend: auto
+ hub_always_push: false
+ hub_model_id: vits_dutch_finetuned_common_voice
+ hub_private_repo: None
+ hub_strategy: every_save
+ hub_token: <HUB_TOKEN>
+ ignore_data_skip: false
+ include_for_metrics: '[]'
+ include_inputs_for_metrics: false
+ include_num_input_tokens_seen: false
+ include_tokens_per_second: false
+ jit_mode_eval: false
+ label_names: None
+ label_smoothing_factor: 0.0
+ learning_rate: 3.0e-05
+ length_column_name: length
+ load_best_model_at_end: false
+ local_rank: 0
+ log_level: passive
+ log_level_replica: warning
+ log_on_each_node: true
+ logging_dir: ./tmp/vits_dutch_finetuned/runs/Dec23_17-10-29_DESKTOP-7MSM0GK
+ logging_first_step: false
+ logging_nan_inf_filter: true
+ logging_steps: 500
+ logging_strategy: steps
+ lr_decay: 0.999875
+ lr_scheduler_kwargs: '{}'
+ lr_scheduler_type: linear
+ max_grad_norm: 1.0
+ max_steps: 1700
+ metric_for_best_model: None
+ mp_parameters: ''
+ neftune_noise_alpha: None
+ no_cuda: false
+ num_train_epochs: 50
+ optim: adamw_torch
+ optim_args: None
+ optim_target_modules: None
+ output_dir: ./tmp/vits_dutch_finetuned
+ overwrite_output_dir: true
+ past_index: -1
+ per_device_eval_batch_size: 4
+ per_device_train_batch_size: 4
+ per_gpu_eval_batch_size: None
+ per_gpu_train_batch_size: None
+ prediction_loss_only: false
+ push_to_hub: true
+ push_to_hub_model_id: None
+ push_to_hub_organization: None
+ push_to_hub_token: <PUSH_TO_HUB_TOKEN>
+ ray_scope: last
+ remove_unused_columns: true
+ report_to: '[''tensorboard'', ''wandb'']'
+ restore_callback_states_from_checkpoint: false
+ resume_from_checkpoint: None
+ run_name: ./tmp/vits_dutch_finetuned
+ save_on_each_node: false
+ save_only_model: false
+ save_safetensors: true
+ save_steps: 500
+ save_strategy: steps
+ save_total_limit: None
+ seed: 123
+ skip_memory_metrics: true
+ split_batches: None
+ tf32: None
+ torch_compile: false
+ torch_compile_backend: None
+ torch_compile_mode: None
+ torch_empty_cache_steps: None
+ torchdynamo: None
+ tpu_metrics_debug: false
+ tpu_num_cores: None
+ train_batch_size: 4
+ use_cpu: false
+ use_ipex: false
+ use_legacy_prediction_loop: false
+ use_liger_kernel: false
+ use_mps_device: false
+ warmup_ratio: 0.01
+ warmup_steps: 0
+ weight_decay: 0.0
+ weight_disc: 3
+ weight_duration: 1
+ weight_fmaps: 1
+ weight_gen: 1
+ weight_kl: 1.0
+ weight_mel: 30