w11wo committed on
Commit
aa6d0a8
·
1 Parent(s): fbf1b69

Added Model

Browse files
D_800000.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea05db6071d319a7d586e437ca10baf1367d8a1e2136c8bfae19b515c4e8b5af
3
+ size 561099642
G_800000.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7acf75d12d84635f2a96b6a5aedaf6602c9b4415e8b5b36e9ce711008448878f
3
+ size 438217614
README.md CHANGED
@@ -1,3 +1,48 @@
1
  ---
2
- license: apache-2.0
 
 
 
 
 
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ language: sw
3
+ license: cc-by-sa-4.0
4
+ tags:
5
+ - audio
6
+ - text-to-speech
7
+ inference: false
8
+ datasets:
9
+ - bookbot/OpenBible_Swahili
10
  ---
11
+
12
+ # VITS Base sw-KE-OpenBible
13
+
14
+ VITS Base sw-KE-OpenBible is an end-to-end text-to-speech model based on the [VITS](https://arxiv.org/abs/2106.06103) architecture. This model was trained from scratch on a real audio dataset. The list of real speakers includes:
15
+
16
+ - sw-KE-OpenBible
17
+
18
+ The model's [vocabulary](https://huggingface.co/bookbot/vits-base-sw-KE-OpenBible/blob/main/symbols.py) contains the different IPA phonemes found in [gruut](https://github.com/rhasspy/gruut).
19
+
20
+ This model was trained using the [VITS](https://github.com/jaywalnut310/vits) framework. All training was done on a Scaleway L40S VM with a NVIDIA L40S GPU. All necessary scripts used for training can be found in the [Files and versions](https://huggingface.co/bookbot/vits-base-sw-KE-OpenBible/tree/main) tab, as well as the [Training metrics](https://huggingface.co/bookbot/vits-base-sw-KE-OpenBible/tensorboard) logged via Tensorboard.
21
+
22
+ ## Model
23
+
24
+ | Model | SR (Hz) | Mel range (Hz) | FFT / Hop / Win | #epochs |
25
+ | ------------------------- | ------- | -------------- | ----------------- | ------- |
26
+ | VITS Base sw-KE-OpenBible | 44.1K | 0-null | 2048 / 512 / 2048 | 12000 |
27
+
28
+ ## Training procedure
29
+
30
+ ### Prepare Data
31
+
32
+ ```sh
33
+ python preprocess.py \
34
+ --text_index 1 \
35
+ --filelists filelists/sw-KE-OpenBible_text_train_filelist.txt filelists/sw-KE-OpenBible_text_val_filelist.txt \
36
+ --text_cleaners swahili_cleaners
37
+ ```
38
+
39
+ ### Train
40
+
41
+ ```sh
42
+ python train.py -c configs/sw_ke_openbible_base.json -m sw_ke_openbible_base
43
+ ```
44
+
45
+ ## Frameworks
46
+
47
+ - PyTorch 2.2.2
48
+ - [VITS](https://github.com/bookbot-hive/vits)
config.json ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "eval_interval": 100000,
5
+ "seed": 1234,
6
+ "epochs": 12000,
7
+ "learning_rate": 2e-4,
8
+ "betas": [
9
+ 0.8,
10
+ 0.99
11
+ ],
12
+ "eps": 1e-9,
13
+ "batch_size": 64,
14
+ "fp16_run": true,
15
+ "lr_decay": 0.999875,
16
+ "segment_size": 8192,
17
+ "init_lr_ratio": 1,
18
+ "warmup_epochs": 0,
19
+ "c_mel": 45,
20
+ "c_kl": 1.0
21
+ },
22
+ "data": {
23
+ "training_files": "filelists/sw-KE-OpenBible_text_train_filelist.txt.cleaned",
24
+ "validation_files": "filelists/sw-KE-OpenBible_text_val_filelist.txt.cleaned",
25
+ "text_cleaners": [
26
+ "swahili_cleaners"
27
+ ],
28
+ "max_wav_value": 1.0,
29
+ "sampling_rate": 44100,
30
+ "filter_length": 2048,
31
+ "hop_length": 512,
32
+ "win_length": 2048,
33
+ "n_mel_channels": 80,
34
+ "mel_fmin": 0.0,
35
+ "mel_fmax": null,
36
+ "add_blank": true,
37
+ "n_speakers": 0,
38
+ "cleaned_text": true
39
+ },
40
+ "model": {
41
+ "inter_channels": 192,
42
+ "hidden_channels": 192,
43
+ "filter_channels": 768,
44
+ "n_heads": 2,
45
+ "n_layers": 6,
46
+ "kernel_size": 3,
47
+ "p_dropout": 0.1,
48
+ "resblock": "1",
49
+ "resblock_kernel_sizes": [
50
+ 3,
51
+ 7,
52
+ 11
53
+ ],
54
+ "resblock_dilation_sizes": [
55
+ [
56
+ 1,
57
+ 3,
58
+ 5
59
+ ],
60
+ [
61
+ 1,
62
+ 3,
63
+ 5
64
+ ],
65
+ [
66
+ 1,
67
+ 3,
68
+ 5
69
+ ]
70
+ ],
71
+ "upsample_rates": [
72
+ 8,
73
+ 8,
74
+ 2,
75
+ 2,
76
+ 2
77
+ ],
78
+ "upsample_initial_channel": 512,
79
+ "upsample_kernel_sizes": [
80
+ 16,
81
+ 16,
82
+ 4,
83
+ 4,
84
+ 4
85
+ ],
86
+ "n_layers_q": 3,
87
+ "use_spectral_norm": false
88
+ }
89
+ }
eval/events.out.tfevents.1713522751.bookbot-l40s.75863.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cb67df477e4d2e8492f9ab32f08446179e2c97d4e30ce03710c0d1a0adf99bdf
3
+ size 7399613
events.out.tfevents.1713522751.bookbot-l40s.75863.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f58a06f01d3f10c0aac2d1c93cda6171e16fceafd091aeda70d2ed3c0f0454fa
3
+ size 516111967
symbols.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ from https://github.com/keithito/tacotron """
2
+
3
+ """
4
+ Defines the set of symbols used in text input to the model.
5
+ """
6
+ _pad = "_"
7
+ _punctuation = ";:,.!? "
8
+ _letters_ipa = [
9
+ "f",
10
+ "h",
11
+ "i",
12
+ "j",
13
+ "k",
14
+ "l",
15
+ "m",
16
+ "n",
17
+ "p",
18
+ "s",
19
+ "t",
20
+ "t͡ʃ",
21
+ "u",
22
+ "v",
23
+ "w",
24
+ "x",
25
+ "z",
26
+ "ð",
27
+ "ɑ",
28
+ "ɓ",
29
+ "ɔ",
30
+ "ɗ",
31
+ "ɛ",
32
+ "ɠ",
33
+ "ɣ",
34
+ "ɾ",
35
+ "ʃ",
36
+ "ʄ",
37
+ "θ",
38
+ "ᵐɓ",
39
+ "ᵑg",
40
+ "ᶬv",
41
+ "ⁿz",
42
+ "ⁿɗ",
43
+ "ⁿɗ͡ʒ",
44
+ ]
45
+
46
+
47
+ # Export all symbols:
48
+ symbols = [_pad] + list(_punctuation) + _letters_ipa
49
+
50
+ # Special symbol ids
51
+ SPACE_ID = symbols.index(" ")