Zhijunv2 kevinwang676 commited on
Commit
eef6d60
0 Parent(s):

Duplicate from kevinwang676/Bark-with-Voice-Cloning

Browse files

Co-authored-by: Kevin Wang <[email protected]>

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .flake8 +17 -0
  2. .gitattributes +44 -0
  3. .gitignore +160 -0
  4. DeepFilterNet2/checkpoints/model_96.ckpt.best +3 -0
  5. DeepFilterNet2/config.ini +108 -0
  6. README.md +14 -0
  7. app.py +674 -0
  8. bark/__init__.py +2 -0
  9. bark/api.py +158 -0
  10. bark/assets/prompts/announcer.npz +3 -0
  11. bark/assets/prompts/v2/en_speaker_0.npz +3 -0
  12. bark/assets/prompts/v2/en_speaker_1.npz +3 -0
  13. bark/assets/prompts/v2/en_speaker_2.npz +3 -0
  14. bark/assets/prompts/v2/en_speaker_3.npz +3 -0
  15. bark/assets/prompts/v2/en_speaker_4.npz +3 -0
  16. bark/assets/prompts/v2/en_speaker_5.npz +3 -0
  17. bark/assets/prompts/v2/en_speaker_6.npz +3 -0
  18. bark/assets/prompts/v2/en_speaker_7.npz +3 -0
  19. bark/assets/prompts/v2/en_speaker_8.npz +3 -0
  20. bark/assets/prompts/v2/en_speaker_9.npz +3 -0
  21. bark/assets/prompts/v2/zh_speaker_0.npz +3 -0
  22. bark/assets/prompts/v2/zh_speaker_1.npz +3 -0
  23. bark/assets/prompts/v2/zh_speaker_2.npz +3 -0
  24. bark/assets/prompts/v2/zh_speaker_3.npz +3 -0
  25. bark/assets/prompts/v2/zh_speaker_4.npz +3 -0
  26. bark/assets/prompts/v2/zh_speaker_5.npz +3 -0
  27. bark/assets/prompts/v2/zh_speaker_6.npz +3 -0
  28. bark/assets/prompts/v2/zh_speaker_7.npz +3 -0
  29. bark/assets/prompts/v2/zh_speaker_8.npz +3 -0
  30. bark/assets/prompts/v2/zh_speaker_9.npz +3 -0
  31. bark/generation.py +864 -0
  32. bark/hubert/__init__.py +0 -0
  33. bark/hubert/customtokenizer.py +195 -0
  34. bark/hubert/hubert_manager.py +48 -0
  35. bark/hubert/pre_kmeans_hubert.py +107 -0
  36. bark/model.py +218 -0
  37. bark/model_fine.py +149 -0
  38. bark/settings.py +7 -0
  39. cloning/__init__.py +0 -0
  40. cloning/clonevoice.py +68 -0
  41. config.yaml +8 -0
  42. packages.txt +1 -0
  43. pyproject.toml +69 -0
  44. requirements.txt +18 -0
  45. samples/dkitchen.wav +3 -0
  46. samples/dliving.wav +0 -0
  47. samples/nriver.wav +3 -0
  48. samples/p232_013_clean.wav +0 -0
  49. samples/p232_019_clean.wav +0 -0
  50. samples/scafe.wav +3 -0
.flake8 ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [flake8]
2
+ ignore = E203, E266, E501, W503
3
+ max-line-length = 100
4
+ import-order-style = google
5
+ application-import-names = flake8
6
+ select = B,C,E,F,W,T4,B9
7
+ exclude =
8
+ .tox,
9
+ .git,
10
+ __pycache__,
11
+ docs,
12
+ sbatch,
13
+ .venv,
14
+ *.pyc,
15
+ *.egg-info,
16
+ .cache,
17
+ .eggs
.gitattributes ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ SE_checkpoint.pth.tar filter=lfs diff=lfs merge=lfs -text
36
+ best_model.pth.tar filter=lfs diff=lfs merge=lfs -text
37
+ nana_longest_vocal.wav filter=lfs diff=lfs merge=lfs -text
38
+ test.wav filter=lfs diff=lfs merge=lfs -text
39
+ reference.wav filter=lfs diff=lfs merge=lfs -text
40
+ ref.wav filter=lfs diff=lfs merge=lfs -text
41
+ DeepFilterNet2/checkpoints/model_96.ckpt.best filter=lfs diff=lfs merge=lfs -text
42
+ samples/dkitchen.wav filter=lfs diff=lfs merge=lfs -text
43
+ samples/nriver.wav filter=lfs diff=lfs merge=lfs -text
44
+ samples/scafe.wav filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __pycache__/
2
+ /outputs
3
+ /speakers
4
+ .vs
5
+ *.npz
6
+ *.wav
7
+ *.npy
8
+ .vs/
9
+ /models
10
+ /bark_ui_enhanced.egg-info
11
+ /build/lib/bark
12
+ *.pth
13
+ *.pt
14
+ *.zip
15
+
16
+ # Own stuff
17
+ *.wav
18
+ *.png
19
+ *.pdf
20
+ out/
21
+ export/
22
+ DeepFilterNet/poetry.lock
23
+ gradio_cached_examples/
24
+
25
+ ### Rust gitignore ###
26
+
27
+ # Generated by Cargo
28
+ # will have compiled files and executables
29
+ debug/
30
+ target/
31
+
32
+ # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
33
+ # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
34
+ Cargo.lock
35
+
36
+ # These are backup files generated by rustfmt
37
+ **/*.rs.bk
38
+
39
+ ### Python gitignore ###
40
+
41
+ # Byte-compiled / optimized / DLL files
42
+ __pycache__/
43
+ *.py[cod]
44
+ *$py.class
45
+
46
+ # C extensions
47
+ *.so
48
+
49
+ # Distribution / packaging
50
+ .Python
51
+ build/
52
+ develop-eggs/
53
+ dist/
54
+ downloads/
55
+ eggs/
56
+ .eggs/
57
+ lib/
58
+ lib64/
59
+ parts/
60
+ sdist/
61
+ var/
62
+ wheels/
63
+ pip-wheel-metadata/
64
+ share/python-wheels/
65
+ *.egg-info/
66
+ .installed.cfg
67
+ *.egg
68
+ MANIFEST
69
+
70
+ # PyInstaller
71
+ # Usually these files are written by a python script from a template
72
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
73
+ *.manifest
74
+ *.spec
75
+
76
+ # Installer logs
77
+ pip-log.txt
78
+ pip-delete-this-directory.txt
79
+
80
+ # Unit test / coverage reports
81
+ typings
82
+ htmlcov/
83
+ .tox/
84
+ .nox/
85
+ .coverage
86
+ .coverage.*
87
+ .cache
88
+ nosetests.xml
89
+ coverage.xml
90
+ *.cover
91
+ .hypothesis/
92
+ .pytest_cache/
93
+
94
+ # Translations
95
+ *.mo
96
+ *.pot
97
+
98
+ # Django stuff:
99
+ *.log
100
+ local_settings.py
101
+ db.sqlite3
102
+
103
+ # Flask stuff:
104
+ instance/
105
+ .webassets-cache
106
+
107
+ # Scrapy stuff:
108
+ .scrapy
109
+
110
+ # Sphinx documentation
111
+ docs/_build/
112
+
113
+ # PyBuilder
114
+ target/
115
+
116
+ # Jupyter Notebook
117
+ .ipynb_checkpoints
118
+
119
+ # IPython
120
+ profile_default/
121
+ ipython_config.py
122
+
123
+ # pyenv
124
+ .python-version
125
+
126
+ # celery beat schedule file
127
+ celerybeat-schedule
128
+
129
+ # SageMath parsed files
130
+ *.sage.py
131
+
132
+ # Environments
133
+ .env
134
+ .venv
135
+ env/
136
+ venv/
137
+ ENV/
138
+ env.bak/
139
+ venv.bak/
140
+
141
+ # Spyder project settings
142
+ .spyderproject
143
+ .spyproject
144
+
145
+ # Rope project settings
146
+ .ropeproject
147
+
148
+ # mkdocs documentation
149
+ /site
150
+
151
+ # mypy
152
+ .mypy_cache/
153
+ .dmypy.json
154
+ dmypy.json
155
+
156
+ # Pyre type checker
157
+ .pyre/
158
+
159
+ # IDE
160
+ .idea
DeepFilterNet2/checkpoints/model_96.ckpt.best ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb5eccb429e675bb4ec5ec9e280f048bfff9787b40bd3eb835fd11509eb14a3e
3
+ size 9397209
DeepFilterNet2/config.ini ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [train]
2
+ seed = 43
3
+ device =
4
+ model = deepfilternet2
5
+ jit = false
6
+ mask_only = false
7
+ df_only = false
8
+ batch_size = 96
9
+ batch_size_eval = 128
10
+ num_workers = 16
11
+ max_sample_len_s = 3.0
12
+ p_atten_lim = 0.0
13
+ p_reverb = 0.1
14
+ overfit = false
15
+ max_epochs = 100
16
+ log_freq = 100
17
+ log_timings = False
18
+ validation_criteria = loss
19
+ validation_criteria_rule = min
20
+ early_stopping_patience = 15
21
+ global_ds_sampling_f = 1
22
+ num_prefetch_batches = 8
23
+ dataloader_snrs = -5,0,5,10,20,40
24
+ detect_anomaly = false
25
+ batch_size_scheduling = 0/8,1/16,2/24,5/32,10/64,20/128,40/9999
26
+ start_eval = true
27
+ validation_set_caching = false
28
+
29
+ [df]
30
+ sr = 48000
31
+ fft_size = 960
32
+ hop_size = 480
33
+ nb_erb = 32
34
+ nb_df = 96
35
+ norm_tau = 1
36
+ lsnr_max = 35
37
+ lsnr_min = -15
38
+ min_nb_erb_freqs = 2
39
+ pad_mode = input_specf
40
+
41
+ [deepfilternet]
42
+ conv_lookahead = 2
43
+ conv_ch = 64
44
+ conv_depthwise = True
45
+ emb_hidden_dim = 256
46
+ emb_num_layers = 3
47
+ gru_groups = 8
48
+ linear_groups = 8
49
+ conv_dec_mode = transposed
50
+ convt_depthwise = True
51
+ mask_pf = False
52
+ df_order = 5
53
+ df_lookahead = 2
54
+ df_hidden_dim = 256
55
+ df_num_layers = 2
56
+ dfop_method = df
57
+ group_shuffle = False
58
+ conv_kernel = 1,3
59
+ df_gru_skip = none
60
+ df_output_layer = groupedlinear
61
+ gru_type = squeeze
62
+ df_pathway_kernel_size_t = 5
63
+ df_n_iter = 1
64
+ enc_concat = True
65
+ conv_kernel_inp = 3,3
66
+
67
+ [localsnrloss]
68
+ factor = 1e-3
69
+
70
+ [maskloss]
71
+ factor = 0
72
+ mask = iam
73
+ gamma = 0.6
74
+ gamma_pred = 0.6
75
+ f_under = 1
76
+
77
+ [spectralloss]
78
+ factor_magnitude = 1000
79
+ factor_complex = 1000
80
+ gamma = 0.3
81
+
82
+ [dfalphaloss]
83
+ factor = 0.0
84
+
85
+ [multiresspecloss]
86
+ factor = 500
87
+ factor_complex = 500
88
+ gamma = 0.3
89
+ fft_sizes = 256,512,1024
90
+
91
+ [optim]
92
+ lr = 0.001
93
+ momentum = 0
94
+ weight_decay = 1e-12
95
+ weight_decay_end = 0.05
96
+ optimizer = adamw
97
+ lr_min = 1e-06
98
+ lr_warmup = 0.0001
99
+ warmup_epochs = 3
100
+ lr_cycle_mul = 1.0
101
+ lr_cycle_decay = 0.5
102
+ lr_cycle_limit = 1
103
+ lr_update_per_epoch = False
104
+ lr_cycle_epochs = -1
105
+
106
+ [sdrloss]
107
+ factor = 0.0
108
+ segmental_ws = 0
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Bark with Voice Cloning
3
+ emoji: 📊
4
+ colorFrom: purple
5
+ colorTo: purple
6
+ sdk: gradio
7
+ sdk_version: 3.34.0
8
+ app_file: app.py
9
+ pinned: true
10
+ license: mit
11
+ duplicated_from: kevinwang676/Bark-with-Voice-Cloning
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,674 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from cProfile import label
2
+ import dataclasses
3
+ from distutils.command.check import check
4
+ from doctest import Example
5
+ import gradio as gr
6
+ import os
7
+ import sys
8
+ import numpy as np
9
+ import logging
10
+ import torch
11
+ import pytorch_seed
12
+ import time
13
+
14
+
15
+ import math
16
+ import tempfile
17
+ from typing import Optional, Tuple, Union
18
+
19
+ import matplotlib.pyplot as plt
20
+ from loguru import logger
21
+ from PIL import Image
22
+ from torch import Tensor
23
+ from torchaudio.backend.common import AudioMetaData
24
+
25
+ from df import config
26
+ from df.enhance import enhance, init_df, load_audio, save_audio
27
+ from df.io import resample
28
+
29
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
30
+ model, df, _ = init_df("./DeepFilterNet2", config_allow_defaults=True)
31
+ model = model.to(device=device).eval()
32
+
33
+ fig_noisy: plt.Figure
34
+ fig_enh: plt.Figure
35
+ ax_noisy: plt.Axes
36
+ ax_enh: plt.Axes
37
+ fig_noisy, ax_noisy = plt.subplots(figsize=(15.2, 4))
38
+ fig_noisy.set_tight_layout(True)
39
+ fig_enh, ax_enh = plt.subplots(figsize=(15.2, 4))
40
+ fig_enh.set_tight_layout(True)
41
+
42
+ NOISES = {
43
+ "None": None,
44
+ "Kitchen": "samples/dkitchen.wav",
45
+ "Living Room": "samples/dliving.wav",
46
+ "River": "samples/nriver.wav",
47
+ "Cafe": "samples/scafe.wav",
48
+ }
49
+
50
+
51
+ from xml.sax import saxutils
52
+ from bark.api import generate_with_settings
53
+ from bark.api import save_as_prompt
54
+ from util.settings import Settings
55
+ #import nltk
56
+
57
+ from bark import SAMPLE_RATE
58
+ from cloning.clonevoice import clone_voice
59
+ from bark.generation import SAMPLE_RATE, preload_models, _load_history_prompt, codec_decode
60
+ from scipy.io.wavfile import write as write_wav
61
+ from util.parseinput import split_and_recombine_text, build_ssml, is_ssml, create_clips_from_ssml
62
+ from datetime import datetime
63
+ from tqdm.auto import tqdm
64
+ from util.helper import create_filename, add_id3_tag
65
+ from swap_voice import swap_voice_from_audio
66
+ from training.training_prepare import prepare_semantics_from_text, prepare_wavs_from_semantics
67
+ from training.train import training_prepare_files, train
68
+
69
+
70
+ # Denoise
71
+
72
+ def mix_at_snr(clean, noise, snr, eps=1e-10):
73
+ """Mix clean and noise signal at a given SNR.
74
+ Args:
75
+ clean: 1D Tensor with the clean signal to mix.
76
+ noise: 1D Tensor of shape.
77
+ snr: Signal to noise ratio.
78
+ Returns:
79
+ clean: 1D Tensor with gain changed according to the snr.
80
+ noise: 1D Tensor with the combined noise channels.
81
+ mix: 1D Tensor with added clean and noise signals.
82
+ """
83
+ clean = torch.as_tensor(clean).mean(0, keepdim=True)
84
+ noise = torch.as_tensor(noise).mean(0, keepdim=True)
85
+ if noise.shape[1] < clean.shape[1]:
86
+ noise = noise.repeat((1, int(math.ceil(clean.shape[1] / noise.shape[1]))))
87
+ max_start = int(noise.shape[1] - clean.shape[1])
88
+ start = torch.randint(0, max_start, ()).item() if max_start > 0 else 0
89
+ logger.debug(f"start: {start}, {clean.shape}")
90
+ noise = noise[:, start : start + clean.shape[1]]
91
+ E_speech = torch.mean(clean.pow(2)) + eps
92
+ E_noise = torch.mean(noise.pow(2))
93
+ K = torch.sqrt((E_noise / E_speech) * 10 ** (snr / 10) + eps)
94
+ noise = noise / K
95
+ mixture = clean + noise
96
+ logger.debug("mixture: {mixture.shape}")
97
+ assert torch.isfinite(mixture).all()
98
+ max_m = mixture.abs().max()
99
+ if max_m > 1:
100
+ logger.warning(f"Clipping detected during mixing. Reducing gain by {1/max_m}")
101
+ clean, noise, mixture = clean / max_m, noise / max_m, mixture / max_m
102
+ return clean, noise, mixture
103
+
104
+
105
+ def load_audio_gradio(
106
+ audio_or_file: Union[None, str, Tuple[int, np.ndarray]], sr: int
107
+ ) -> Optional[Tuple[Tensor, AudioMetaData]]:
108
+ if audio_or_file is None:
109
+ return None
110
+ if isinstance(audio_or_file, str):
111
+ if audio_or_file.lower() == "none":
112
+ return None
113
+ # First try default format
114
+ audio, meta = load_audio(audio_or_file, sr)
115
+ else:
116
+ meta = AudioMetaData(-1, -1, -1, -1, "")
117
+ assert isinstance(audio_or_file, (tuple, list))
118
+ meta.sample_rate, audio_np = audio_or_file
119
+ # Gradio documentation says, the shape is [samples, 2], but apparently sometimes its not.
120
+ audio_np = audio_np.reshape(audio_np.shape[0], -1).T
121
+ if audio_np.dtype == np.int16:
122
+ audio_np = (audio_np / (1 << 15)).astype(np.float32)
123
+ elif audio_np.dtype == np.int32:
124
+ audio_np = (audio_np / (1 << 31)).astype(np.float32)
125
+ audio = resample(torch.from_numpy(audio_np), meta.sample_rate, sr)
126
+ return audio, meta
127
+
128
+
129
+ def demo_fn(speech_upl: str, noise_type: str, snr: int, mic_input: str):
130
+ if mic_input:
131
+ speech_upl = mic_input
132
+ sr = config("sr", 48000, int, section="df")
133
+ logger.info(f"Got parameters speech_upl: {speech_upl}, noise: {noise_type}, snr: {snr}")
134
+ snr = int(snr)
135
+ noise_fn = NOISES[noise_type]
136
+ meta = AudioMetaData(-1, -1, -1, -1, "")
137
+ max_s = 1000 # limit to 10 seconds
138
+ if speech_upl is not None:
139
+ sample, meta = load_audio(speech_upl, sr)
140
+ max_len = max_s * sr
141
+ if sample.shape[-1] > max_len:
142
+ start = torch.randint(0, sample.shape[-1] - max_len, ()).item()
143
+ sample = sample[..., start : start + max_len]
144
+ else:
145
+ sample, meta = load_audio("samples/p232_013_clean.wav", sr)
146
+ sample = sample[..., : max_s * sr]
147
+ if sample.dim() > 1 and sample.shape[0] > 1:
148
+ assert (
149
+ sample.shape[1] > sample.shape[0]
150
+ ), f"Expecting channels first, but got {sample.shape}"
151
+ sample = sample.mean(dim=0, keepdim=True)
152
+ logger.info(f"Loaded sample with shape {sample.shape}")
153
+ if noise_fn is not None:
154
+ noise, _ = load_audio(noise_fn, sr) # type: ignore
155
+ logger.info(f"Loaded noise with shape {noise.shape}")
156
+ _, _, sample = mix_at_snr(sample, noise, snr)
157
+ logger.info("Start denoising audio")
158
+ enhanced = enhance(model, df, sample)
159
+ logger.info("Denoising finished")
160
+ lim = torch.linspace(0.0, 1.0, int(sr * 0.15)).unsqueeze(0)
161
+ lim = torch.cat((lim, torch.ones(1, enhanced.shape[1] - lim.shape[1])), dim=1)
162
+ enhanced = enhanced * lim
163
+ if meta.sample_rate != sr:
164
+ enhanced = resample(enhanced, sr, meta.sample_rate)
165
+ sample = resample(sample, sr, meta.sample_rate)
166
+ sr = meta.sample_rate
167
+ enhanced_wav = tempfile.NamedTemporaryFile(suffix="enhanced.wav", delete=False).name
168
+ save_audio(enhanced_wav, enhanced, sr)
169
+ logger.info(f"saved audios: {enhanced_wav}")
170
+ ax_noisy.clear()
171
+ ax_enh.clear()
172
+ # noisy_wav = gr.make_waveform(noisy_fn, bar_count=200)
173
+ # enh_wav = gr.make_waveform(enhanced_fn, bar_count=200)
174
+ return enhanced_wav
175
+
176
+
177
+ def specshow(
178
+ spec,
179
+ ax=None,
180
+ title=None,
181
+ xlabel=None,
182
+ ylabel=None,
183
+ sr=48000,
184
+ n_fft=None,
185
+ hop=None,
186
+ t=None,
187
+ f=None,
188
+ vmin=-100,
189
+ vmax=0,
190
+ xlim=None,
191
+ ylim=None,
192
+ cmap="inferno",
193
+ ):
194
+ """Plots a spectrogram of shape [F, T]"""
195
+ spec_np = spec.cpu().numpy() if isinstance(spec, torch.Tensor) else spec
196
+ if ax is not None:
197
+ set_title = ax.set_title
198
+ set_xlabel = ax.set_xlabel
199
+ set_ylabel = ax.set_ylabel
200
+ set_xlim = ax.set_xlim
201
+ set_ylim = ax.set_ylim
202
+ else:
203
+ ax = plt
204
+ set_title = plt.title
205
+ set_xlabel = plt.xlabel
206
+ set_ylabel = plt.ylabel
207
+ set_xlim = plt.xlim
208
+ set_ylim = plt.ylim
209
+ if n_fft is None:
210
+ if spec.shape[0] % 2 == 0:
211
+ n_fft = spec.shape[0] * 2
212
+ else:
213
+ n_fft = (spec.shape[0] - 1) * 2
214
+ hop = hop or n_fft // 4
215
+ if t is None:
216
+ t = np.arange(0, spec_np.shape[-1]) * hop / sr
217
+ if f is None:
218
+ f = np.arange(0, spec_np.shape[0]) * sr // 2 / (n_fft // 2) / 1000
219
+ im = ax.pcolormesh(
220
+ t, f, spec_np, rasterized=True, shading="auto", vmin=vmin, vmax=vmax, cmap=cmap
221
+ )
222
+ if title is not None:
223
+ set_title(title)
224
+ if xlabel is not None:
225
+ set_xlabel(xlabel)
226
+ if ylabel is not None:
227
+ set_ylabel(ylabel)
228
+ if xlim is not None:
229
+ set_xlim(xlim)
230
+ if ylim is not None:
231
+ set_ylim(ylim)
232
+ return im
233
+
234
+
235
+ def spec_im(
236
+ audio: torch.Tensor,
237
+ figsize=(15, 5),
238
+ colorbar=False,
239
+ colorbar_format=None,
240
+ figure=None,
241
+ labels=True,
242
+ **kwargs,
243
+ ) -> Image:
244
+ audio = torch.as_tensor(audio)
245
+ if labels:
246
+ kwargs.setdefault("xlabel", "Time [s]")
247
+ kwargs.setdefault("ylabel", "Frequency [Hz]")
248
+ n_fft = kwargs.setdefault("n_fft", 1024)
249
+ hop = kwargs.setdefault("hop", 512)
250
+ w = torch.hann_window(n_fft, device=audio.device)
251
+ spec = torch.stft(audio, n_fft, hop, window=w, return_complex=False)
252
+ spec = spec.div_(w.pow(2).sum())
253
+ spec = torch.view_as_complex(spec).abs().clamp_min(1e-12).log10().mul(10)
254
+ kwargs.setdefault("vmax", max(0.0, spec.max().item()))
255
+
256
+ if figure is None:
257
+ figure = plt.figure(figsize=figsize)
258
+ figure.set_tight_layout(True)
259
+ if spec.dim() > 2:
260
+ spec = spec.squeeze(0)
261
+ im = specshow(spec, **kwargs)
262
+ if colorbar:
263
+ ckwargs = {}
264
+ if "ax" in kwargs:
265
+ if colorbar_format is None:
266
+ if kwargs.get("vmin", None) is not None or kwargs.get("vmax", None) is not None:
267
+ colorbar_format = "%+2.0f dB"
268
+ ckwargs = {"ax": kwargs["ax"]}
269
+ plt.colorbar(im, format=colorbar_format, **ckwargs)
270
+ figure.canvas.draw()
271
+ return Image.frombytes("RGB", figure.canvas.get_width_height(), figure.canvas.tostring_rgb())
272
+
273
+
274
+ def toggle(choice):
275
+ if choice == "mic":
276
+ return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
277
+ else:
278
+ return gr.update(visible=False, value=None), gr.update(visible=True, value=None)
279
+
280
+ # Bark
281
+
282
+ settings = Settings('config.yaml')
283
+
284
+ def generate_text_to_speech(text, selected_speaker, text_temp, waveform_temp, eos_prob, quick_generation, complete_settings, seed, batchcount, progress=gr.Progress(track_tqdm=True)):
285
+ # Chunk the text into smaller pieces then combine the generated audio
286
+
287
+ # generation settings
288
+ if selected_speaker == 'None':
289
+ selected_speaker = None
290
+
291
+ voice_name = selected_speaker
292
+
293
+ if text == None or len(text) < 1:
294
+ if selected_speaker == None:
295
+ raise gr.Error('No text entered!')
296
+
297
+ # Extract audio data from speaker if no text and speaker selected
298
+ voicedata = _load_history_prompt(voice_name)
299
+ audio_arr = codec_decode(voicedata["fine_prompt"])
300
+ result = create_filename(settings.output_folder_path, "None", "extract",".wav")
301
+ save_wav(audio_arr, result)
302
+ return result
303
+
304
+ if batchcount < 1:
305
+ batchcount = 1
306
+
307
+
308
+ silenceshort = np.zeros(int((float(settings.silence_sentence) / 1000.0) * SAMPLE_RATE), dtype=np.int16) # quarter second of silence
309
+ silencelong = np.zeros(int((float(settings.silence_speakers) / 1000.0) * SAMPLE_RATE), dtype=np.float32) # half a second of silence
310
+ use_last_generation_as_history = "Use last generation as history" in complete_settings
311
+ save_last_generation = "Save generation as Voice" in complete_settings
312
+ for l in range(batchcount):
313
+ currentseed = seed
314
+ if seed != None and seed > 2**32 - 1:
315
+ logger.warning(f"Seed {seed} > 2**32 - 1 (max), setting to random")
316
+ currentseed = None
317
+ if currentseed == None or currentseed <= 0:
318
+ currentseed = np.random.default_rng().integers(1, 2**32 - 1)
319
+ assert(0 < currentseed and currentseed < 2**32)
320
+
321
+ progress(0, desc="Generating")
322
+
323
+ full_generation = None
324
+
325
+ all_parts = []
326
+ complete_text = ""
327
+ text = text.lstrip()
328
+ if is_ssml(text):
329
+ list_speak = create_clips_from_ssml(text)
330
+ prev_speaker = None
331
+ for i, clip in tqdm(enumerate(list_speak), total=len(list_speak)):
332
+ selected_speaker = clip[0]
333
+ # Add pause break between speakers
334
+ if i > 0 and selected_speaker != prev_speaker:
335
+ all_parts += [silencelong.copy()]
336
+ prev_speaker = selected_speaker
337
+ text = clip[1]
338
+ text = saxutils.unescape(text)
339
+ if selected_speaker == "None":
340
+ selected_speaker = None
341
+
342
+ print(f"\nGenerating Text ({i+1}/{len(list_speak)}) -> {selected_speaker} (Seed {currentseed}):`{text}`")
343
+ complete_text += text
344
+ with pytorch_seed.SavedRNG(currentseed):
345
+ audio_array = generate_with_settings(text_prompt=text, voice_name=selected_speaker, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob)
346
+ currentseed = torch.random.initial_seed()
347
+ if len(list_speak) > 1:
348
+ filename = create_filename(settings.output_folder_path, currentseed, "audioclip",".wav")
349
+ save_wav(audio_array, filename)
350
+ add_id3_tag(filename, text, selected_speaker, currentseed)
351
+
352
+ all_parts += [audio_array]
353
+ else:
354
+ texts = split_and_recombine_text(text, settings.input_text_desired_length, settings.input_text_max_length)
355
+ for i, text in tqdm(enumerate(texts), total=len(texts)):
356
+ print(f"\nGenerating Text ({i+1}/{len(texts)}) -> {selected_speaker} (Seed {currentseed}):`{text}`")
357
+ complete_text += text
358
+ if quick_generation == True:
359
+ with pytorch_seed.SavedRNG(currentseed):
360
+ audio_array = generate_with_settings(text_prompt=text, voice_name=selected_speaker, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob)
361
+ currentseed = torch.random.initial_seed()
362
+ else:
363
+ full_output = use_last_generation_as_history or save_last_generation
364
+ if full_output:
365
+ full_generation, audio_array = generate_with_settings(text_prompt=text, voice_name=voice_name, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob, output_full=True)
366
+ else:
367
+ audio_array = generate_with_settings(text_prompt=text, voice_name=voice_name, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob)
368
+
369
+ # Noticed this in the HF Demo - convert to 16bit int -32767/32767 - most used audio format
370
+ # audio_array = (audio_array * 32767).astype(np.int16)
371
+
372
+ if len(texts) > 1:
373
+ filename = create_filename(settings.output_folder_path, currentseed, "audioclip",".wav")
374
+ save_wav(audio_array, filename)
375
+ add_id3_tag(filename, text, selected_speaker, currentseed)
376
+
377
+ if quick_generation == False and (save_last_generation == True or use_last_generation_as_history == True):
378
+ # save to npz
379
+ voice_name = create_filename(settings.output_folder_path, seed, "audioclip", ".npz")
380
+ save_as_prompt(voice_name, full_generation)
381
+ if use_last_generation_as_history:
382
+ selected_speaker = voice_name
383
+
384
+ all_parts += [audio_array]
385
+ # Add short pause between sentences
386
+ if text[-1] in "!?.\n" and i > 1:
387
+ all_parts += [silenceshort.copy()]
388
+
389
+ # save & play audio
390
+ result = create_filename(settings.output_folder_path, currentseed, "final",".wav")
391
+ save_wav(np.concatenate(all_parts), result)
392
+ # write id3 tag with text truncated to 60 chars, as a precaution...
393
+ add_id3_tag(result, complete_text, selected_speaker, currentseed)
394
+
395
+ return result
396
+
397
+
398
+
399
+ def save_wav(audio_array, filename):
400
+ write_wav(filename, SAMPLE_RATE, audio_array)
401
+
402
+ def save_voice(filename, semantic_prompt, coarse_prompt, fine_prompt):
403
+ np.savez_compressed(
404
+ filename,
405
+ semantic_prompt=semantic_prompt,
406
+ coarse_prompt=coarse_prompt,
407
+ fine_prompt=fine_prompt
408
+ )
409
+
410
+
411
+ def on_quick_gen_changed(checkbox):
412
+ if checkbox == False:
413
+ return gr.CheckboxGroup.update(visible=True)
414
+ return gr.CheckboxGroup.update(visible=False)
415
+
416
+ def delete_output_files(checkbox_state):
417
+ if checkbox_state:
418
+ outputs_folder = os.path.join(os.getcwd(), settings.output_folder_path)
419
+ if os.path.exists(outputs_folder):
420
+ purgedir(outputs_folder)
421
+ return False
422
+
423
+
424
+ # https://stackoverflow.com/a/54494779
425
+ def purgedir(parent):
426
+ for root, dirs, files in os.walk(parent):
427
+ for item in files:
428
+ # Delete subordinate files
429
+ filespec = os.path.join(root, item)
430
+ os.unlink(filespec)
431
+ for item in dirs:
432
+ # Recursively perform this operation for subordinate directories
433
+ purgedir(os.path.join(root, item))
434
+
435
+ def convert_text_to_ssml(text, selected_speaker):
436
+ return build_ssml(text, selected_speaker)
437
+
438
+
439
+ def training_prepare(selected_step, num_text_generations, progress=gr.Progress(track_tqdm=True)):
440
+ if selected_step == prepare_training_list[0]:
441
+ prepare_semantics_from_text()
442
+ else:
443
+ prepare_wavs_from_semantics()
444
+ return None
445
+
446
+
447
+ def start_training(save_model_epoch, max_epochs, progress=gr.Progress(track_tqdm=True)):
448
+ training_prepare_files("./training/data/", "./training/data/checkpoint/hubert_base_ls960.pt")
449
+ train("./training/data/", save_model_epoch, max_epochs)
450
+ return None
451
+
452
+
453
+
454
+ def apply_settings(themes, input_server_name, input_server_port, input_server_public, input_desired_len, input_max_len, input_silence_break, input_silence_speaker):
455
+ settings.selected_theme = themes
456
+ settings.server_name = input_server_name
457
+ settings.server_port = input_server_port
458
+ settings.server_share = input_server_public
459
+ settings.input_text_desired_length = input_desired_len
460
+ settings.input_text_max_length = input_max_len
461
+ settings.silence_sentence = input_silence_break
462
+ settings.silence_speaker = input_silence_speaker
463
+ settings.save()
464
+
465
+ def restart():
466
+ global restart_server
467
+ restart_server = True
468
+
469
+
470
+ def create_version_html():
471
+ python_version = ".".join([str(x) for x in sys.version_info[0:3]])
472
+ versions_html = f"""
473
+ python: <span title="{sys.version}">{python_version}</span>
474
+  • 
475
+ torch: {getattr(torch, '__long_version__',torch.__version__)}
476
+  • 
477
+ gradio: {gr.__version__}
478
+ """
479
+ return versions_html
480
+
481
+
482
+
483
+ logger = logging.getLogger(__name__)
484
+ APPTITLE = "Bark Voice Cloning UI"
485
+
486
+
487
+ autolaunch = False
488
+
489
+ if len(sys.argv) > 1:
490
+ autolaunch = "-autolaunch" in sys.argv
491
+
492
+ if torch.cuda.is_available() == False:
493
+ os.environ['BARK_FORCE_CPU'] = 'True'
494
+ logger.warning("No CUDA detected, fallback to CPU!")
495
+
496
+ print(f'smallmodels={os.environ.get("SUNO_USE_SMALL_MODELS", False)}')
497
+ print(f'enablemps={os.environ.get("SUNO_ENABLE_MPS", False)}')
498
+ print(f'offloadcpu={os.environ.get("SUNO_OFFLOAD_CPU", False)}')
499
+ print(f'forcecpu={os.environ.get("BARK_FORCE_CPU", False)}')
500
+ print(f'autolaunch={autolaunch}\n\n')
501
+
502
+ #print("Updating nltk\n")
503
+ #nltk.download('punkt')
504
+
505
+ print("Preloading Models\n")
506
+ preload_models()
507
+
508
+ available_themes = ["Default", "gradio/glass", "gradio/monochrome", "gradio/seafoam", "gradio/soft", "gstaff/xkcd", "freddyaboulton/dracula_revamped", "ysharma/steampunk"]
509
+ tokenizer_language_list = ["de","en", "pl"]
510
+ prepare_training_list = ["Step 1: Semantics from Text","Step 2: WAV from Semantics"]
511
+
512
+ seed = -1
513
+ server_name = settings.server_name
514
+ if len(server_name) < 1:
515
+ server_name = None
516
+ server_port = settings.server_port
517
+ if server_port <= 0:
518
+ server_port = None
519
+ global run_server
520
+ global restart_server
521
+
522
+ run_server = True
523
+
524
+ while run_server:
525
+ # Collect all existing speakers/voices in dir
526
+ speakers_list = []
527
+
528
+ for root, dirs, files in os.walk("./bark/assets/prompts"):
529
+ for file in files:
530
+ if file.endswith(".npz"):
531
+ pathpart = root.replace("./bark/assets/prompts", "")
532
+ name = os.path.join(pathpart, file[:-4])
533
+ if name.startswith("/") or name.startswith("\\"):
534
+ name = name[1:]
535
+ speakers_list.append(name)
536
+
537
+ speakers_list = sorted(speakers_list, key=lambda x: x.lower())
538
+ speakers_list.insert(0, 'None')
539
+
540
+ print(f'Launching {APPTITLE} Server')
541
+
542
+ # Create Gradio Blocks
543
+
544
+ with gr.Blocks(title=f"{APPTITLE}", mode=f"{APPTITLE}", theme=settings.selected_theme) as barkgui:
545
+ gr.Markdown("# <center>🐶🎶⭐ - Bark Voice Cloning</center>")
546
+ gr.Markdown("## <center>🤗 - If you like this space, please star my [github repo](https://github.com/KevinWang676/Bark-Voice-Cloning)</center>")
547
+ gr.Markdown("### <center>🎡 - Based on [bark-gui](https://github.com/C0untFloyd/bark-gui)</center>")
548
+ gr.Markdown(f""" You can duplicate and use it with a GPU: <a href="https://huggingface.co/spaces/{os.getenv('SPACE_ID')}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a>
549
+ or open in [Colab](https://colab.research.google.com/github/KevinWang676/Bark-Voice-Cloning/blob/main/Bark_Voice_Cloning.ipynb) for quick start 🌟 P.S. Voice cloning needs a GPU, but TTS doesn't 😄
550
+ """)
551
+
552
+ with gr.Tab("🎙️ - Clone Voice"):
553
+ with gr.Row():
554
+ input_audio_filename = gr.Audio(label="Input audio.wav", source="upload", type="filepath")
555
+ #transcription_text = gr.Textbox(label="Transcription Text", lines=1, placeholder="Enter Text of your Audio Sample here...")
556
+ with gr.Row():
557
+ with gr.Column():
558
+ initialname = "/home/user/app/bark/assets/prompts/file"
559
+ output_voice = gr.Textbox(label="Filename of trained Voice (do not change the initial name)", lines=1, placeholder=initialname, value=initialname, visible=False)
560
+ with gr.Column():
561
+ tokenizerlang = gr.Dropdown(tokenizer_language_list, label="Base Language Tokenizer", value=tokenizer_language_list[1], visible=False)
562
+ with gr.Row():
563
+ clone_voice_button = gr.Button("Create Voice", variant="primary")
564
+ with gr.Row():
565
+ dummy = gr.Text(label="Progress")
566
+ npz_file = gr.File(label=".npz file")
567
+ speakers_list.insert(0, npz_file) # add prompt
568
+
569
+ with gr.Tab("🎵 - TTS"):
570
+ with gr.Row():
571
+ with gr.Column():
572
+ placeholder = "Enter text here."
573
+ input_text = gr.Textbox(label="Input Text", lines=4, placeholder=placeholder)
574
+ convert_to_ssml_button = gr.Button("Convert Input Text to SSML")
575
+ with gr.Column():
576
+ seedcomponent = gr.Number(label="Seed (default -1 = Random)", precision=0, value=-1)
577
+ batchcount = gr.Number(label="Batch count", precision=0, value=1)
578
+
579
+ with gr.Row():
580
+ with gr.Column():
581
+ gr.Markdown("[Voice Prompt Library](https://suno-ai.notion.site/8b8e8749ed514b0cbf3f699013548683?v=bc67cff786b04b50b3ceb756fd05f68c)")
582
+ speaker = gr.Dropdown(speakers_list, value=speakers_list[0], label="Voice (Choose “file” if you wanna use the custom voice)")
583
+
584
+ with gr.Column():
585
+ text_temp = gr.Slider(0.1, 1.0, value=0.6, label="Generation Temperature", info="1.0 more diverse, 0.1 more conservative")
586
+ waveform_temp = gr.Slider(0.1, 1.0, value=0.7, label="Waveform temperature", info="1.0 more diverse, 0.1 more conservative")
587
+
588
+ with gr.Row():
589
+ with gr.Column():
590
+ quick_gen_checkbox = gr.Checkbox(label="Quick Generation", value=True)
591
+ settings_checkboxes = ["Use last generation as history", "Save generation as Voice"]
592
+ complete_settings = gr.CheckboxGroup(choices=settings_checkboxes, value=settings_checkboxes, label="Detailed Generation Settings", type="value", interactive=True, visible=False)
593
+ with gr.Column():
594
+ eos_prob = gr.Slider(0.0, 0.5, value=0.05, label="End of sentence probability")
595
+
596
+ with gr.Row():
597
+ with gr.Column():
598
+ tts_create_button = gr.Button("Generate", variant="primary")
599
+ with gr.Column():
600
+ hidden_checkbox = gr.Checkbox(visible=False)
601
+ button_stop_generation = gr.Button("Stop generation")
602
+ with gr.Row():
603
+ output_audio = gr.Audio(label="Generated Audio", type="filepath")
604
+
605
+ with gr.Row():
606
+ with gr.Column():
607
+ radio = gr.Radio(
608
+ ["mic", "file"], value="file", label="How would you like to upload your audio?", visible=False
609
+ )
610
+ mic_input = gr.Mic(label="Input", type="filepath", visible=False)
611
+ audio_file = output_audio
612
+ inputs = [
613
+ audio_file,
614
+ gr.Dropdown(
615
+ label="Add background noise",
616
+ choices=list(NOISES.keys()),
617
+ value="None", visible =False,
618
+ ),
619
+ gr.Dropdown(
620
+ label="Noise Level (SNR)",
621
+ choices=["-5", "0", "10", "20"],
622
+ value="0", visible =False,
623
+ ),
624
+ mic_input,
625
+ ]
626
+ btn_denoise = gr.Button("Denoise", variant="primary")
627
+ with gr.Column():
628
+ outputs = [
629
+ gr.Audio(type="filepath", label="Enhanced audio"),
630
+ ]
631
+ btn_denoise.click(fn=demo_fn, inputs=inputs, outputs=outputs)
632
+ radio.change(toggle, radio, [mic_input, audio_file])
633
+
634
+ with gr.Tab("🔮 - Voice Conversion"):
635
+ with gr.Row():
636
+ swap_audio_filename = gr.Audio(label="Input audio.wav to swap voice", source="upload", type="filepath")
637
+ with gr.Row():
638
+ with gr.Column():
639
+ swap_tokenizer_lang = gr.Dropdown(tokenizer_language_list, label="Base Language Tokenizer", value=tokenizer_language_list[1])
640
+ swap_seed = gr.Number(label="Seed (default -1 = Random)", precision=0, value=-1)
641
+ with gr.Column():
642
+ speaker_swap = gr.Dropdown(speakers_list, value=speakers_list[0], label="Voice (Choose “file” if you wanna use the custom voice)")
643
+ swap_batchcount = gr.Number(label="Batch count", precision=0, value=1)
644
+ with gr.Row():
645
+ swap_voice_button = gr.Button("Generate", variant="primary")
646
+ with gr.Row():
647
+ output_swap = gr.Audio(label="Generated Audio", type="filepath")
648
+
649
+
650
+ quick_gen_checkbox.change(fn=on_quick_gen_changed, inputs=quick_gen_checkbox, outputs=complete_settings)
651
+ convert_to_ssml_button.click(convert_text_to_ssml, inputs=[input_text, speaker],outputs=input_text)
652
+ gen_click = tts_create_button.click(generate_text_to_speech, inputs=[input_text, speaker, text_temp, waveform_temp, eos_prob, quick_gen_checkbox, complete_settings, seedcomponent, batchcount],outputs=output_audio)
653
+ button_stop_generation.click(fn=None, inputs=None, outputs=None, cancels=[gen_click])
654
+
655
+
656
+
657
+ swap_voice_button.click(swap_voice_from_audio, inputs=[swap_audio_filename, speaker_swap, swap_tokenizer_lang, swap_seed, swap_batchcount], outputs=output_swap)
658
+ clone_voice_button.click(clone_voice, inputs=[input_audio_filename, output_voice], outputs=[dummy, npz_file])
659
+
660
+
661
+ restart_server = False
662
+ try:
663
+ barkgui.queue().launch(show_error=True)
664
+ except:
665
+ restart_server = True
666
+ run_server = False
667
+ try:
668
+ while restart_server == False:
669
+ time.sleep(1.0)
670
+ except (KeyboardInterrupt, OSError):
671
+ print("Keyboard interruption in main thread... closing server.")
672
+ run_server = False
673
+ barkgui.close()
674
+
bark/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from .api import generate_audio, text_to_semantic, semantic_to_waveform, save_as_prompt
2
+ from .generation import SAMPLE_RATE, preload_models
bark/api.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, Optional, Union
2
+
3
+ import numpy as np
4
+
5
+ from .generation import codec_decode, generate_coarse, generate_fine, generate_text_semantic
6
+
7
+
8
+ def generate_with_settings(text_prompt, semantic_temp=0.6, eos_p=0.2, coarse_temp=0.7, fine_temp=0.5, voice_name=None, output_full=False):
9
+
10
+ # generation with more control
11
+ x_semantic = generate_text_semantic(
12
+ text_prompt,
13
+ history_prompt=voice_name,
14
+ temp=semantic_temp,
15
+ min_eos_p = eos_p,
16
+ use_kv_caching=True
17
+ )
18
+
19
+ x_coarse_gen = generate_coarse(
20
+ x_semantic,
21
+ history_prompt=voice_name,
22
+ temp=coarse_temp,
23
+ use_kv_caching=True
24
+ )
25
+ x_fine_gen = generate_fine(
26
+ x_coarse_gen,
27
+ history_prompt=voice_name,
28
+ temp=fine_temp,
29
+ )
30
+
31
+ if output_full:
32
+ full_generation = {
33
+ 'semantic_prompt': x_semantic,
34
+ 'coarse_prompt': x_coarse_gen,
35
+ 'fine_prompt': x_fine_gen
36
+ }
37
+ return full_generation, codec_decode(x_fine_gen)
38
+ return codec_decode(x_fine_gen)
39
+
40
+
41
+ def text_to_semantic(
42
+ text: str,
43
+ history_prompt: Optional[Union[Dict, str]] = None,
44
+ temp: float = 0.7,
45
+ silent: bool = False,
46
+ ):
47
+ """Generate semantic array from text.
48
+
49
+ Args:
50
+ text: text to be turned into audio
51
+ history_prompt: history choice for audio cloning
52
+ temp: generation temperature (1.0 more diverse, 0.0 more conservative)
53
+ silent: disable progress bar
54
+
55
+ Returns:
56
+ numpy semantic array to be fed into `semantic_to_waveform`
57
+ """
58
+ x_semantic = generate_text_semantic(
59
+ text,
60
+ history_prompt=history_prompt,
61
+ temp=temp,
62
+ silent=silent,
63
+ use_kv_caching=True
64
+ )
65
+ return x_semantic
66
+
67
+
68
+ def semantic_to_waveform(
69
+ semantic_tokens: np.ndarray,
70
+ history_prompt: Optional[Union[Dict, str]] = None,
71
+ temp: float = 0.7,
72
+ silent: bool = False,
73
+ output_full: bool = False,
74
+ ):
75
+ """Generate audio array from semantic input.
76
+
77
+ Args:
78
+ semantic_tokens: semantic token output from `text_to_semantic`
79
+ history_prompt: history choice for audio cloning
80
+ temp: generation temperature (1.0 more diverse, 0.0 more conservative)
81
+ silent: disable progress bar
82
+ output_full: return full generation to be used as a history prompt
83
+
84
+ Returns:
85
+ numpy audio array at sample frequency 24khz
86
+ """
87
+ coarse_tokens = generate_coarse(
88
+ semantic_tokens,
89
+ history_prompt=history_prompt,
90
+ temp=temp,
91
+ silent=silent,
92
+ use_kv_caching=True
93
+ )
94
+ fine_tokens = generate_fine(
95
+ coarse_tokens,
96
+ history_prompt=history_prompt,
97
+ temp=0.5,
98
+ )
99
+ audio_arr = codec_decode(fine_tokens)
100
+ if output_full:
101
+ full_generation = {
102
+ "semantic_prompt": semantic_tokens,
103
+ "coarse_prompt": coarse_tokens,
104
+ "fine_prompt": fine_tokens,
105
+ }
106
+ return full_generation, audio_arr
107
+ return audio_arr
108
+
109
+
110
+ def save_as_prompt(filepath, full_generation):
111
+ assert(filepath.endswith(".npz"))
112
+ assert(isinstance(full_generation, dict))
113
+ assert("semantic_prompt" in full_generation)
114
+ assert("coarse_prompt" in full_generation)
115
+ assert("fine_prompt" in full_generation)
116
+ np.savez(filepath, **full_generation)
117
+
118
+
119
+ def generate_audio(
120
+ text: str,
121
+ history_prompt: Optional[Union[Dict, str]] = None,
122
+ text_temp: float = 0.7,
123
+ waveform_temp: float = 0.7,
124
+ silent: bool = False,
125
+ output_full: bool = False,
126
+ ):
127
+ """Generate audio array from input text.
128
+
129
+ Args:
130
+ text: text to be turned into audio
131
+ history_prompt: history choice for audio cloning
132
+ text_temp: generation temperature (1.0 more diverse, 0.0 more conservative)
133
+ waveform_temp: generation temperature (1.0 more diverse, 0.0 more conservative)
134
+ silent: disable progress bar
135
+ output_full: return full generation to be used as a history prompt
136
+
137
+ Returns:
138
+ numpy audio array at sample frequency 24khz
139
+ """
140
+ semantic_tokens = text_to_semantic(
141
+ text,
142
+ history_prompt=history_prompt,
143
+ temp=text_temp,
144
+ silent=silent,
145
+ )
146
+ out = semantic_to_waveform(
147
+ semantic_tokens,
148
+ history_prompt=history_prompt,
149
+ temp=waveform_temp,
150
+ silent=silent,
151
+ output_full=output_full,
152
+ )
153
+ if output_full:
154
+ full_generation, audio_arr = out
155
+ return full_generation, audio_arr
156
+ else:
157
+ audio_arr = out
158
+ return audio_arr
bark/assets/prompts/announcer.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:26f2d1a9e3b6fe453cf5fc8191de26cbfae6276c5b0f7c376c6a0f3c35867f83
3
+ size 16794
bark/assets/prompts/v2/en_speaker_0.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:932f40d879ba8659f1ca26319ba64ea3b0647b2050fe24313bf42b0dff1fe241
3
+ size 28100
bark/assets/prompts/v2/en_speaker_1.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e7f18015e1ab9b6302ded1e28a971af5306a72f193bb6c411f1948a083c8578
3
+ size 25220
bark/assets/prompts/v2/en_speaker_2.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d218990680ece5f2d4fc18ea4783b016b3ae353ec413eaee2058f2d57263c9b3
3
+ size 26236
bark/assets/prompts/v2/en_speaker_3.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:92c2e2a29145c83738e9b63f082fd1c873d9422468a155463cb27f814aeaea66
3
+ size 34980
bark/assets/prompts/v2/en_speaker_4.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:992f91991a9a5359d72f00b09a11a550e71bb8ebfc0cfd877e39d7d41f98b714
3
+ size 23780
bark/assets/prompts/v2/en_speaker_5.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18831c3f6014e4a2ff60ad5169b1fae06e28ed07f43f8a3616aafb84515091bf
3
+ size 24740
bark/assets/prompts/v2/en_speaker_6.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fab38dc6b6bc9226bcc414f4c5a9524bc1b2441865a586153fb620127a8faa4e
3
+ size 25540
bark/assets/prompts/v2/en_speaker_7.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f4c4eb33f5994be8de5cfd1744ebce13da1618a6da3a7d244514178c61ef7db
3
+ size 22716
bark/assets/prompts/v2/en_speaker_8.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8fc9f11b539588f51bbf78150a73e0365c49b2306bd72e5a22b28ef09c4fb15d
3
+ size 23300
bark/assets/prompts/v2/en_speaker_9.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:78b3ba32eb9aeb9ed34556856c40633ecc8332d1c3ae3c81e6f5015ac3eefbd5
3
+ size 30180
bark/assets/prompts/v2/zh_speaker_0.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd7ac118a3e944b3f20c89f2446056a00850a630ee16318922acc6572ce80929
3
+ size 20636
bark/assets/prompts/v2/zh_speaker_1.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0eacf5c862dfd3c5ac825f2ebb26f323e64309cb712e7e264cbd31c5bca3f038
3
+ size 19836
bark/assets/prompts/v2/zh_speaker_2.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e324b47f8250e5798c314f395d4e049575e7ca369d0b6074e91c7bba70e9f26d
3
+ size 21060
bark/assets/prompts/v2/zh_speaker_3.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:98c476abc7bf634ffb2d71d363284e7bd8c8abd5e33ec5ca21d4aa5b15730d18
3
+ size 31300
bark/assets/prompts/v2/zh_speaker_4.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1fa8673a9895ad3302d13ac94193b5ad5da481f1cc276e6181fa895acaae133b
3
+ size 29964
bark/assets/prompts/v2/zh_speaker_5.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:226edfe5fabc72eeb83a13e350599bc8babe5adc2264b3cdb661fd1258dc4044
3
+ size 17436
bark/assets/prompts/v2/zh_speaker_6.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:285d51fbe81cc263636b5b487fbb6633e6f3cf92c53ca9ab8e6b7f55d4b4a31d
3
+ size 16900
bark/assets/prompts/v2/zh_speaker_7.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0967cdb14ffa79895747b0d52df9f15bdad80d6c55b7630894345c9a7ec87c91
3
+ size 21060
bark/assets/prompts/v2/zh_speaker_8.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c028f78530013f29ab8c0c1cf4fe2138106fbe5252951f5f36e0168056779549
3
+ size 19300
bark/assets/prompts/v2/zh_speaker_9.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6265bb827008d7af8a45a8e057fe3e91efb347d56208180a9ed990ad54e4d75e
3
+ size 16156
bark/generation.py ADDED
@@ -0,0 +1,864 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ import gc
3
+ import os
4
+ import re
5
+ import requests
6
+ import gc
7
+ import sys
8
+
9
+ from encodec import EncodecModel
10
+ import funcy
11
+ import logging
12
+ import numpy as np
13
+ from scipy.special import softmax
14
+ import torch
15
+ import torch.nn.functional as F
16
+ import tqdm
17
+ from transformers import BertTokenizer
18
+ from huggingface_hub import hf_hub_download, hf_hub_url
19
+
20
+ from .model import GPTConfig, GPT
21
+ from .model_fine import FineGPT, FineGPTConfig
22
+ from .settings import initenv
23
+
24
+ initenv(sys.argv)
25
+ global_force_cpu = os.environ.get("BARK_FORCE_CPU", False)
26
+ if (
27
+ global_force_cpu != True and
28
+ torch.cuda.is_available() and
29
+ hasattr(torch.cuda, "amp") and
30
+ hasattr(torch.cuda.amp, "autocast") and
31
+ hasattr(torch.cuda, "is_bf16_supported") and
32
+ torch.cuda.is_bf16_supported()
33
+ ):
34
+ autocast = funcy.partial(torch.cuda.amp.autocast, dtype=torch.bfloat16)
35
+ else:
36
+ @contextlib.contextmanager
37
+ def autocast():
38
+ yield
39
+
40
+
41
+ # hold models in global scope to lazy load
42
+ global models
43
+ models = {}
44
+
45
+ global models_devices
46
+ models_devices = {}
47
+
48
+
49
+ CONTEXT_WINDOW_SIZE = 1024
50
+
51
+ SEMANTIC_RATE_HZ = 49.9
52
+ SEMANTIC_VOCAB_SIZE = 10_000
53
+
54
+ CODEBOOK_SIZE = 1024
55
+ N_COARSE_CODEBOOKS = 2
56
+ N_FINE_CODEBOOKS = 8
57
+ COARSE_RATE_HZ = 75
58
+
59
+ SAMPLE_RATE = 24_000
60
+
61
+
62
+ SUPPORTED_LANGS = [
63
+ ("English", "en"),
64
+ ("German", "de"),
65
+ ("Spanish", "es"),
66
+ ("French", "fr"),
67
+ ("Hindi", "hi"),
68
+ ("Italian", "it"),
69
+ ("Japanese", "ja"),
70
+ ("Korean", "ko"),
71
+ ("Polish", "pl"),
72
+ ("Portuguese", "pt"),
73
+ ("Russian", "ru"),
74
+ ("Turkish", "tr"),
75
+ ("Chinese", "zh"),
76
+ ]
77
+
78
+ ALLOWED_PROMPTS = {"announcer"}
79
+ for _, lang in SUPPORTED_LANGS:
80
+ for prefix in ("", f"v2{os.path.sep}"):
81
+ for n in range(10):
82
+ ALLOWED_PROMPTS.add(f"{prefix}{lang}_speaker_{n}")
83
+
84
+
85
+ logger = logging.getLogger(__name__)
86
+
87
+
88
+ CUR_PATH = os.path.dirname(os.path.abspath(__file__))
89
+
90
+
91
+ #default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
92
+ #CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
93
+ #CACHE_DIR = os.path.join(os.getcwd(), "models"
94
+ CACHE_DIR = "./models"
95
+
96
+
97
+ def _cast_bool_env_var(s):
98
+ return s.lower() in ('true', '1', 't')
99
+
100
+ USE_SMALL_MODELS = _cast_bool_env_var(os.environ.get("SUNO_USE_SMALL_MODELS", "False"))
101
+ GLOBAL_ENABLE_MPS = _cast_bool_env_var(os.environ.get("SUNO_ENABLE_MPS", "False"))
102
+ OFFLOAD_CPU = _cast_bool_env_var(os.environ.get("SUNO_OFFLOAD_CPU", "False"))
103
+
104
+ REMOTE_MODEL_PATHS = {
105
+ "text_small": {
106
+ "repo_id": "suno/bark",
107
+ "file_name": "text.pt",
108
+ },
109
+ "coarse_small": {
110
+ "repo_id": "suno/bark",
111
+ "file_name": "coarse.pt",
112
+ },
113
+ "fine_small": {
114
+ "repo_id": "suno/bark",
115
+ "file_name": "fine.pt",
116
+ },
117
+ "text": {
118
+ "repo_id": "suno/bark",
119
+ "file_name": "text_2.pt",
120
+ },
121
+ "coarse": {
122
+ "repo_id": "suno/bark",
123
+ "file_name": "coarse_2.pt",
124
+ },
125
+ "fine": {
126
+ "repo_id": "suno/bark",
127
+ "file_name": "fine_2.pt",
128
+ },
129
+ }
130
+
131
+
132
+ if not hasattr(torch.nn.functional, 'scaled_dot_product_attention') and torch.cuda.is_available():
133
+ logger.warning(
134
+ "torch version does not support flash attention. You will get faster" +
135
+ " inference speed by upgrade torch to newest nightly version."
136
+ )
137
+
138
+
139
+ def grab_best_device(use_gpu=True):
140
+ if torch.cuda.device_count() > 0 and use_gpu:
141
+ device = "cuda"
142
+ elif torch.backends.mps.is_available() and use_gpu and GLOBAL_ENABLE_MPS:
143
+ device = "mps"
144
+ else:
145
+ device = "cpu"
146
+ return device
147
+
148
+
149
+ def _get_ckpt_path(model_type, use_small=False):
150
+ key = model_type
151
+ if use_small or USE_SMALL_MODELS:
152
+ key += "_small"
153
+ return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
154
+
155
+ """
156
+ def _download(from_hf_path, file_name, destfilename):
157
+ os.makedirs(CACHE_DIR, exist_ok=True)
158
+ hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR, local_dir_use_symlinks=False)
159
+ # Bug in original repo? Downloaded name differs from expected...
160
+ if not os.path.exists(destfilename):
161
+ localname = os.path.join(CACHE_DIR, file_name)
162
+ os.rename(localname, destfilename)
163
+ """
164
+ def _download(from_hf_path, file_name):
165
+ os.makedirs(CACHE_DIR, exist_ok=True)
166
+ hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
167
+
168
+
169
+ class InferenceContext:
170
+ def __init__(self, benchmark=False):
171
+ # we can't expect inputs to be the same length, so disable benchmarking by default
172
+ self._chosen_cudnn_benchmark = benchmark
173
+ self._cudnn_benchmark = None
174
+
175
+ def __enter__(self):
176
+ self._cudnn_benchmark = torch.backends.cudnn.benchmark
177
+ torch.backends.cudnn.benchmark = self._chosen_cudnn_benchmark
178
+
179
+ def __exit__(self, exc_type, exc_value, exc_traceback):
180
+ torch.backends.cudnn.benchmark = self._cudnn_benchmark
181
+
182
+
183
+ if torch.cuda.is_available():
184
+ torch.backends.cuda.matmul.allow_tf32 = True
185
+ torch.backends.cudnn.allow_tf32 = True
186
+
187
+
188
+ @contextlib.contextmanager
189
+ def _inference_mode():
190
+ with InferenceContext(), torch.inference_mode(), torch.no_grad(), autocast():
191
+ yield
192
+
193
+
194
+ def _clear_cuda_cache():
195
+ if torch.cuda.is_available():
196
+ torch.cuda.empty_cache()
197
+ torch.cuda.synchronize()
198
+
199
+
200
+ def clean_models(model_key=None):
201
+ global models
202
+ model_keys = [model_key] if model_key is not None else models.keys()
203
+ for k in model_keys:
204
+ if k in models:
205
+ del models[k]
206
+ _clear_cuda_cache()
207
+ gc.collect()
208
+
209
+
210
+ def _load_model(ckpt_path, device, use_small=False, model_type="text"):
211
+ if model_type == "text":
212
+ ConfigClass = GPTConfig
213
+ ModelClass = GPT
214
+ elif model_type == "coarse":
215
+ ConfigClass = GPTConfig
216
+ ModelClass = GPT
217
+ elif model_type == "fine":
218
+ ConfigClass = FineGPTConfig
219
+ ModelClass = FineGPT
220
+ else:
221
+ raise NotImplementedError()
222
+
223
+ # Force-remove Models to allow running on >12Gb GPU
224
+ # CF: Probably not needed anymore
225
+ #global models
226
+ #models.clear()
227
+ #gc.collect()
228
+ #torch.cuda.empty_cache()
229
+ # to here...
230
+
231
+ model_key = f"{model_type}_small" if use_small or USE_SMALL_MODELS else model_type
232
+ model_info = REMOTE_MODEL_PATHS[model_key]
233
+ if not os.path.exists(ckpt_path):
234
+ logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
235
+ ## added next two lines to make it super clear which model is being downloaded
236
+ remote_filename = hf_hub_url(model_info["repo_id"], model_info["file_name"])
237
+ print(f"Downloading {model_key} {model_info['repo_id']} remote model file {remote_filename} {model_info['file_name']} to {CACHE_DIR}")
238
+ _download(model_info["repo_id"], model_info["file_name"])
239
+ # add next line to make it super clear which model is being loaded
240
+ print(f"Loading {model_key} model from {ckpt_path} to {device}") # added
241
+ checkpoint = torch.load(ckpt_path, map_location=device)
242
+ # this is a hack
243
+ model_args = checkpoint["model_args"]
244
+ if "input_vocab_size" not in model_args:
245
+ model_args["input_vocab_size"] = model_args["vocab_size"]
246
+ model_args["output_vocab_size"] = model_args["vocab_size"]
247
+ del model_args["vocab_size"]
248
+ gptconf = ConfigClass(**checkpoint["model_args"])
249
+ model = ModelClass(gptconf)
250
+ state_dict = checkpoint["model"]
251
+ # fixup checkpoint
252
+ unwanted_prefix = "_orig_mod."
253
+ for k, v in list(state_dict.items()):
254
+ if k.startswith(unwanted_prefix):
255
+ state_dict[k[len(unwanted_prefix) :]] = state_dict.pop(k)
256
+ extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
257
+ extra_keys = set([k for k in extra_keys if not k.endswith(".attn.bias")])
258
+ missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
259
+ missing_keys = set([k for k in missing_keys if not k.endswith(".attn.bias")])
260
+ if len(extra_keys) != 0:
261
+ raise ValueError(f"extra keys found: {extra_keys}")
262
+ if len(missing_keys) != 0:
263
+ raise ValueError(f"missing keys: {missing_keys}")
264
+ model.load_state_dict(state_dict, strict=False)
265
+ n_params = model.get_num_params()
266
+ val_loss = checkpoint["best_val_loss"].item()
267
+ logger.info(f"model loaded: {round(n_params/1e6,1)}M params, {round(val_loss,3)} loss")
268
+ model.eval()
269
+ model.to(device)
270
+ del checkpoint, state_dict
271
+ _clear_cuda_cache()
272
+ if model_type == "text":
273
+ tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")
274
+ return {
275
+ "model": model,
276
+ "tokenizer": tokenizer,
277
+ }
278
+ return model
279
+
280
+
281
+ def _load_codec_model(device):
282
+ model = EncodecModel.encodec_model_24khz()
283
+ model.set_target_bandwidth(6.0)
284
+ model.eval()
285
+ model.to(device)
286
+ _clear_cuda_cache()
287
+ return model
288
+
289
+
290
+ def load_model(use_gpu=True, use_small=False, force_reload=False, model_type="text"):
291
+ _load_model_f = funcy.partial(_load_model, model_type=model_type, use_small=use_small)
292
+ if model_type not in ("text", "coarse", "fine"):
293
+ raise NotImplementedError()
294
+ global models
295
+ global models_devices
296
+ device = grab_best_device(use_gpu=use_gpu)
297
+ model_key = f"{model_type}"
298
+ if OFFLOAD_CPU:
299
+ models_devices[model_key] = device
300
+ device = "cpu"
301
+ if model_key not in models or force_reload:
302
+ ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
303
+ clean_models(model_key=model_key)
304
+ model = _load_model_f(ckpt_path, device)
305
+ models[model_key] = model
306
+ if model_type == "text":
307
+ models[model_key]["model"].to(device)
308
+ else:
309
+ models[model_key].to(device)
310
+ return models[model_key]
311
+
312
+
313
+ def load_codec_model(use_gpu=True, force_reload=False):
314
+ global models
315
+ global models_devices
316
+ device = grab_best_device(use_gpu=use_gpu)
317
+ if device == "mps":
318
+ # encodec doesn't support mps
319
+ device = "cpu"
320
+ model_key = "codec"
321
+ if OFFLOAD_CPU:
322
+ models_devices[model_key] = device
323
+ device = "cpu"
324
+ if model_key not in models or force_reload:
325
+ clean_models(model_key=model_key)
326
+ model = _load_codec_model(device)
327
+ models[model_key] = model
328
+ models[model_key].to(device)
329
+ return models[model_key]
330
+
331
+
332
+ def preload_models(
333
+ text_use_gpu=True,
334
+ text_use_small=False,
335
+ coarse_use_gpu=True,
336
+ coarse_use_small=False,
337
+ fine_use_gpu=True,
338
+ fine_use_small=False,
339
+ codec_use_gpu=True,
340
+ force_reload=False
341
+ ):
342
+ """Load all the necessary models for the pipeline."""
343
+ if grab_best_device() == "cpu" and (
344
+ text_use_gpu or coarse_use_gpu or fine_use_gpu or codec_use_gpu
345
+ ):
346
+ logger.warning("No GPU being used. Careful, inference might be very slow!")
347
+ _ = load_model(
348
+ model_type="text", use_gpu=text_use_gpu, use_small=text_use_small, force_reload=force_reload
349
+ )
350
+ _ = load_model(
351
+ model_type="coarse",
352
+ use_gpu=coarse_use_gpu,
353
+ use_small=coarse_use_small,
354
+ force_reload=force_reload,
355
+ )
356
+ _ = load_model(
357
+ model_type="fine", use_gpu=fine_use_gpu, use_small=fine_use_small, force_reload=force_reload
358
+ )
359
+ _ = load_codec_model(use_gpu=codec_use_gpu, force_reload=force_reload)
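+ # Hedged usage sketch (illustrative, not part of the original module): a caller such as the
+ # web UI can warm everything once at startup, e.g.
+ #     preload_models(text_use_small=True, coarse_use_small=True, fine_use_small=True)
+ # which downloads the chosen checkpoints into CACHE_DIR on first run and keeps them resident
+ # so later generate_* calls skip the load step.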
360
+
361
+
362
+ ####
363
+ # Generation Functionality
364
+ ####
365
+
366
+
367
+ def _tokenize(tokenizer, text):
368
+ return tokenizer.encode(text, add_special_tokens=False)
369
+
370
+
371
+ def _detokenize(tokenizer, enc_text):
372
+ return tokenizer.decode(enc_text)
373
+
374
+
375
+ def _normalize_whitespace(text):
376
+ return re.sub(r"\s+", " ", text).strip()
377
+
378
+
379
+ TEXT_ENCODING_OFFSET = 10_048
380
+ SEMANTIC_PAD_TOKEN = 10_000
381
+ TEXT_PAD_TOKEN = 129_595
382
+ SEMANTIC_INFER_TOKEN = 129_599
383
+
384
+
385
+ def _load_history_prompt(history_prompt_input):
386
+ if isinstance(history_prompt_input, str) and history_prompt_input.endswith(".npz"):
387
+ history_prompt = np.load(history_prompt_input)
388
+ elif isinstance(history_prompt_input, str):
389
+ # make sure this works on non-ubuntu
390
+ history_prompt_input = os.path.join(*history_prompt_input.split("/"))
391
+ # if history_prompt_input not in ALLOWED_PROMPTS:
392
+ # raise ValueError("history prompt not found")
393
+ history_prompt = np.load(
394
+ os.path.join(CUR_PATH, "assets", "prompts", f"{history_prompt_input}.npz")
395
+ )
396
+ elif isinstance(history_prompt_input, dict):
397
+ assert("semantic_prompt" in history_prompt_input)
398
+ assert("coarse_prompt" in history_prompt_input)
399
+ assert("fine_prompt" in history_prompt_input)
400
+ history_prompt = history_prompt_input
401
+ else:
402
+ raise ValueError("history prompt format unrecognized")
403
+ return history_prompt
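+ # Summary of the accepted inputs above (comment only; the file names are placeholders):
+ #     _load_history_prompt("some_voice.npz")                   # explicit path to an .npz file
+ #     _load_history_prompt("v2/en_speaker_1")                  # built-in prompt under assets/prompts
+ #     _load_history_prompt({"semantic_prompt": ..., "coarse_prompt": ..., "fine_prompt": ...})
+ # All three resolve to a mapping containing the semantic, coarse and fine prompt arrays.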
404
+
405
+
406
+ def generate_text_semantic(
407
+ text,
408
+ history_prompt=None,
409
+ temp=0.7,
410
+ top_k=None,
411
+ top_p=None,
412
+ silent=False,
413
+ min_eos_p=0.2,
414
+ max_gen_duration_s=None,
415
+ allow_early_stop=True,
416
+ use_kv_caching=False,
417
+ ):
418
+ """Generate semantic tokens from text."""
419
+ assert isinstance(text, str)
420
+ text = _normalize_whitespace(text)
421
+ assert len(text.strip()) > 0
422
+ if history_prompt is not None:
423
+ history_prompt = _load_history_prompt(history_prompt)
424
+ semantic_history = history_prompt["semantic_prompt"]
425
+ assert (
426
+ isinstance(semantic_history, np.ndarray)
427
+ and len(semantic_history.shape) == 1
428
+ and len(semantic_history) > 0
429
+ and semantic_history.min() >= 0
430
+ and semantic_history.max() <= SEMANTIC_VOCAB_SIZE - 1
431
+ )
432
+ else:
433
+ semantic_history = None
434
+ # load models if they haven't been loaded yet
435
+ global models
436
+ global models_devices
437
+ if "text" not in models:
438
+ preload_models()
439
+ model_container = models["text"]
440
+ model = model_container["model"]
441
+ tokenizer = model_container["tokenizer"]
442
+ encoded_text = np.array(_tokenize(tokenizer, text)) + TEXT_ENCODING_OFFSET
443
+ if OFFLOAD_CPU:
444
+ model.to(models_devices["text"])
445
+ device = next(model.parameters()).device
446
+ if len(encoded_text) > 256:
447
+ p = round((len(encoded_text) - 256) / len(encoded_text) * 100, 1)
448
+ logger.warning(f"warning, text too long, lopping of last {p}%")
449
+ encoded_text = encoded_text[:256]
450
+ encoded_text = np.pad(
451
+ encoded_text,
452
+ (0, 256 - len(encoded_text)),
453
+ constant_values=TEXT_PAD_TOKEN,
454
+ mode="constant",
455
+ )
456
+ if semantic_history is not None:
457
+ semantic_history = semantic_history.astype(np.int64)
458
+ # lop off if history is too long, pad if needed
459
+ semantic_history = semantic_history[-256:]
460
+ semantic_history = np.pad(
461
+ semantic_history,
462
+ (0, 256 - len(semantic_history)),
463
+ constant_values=SEMANTIC_PAD_TOKEN,
464
+ mode="constant",
465
+ )
466
+ else:
467
+ semantic_history = np.array([SEMANTIC_PAD_TOKEN] * 256)
468
+ x = torch.from_numpy(
469
+ np.hstack([
470
+ encoded_text, semantic_history, np.array([SEMANTIC_INFER_TOKEN])
471
+ ]).astype(np.int64)
472
+ )[None]
473
+ assert x.shape[1] == 256 + 256 + 1
474
+ with _inference_mode():
475
+ x = x.to(device)
476
+ n_tot_steps = 768
477
+ # custom tqdm updates since we don't know when eos will occur
478
+ pbar = tqdm.tqdm(disable=silent, total=100)
479
+ pbar_state = 0
480
+ tot_generated_duration_s = 0
481
+ kv_cache = None
482
+ for n in range(n_tot_steps):
483
+ if use_kv_caching and kv_cache is not None:
484
+ x_input = x[:, [-1]]
485
+ else:
486
+ x_input = x
487
+ logits, kv_cache = model(
488
+ x_input, merge_context=True, use_cache=use_kv_caching, past_kv=kv_cache
489
+ )
490
+ relevant_logits = logits[0, 0, :SEMANTIC_VOCAB_SIZE]
491
+ if allow_early_stop:
492
+ relevant_logits = torch.hstack(
493
+ (relevant_logits, logits[0, 0, [SEMANTIC_PAD_TOKEN]]) # eos
494
+ )
495
+ if top_p is not None:
496
+ # faster to convert to numpy
497
+ original_device = relevant_logits.device
498
+ relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy()
499
+ sorted_indices = np.argsort(relevant_logits)[::-1]
500
+ sorted_logits = relevant_logits[sorted_indices]
501
+ cumulative_probs = np.cumsum(softmax(sorted_logits))
502
+ sorted_indices_to_remove = cumulative_probs > top_p
503
+ sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy()
504
+ sorted_indices_to_remove[0] = False
505
+ relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf
506
+ relevant_logits = torch.from_numpy(relevant_logits)
507
+ relevant_logits = relevant_logits.to(original_device)
508
+ if top_k is not None:
509
+ v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1)))
510
+ relevant_logits[relevant_logits < v[-1]] = -float("Inf")
511
+ probs = F.softmax(relevant_logits / temp, dim=-1)
512
+ # multinomial bugged on mps: shuttle to cpu if necessary
513
+ inf_device = probs.device
514
+ if probs.device.type == "mps":
515
+ probs = probs.to("cpu")
516
+ item_next = torch.multinomial(probs, num_samples=1)
517
+ probs = probs.to(inf_device)
518
+ item_next = item_next.to(inf_device)
519
+ if allow_early_stop and (
520
+ item_next == SEMANTIC_VOCAB_SIZE
521
+ or (min_eos_p is not None and probs[-1] >= min_eos_p)
522
+ ):
523
+ # eos found, so break
524
+ pbar.update(100 - pbar_state)
525
+ break
526
+ x = torch.cat((x, item_next[None]), dim=1)
527
+ tot_generated_duration_s += 1 / SEMANTIC_RATE_HZ
528
+ if max_gen_duration_s is not None and tot_generated_duration_s > max_gen_duration_s:
529
+ pbar.update(100 - pbar_state)
530
+ break
531
+ if n == n_tot_steps - 1:
532
+ pbar.update(100 - pbar_state)
533
+ break
534
+ del logits, relevant_logits, probs, item_next
535
+ req_pbar_state = np.min([100, int(round(100 * n / n_tot_steps))])
536
+ if req_pbar_state > pbar_state:
537
+ pbar.update(req_pbar_state - pbar_state)
538
+ pbar_state = req_pbar_state
539
+ pbar.close()
540
+ out = x.detach().cpu().numpy().squeeze()[256 + 256 + 1 :]
541
+ if OFFLOAD_CPU:
542
+ model.to("cpu")
543
+ assert all(0 <= out) and all(out < SEMANTIC_VOCAB_SIZE)
544
+ _clear_cuda_cache()
545
+ return out
546
+
547
+
548
+ def _flatten_codebooks(arr, offset_size=CODEBOOK_SIZE):
549
+ assert len(arr.shape) == 2
550
+ arr = arr.copy()
551
+ if offset_size is not None:
552
+ for n in range(1, arr.shape[0]):
553
+ arr[n, :] += offset_size * n
554
+ flat_arr = arr.ravel("F")
555
+ return flat_arr
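+ # Worked example (comment only, assuming offset_size=1024): for
+ #     _flatten_codebooks(np.array([[1, 2], [3, 4]]))
+ # the second codebook row is offset to [1027, 1028] and the columns are interleaved in
+ # Fortran order, giving [1, 1027, 2, 1028] -- codebooks alternate per timestep in the
+ # flattened sequence.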
556
+
557
+
558
+ COARSE_SEMANTIC_PAD_TOKEN = 12_048
559
+ COARSE_INFER_TOKEN = 12_050
560
+
561
+
562
+ def generate_coarse(
563
+ x_semantic,
564
+ history_prompt=None,
565
+ temp=0.7,
566
+ top_k=None,
567
+ top_p=None,
568
+ silent=False,
569
+ max_coarse_history=630, # min 60 (faster), max 630 (more context)
570
+ sliding_window_len=60,
571
+ use_kv_caching=False,
572
+ ):
573
+ """Generate coarse audio codes from semantic tokens."""
574
+ # CF: Commented out because it breaks swap voice more than once
575
+ # assert (
576
+ # isinstance(x_semantic, np.ndarray)
577
+ # and len(x_semantic.shape) == 1
578
+ # and len(x_semantic) > 0
579
+ # and x_semantic.min() >= 0
580
+ # and x_semantic.max() <= SEMANTIC_VOCAB_SIZE - 1
581
+ # )
582
+ assert 60 <= max_coarse_history <= 630
583
+ assert max_coarse_history + sliding_window_len <= 1024 - 256
584
+ semantic_to_coarse_ratio = COARSE_RATE_HZ / SEMANTIC_RATE_HZ * N_COARSE_CODEBOOKS
585
+ max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio))
586
+ if history_prompt is not None:
587
+ history_prompt = _load_history_prompt(history_prompt)
588
+ x_semantic_history = history_prompt["semantic_prompt"]
589
+ x_coarse_history = history_prompt["coarse_prompt"]
590
+ assert (
591
+ isinstance(x_semantic_history, np.ndarray)
592
+ and len(x_semantic_history.shape) == 1
593
+ and len(x_semantic_history) > 0
594
+ and x_semantic_history.min() >= 0
595
+ and x_semantic_history.max() <= SEMANTIC_VOCAB_SIZE - 1
596
+ and isinstance(x_coarse_history, np.ndarray)
597
+ and len(x_coarse_history.shape) == 2
598
+ and x_coarse_history.shape[0] == N_COARSE_CODEBOOKS
599
+ and x_coarse_history.shape[-1] >= 0
600
+ and x_coarse_history.min() >= 0
601
+ and x_coarse_history.max() <= CODEBOOK_SIZE - 1
602
+ #and (
603
+ # round(x_coarse_history.shape[-1] / len(x_semantic_history), 1)
604
+ # == round(semantic_to_coarse_ratio / N_COARSE_CODEBOOKS, 1)
605
+ #)
606
+ )
607
+ x_coarse_history = _flatten_codebooks(x_coarse_history) + SEMANTIC_VOCAB_SIZE
608
+ # trim histories correctly
609
+ n_semantic_hist_provided = np.min(
610
+ [
611
+ max_semantic_history,
612
+ len(x_semantic_history) - len(x_semantic_history) % 2,
613
+ int(np.floor(len(x_coarse_history) / semantic_to_coarse_ratio)),
614
+ ]
615
+ )
616
+ n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio))
617
+ x_semantic_history = x_semantic_history[-n_semantic_hist_provided:].astype(np.int32)
618
+ x_coarse_history = x_coarse_history[-n_coarse_hist_provided:].astype(np.int32)
619
+ # TODO: bit of a hack for time alignment (sounds better)
620
+ x_coarse_history = x_coarse_history[:-2]
621
+ else:
622
+ x_semantic_history = np.array([], dtype=np.int32)
623
+ x_coarse_history = np.array([], dtype=np.int32)
624
+ # load models if they haven't been loaded yet
625
+ global models
626
+ global models_devices
627
+ if "coarse" not in models:
628
+ preload_models()
629
+ model = models["coarse"]
630
+ if OFFLOAD_CPU:
631
+ model.to(models_devices["coarse"])
632
+ device = next(model.parameters()).device
633
+ # start loop
634
+ n_steps = int(
635
+ round(
636
+ np.floor(len(x_semantic) * semantic_to_coarse_ratio / N_COARSE_CODEBOOKS)
637
+ * N_COARSE_CODEBOOKS
638
+ )
639
+ )
640
+ assert n_steps > 0 and n_steps % N_COARSE_CODEBOOKS == 0
641
+ x_semantic = np.hstack([x_semantic_history, x_semantic]).astype(np.int32)
642
+ x_coarse = x_coarse_history.astype(np.int32)
643
+ base_semantic_idx = len(x_semantic_history)
644
+ with _inference_mode():
645
+ x_semantic_in = torch.from_numpy(x_semantic)[None].to(device)
646
+ x_coarse_in = torch.from_numpy(x_coarse)[None].to(device)
647
+ n_window_steps = int(np.ceil(n_steps / sliding_window_len))
648
+ n_step = 0
649
+ for _ in tqdm.tqdm(range(n_window_steps), total=n_window_steps, disable=silent):
650
+ semantic_idx = base_semantic_idx + int(round(n_step / semantic_to_coarse_ratio))
651
+ # pad from right side
652
+ x_in = x_semantic_in[:, np.max([0, semantic_idx - max_semantic_history]) :]
653
+ x_in = x_in[:, :256]
654
+ x_in = F.pad(
655
+ x_in,
656
+ (0, 256 - x_in.shape[-1]),
657
+ "constant",
658
+ COARSE_SEMANTIC_PAD_TOKEN,
659
+ )
660
+ x_in = torch.hstack(
661
+ [
662
+ x_in,
663
+ torch.tensor([COARSE_INFER_TOKEN])[None].to(device),
664
+ x_coarse_in[:, -max_coarse_history:],
665
+ ]
666
+ )
667
+ kv_cache = None
668
+ for _ in range(sliding_window_len):
669
+ if n_step >= n_steps:
670
+ continue
671
+ is_major_step = n_step % N_COARSE_CODEBOOKS == 0
672
+
673
+ if use_kv_caching and kv_cache is not None:
674
+ x_input = x_in[:, [-1]]
675
+ else:
676
+ x_input = x_in
677
+
678
+ logits, kv_cache = model(x_input, use_cache=use_kv_caching, past_kv=kv_cache)
679
+ logit_start_idx = (
680
+ SEMANTIC_VOCAB_SIZE + (1 - int(is_major_step)) * CODEBOOK_SIZE
681
+ )
682
+ logit_end_idx = (
683
+ SEMANTIC_VOCAB_SIZE + (2 - int(is_major_step)) * CODEBOOK_SIZE
684
+ )
685
+ relevant_logits = logits[0, 0, logit_start_idx:logit_end_idx]
686
+ if top_p is not None:
687
+ # faster to convert to numpy
688
+ original_device = relevant_logits.device
689
+ relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy()
690
+ sorted_indices = np.argsort(relevant_logits)[::-1]
691
+ sorted_logits = relevant_logits[sorted_indices]
692
+ cumulative_probs = np.cumsum(softmax(sorted_logits))
693
+ sorted_indices_to_remove = cumulative_probs > top_p
694
+ sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy()
695
+ sorted_indices_to_remove[0] = False
696
+ relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf
697
+ relevant_logits = torch.from_numpy(relevant_logits)
698
+ relevant_logits = relevant_logits.to(original_device)
699
+ if top_k is not None:
700
+ v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1)))
701
+ relevant_logits[relevant_logits < v[-1]] = -float("Inf")
702
+ probs = F.softmax(relevant_logits / temp, dim=-1)
703
+ # multinomial bugged on mps: shuttle to cpu if necessary
704
+ inf_device = probs.device
705
+ if probs.device.type == "mps":
706
+ probs = probs.to("cpu")
707
+ item_next = torch.multinomial(probs, num_samples=1)
708
+ probs = probs.to(inf_device)
709
+ item_next = item_next.to(inf_device)
710
+ item_next += logit_start_idx
711
+ x_coarse_in = torch.cat((x_coarse_in, item_next[None]), dim=1)
712
+ x_in = torch.cat((x_in, item_next[None]), dim=1)
713
+ del logits, relevant_logits, probs, item_next
714
+ n_step += 1
715
+ del x_in
716
+ del x_semantic_in
717
+ if OFFLOAD_CPU:
718
+ model.to("cpu")
719
+ gen_coarse_arr = x_coarse_in.detach().cpu().numpy().squeeze()[len(x_coarse_history) :]
720
+ del x_coarse_in
721
+ assert len(gen_coarse_arr) == n_steps
722
+ gen_coarse_audio_arr = gen_coarse_arr.reshape(-1, N_COARSE_CODEBOOKS).T - SEMANTIC_VOCAB_SIZE
723
+ for n in range(1, N_COARSE_CODEBOOKS):
724
+ gen_coarse_audio_arr[n, :] -= n * CODEBOOK_SIZE
725
+ _clear_cuda_cache()
726
+ return gen_coarse_audio_arr
727
+
728
+
729
+ def generate_fine(
730
+ x_coarse_gen,
731
+ history_prompt=None,
732
+ temp=0.5,
733
+ silent=True,
734
+ ):
735
+ """Generate full audio codes from coarse audio codes."""
736
+ assert (
737
+ isinstance(x_coarse_gen, np.ndarray)
738
+ and len(x_coarse_gen.shape) == 2
739
+ and 1 <= x_coarse_gen.shape[0] <= N_FINE_CODEBOOKS - 1
740
+ and x_coarse_gen.shape[1] > 0
741
+ and x_coarse_gen.min() >= 0
742
+ and x_coarse_gen.max() <= CODEBOOK_SIZE - 1
743
+ )
744
+ if history_prompt is not None:
745
+ history_prompt = _load_history_prompt(history_prompt)
746
+ x_fine_history = history_prompt["fine_prompt"]
747
+ assert (
748
+ isinstance(x_fine_history, np.ndarray)
749
+ and len(x_fine_history.shape) == 2
750
+ and x_fine_history.shape[0] == N_FINE_CODEBOOKS
751
+ and x_fine_history.shape[1] >= 0
752
+ and x_fine_history.min() >= 0
753
+ and x_fine_history.max() <= CODEBOOK_SIZE - 1
754
+ )
755
+ else:
756
+ x_fine_history = None
757
+ n_coarse = x_coarse_gen.shape[0]
758
+ # load models if they haven't been loaded yet
759
+ global models
760
+ global models_devices
761
+ if "fine" not in models:
762
+ preload_models()
763
+ model = models["fine"]
764
+ if OFFLOAD_CPU:
765
+ model.to(models_devices["fine"])
766
+ device = next(model.parameters()).device
767
+ # make input arr
768
+ in_arr = np.vstack(
769
+ [
770
+ x_coarse_gen,
771
+ np.zeros((N_FINE_CODEBOOKS - n_coarse, x_coarse_gen.shape[1]))
772
+ + CODEBOOK_SIZE, # padding
773
+ ]
774
+ ).astype(np.int32)
775
+ # prepend history if available (max 512)
776
+ if x_fine_history is not None:
777
+ x_fine_history = x_fine_history.astype(np.int32)
778
+ in_arr = np.hstack(
779
+ [
780
+ x_fine_history[:, -512:].astype(np.int32),
781
+ in_arr,
782
+ ]
783
+ )
784
+ n_history = x_fine_history[:, -512:].shape[1]
785
+ else:
786
+ n_history = 0
787
+ n_remove_from_end = 0
788
+ # need to pad if too short (since non-causal model)
789
+ if in_arr.shape[1] < 1024:
790
+ n_remove_from_end = 1024 - in_arr.shape[1]
791
+ in_arr = np.hstack(
792
+ [
793
+ in_arr,
794
+ np.zeros((N_FINE_CODEBOOKS, n_remove_from_end), dtype=np.int32) + CODEBOOK_SIZE,
795
+ ]
796
+ )
797
+ # we can be lazy about fractional loop and just keep overwriting codebooks
798
+ n_loops = np.max([0, int(np.ceil((x_coarse_gen.shape[1] - (1024 - n_history)) / 512))]) + 1
799
+ with _inference_mode():
800
+ in_arr = torch.tensor(in_arr.T).to(device)
801
+ for n in tqdm.tqdm(range(n_loops), disable=silent):
802
+ start_idx = np.min([n * 512, in_arr.shape[0] - 1024])
803
+ start_fill_idx = np.min([n_history + n * 512, in_arr.shape[0] - 512])
804
+ rel_start_fill_idx = start_fill_idx - start_idx
805
+ in_buffer = in_arr[start_idx : start_idx + 1024, :][None]
806
+ for nn in range(n_coarse, N_FINE_CODEBOOKS):
807
+ logits = model(nn, in_buffer)
808
+ if temp is None:
809
+ relevant_logits = logits[0, rel_start_fill_idx:, :CODEBOOK_SIZE]
810
+ codebook_preds = torch.argmax(relevant_logits, -1)
811
+ else:
812
+ relevant_logits = logits[0, :, :CODEBOOK_SIZE] / temp
813
+ probs = F.softmax(relevant_logits, dim=-1)
814
+ # multinomial bugged on mps: shuttle to cpu if necessary
815
+ inf_device = probs.device
816
+ if probs.device.type == "mps":
817
+ probs = probs.to("cpu")
818
+ codebook_preds = torch.hstack(
819
+ [
820
+ torch.multinomial(probs[nnn], num_samples=1).to(inf_device)
821
+ for nnn in range(rel_start_fill_idx, 1024)
822
+ ]
823
+ )
824
+ in_buffer[0, rel_start_fill_idx:, nn] = codebook_preds
825
+ del logits, codebook_preds
826
+ # transfer over info into model_in and convert to numpy
827
+ for nn in range(n_coarse, N_FINE_CODEBOOKS):
828
+ in_arr[
829
+ start_fill_idx : start_fill_idx + (1024 - rel_start_fill_idx), nn
830
+ ] = in_buffer[0, rel_start_fill_idx:, nn]
831
+ del in_buffer
832
+ gen_fine_arr = in_arr.detach().cpu().numpy().squeeze().T
833
+ del in_arr
834
+ if OFFLOAD_CPU:
835
+ model.to("cpu")
836
+ gen_fine_arr = gen_fine_arr[:, n_history:]
837
+ if n_remove_from_end > 0:
838
+ gen_fine_arr = gen_fine_arr[:, :-n_remove_from_end]
839
+ assert gen_fine_arr.shape[-1] == x_coarse_gen.shape[-1]
840
+ _clear_cuda_cache()
841
+ return gen_fine_arr
842
+
843
+
844
+ def codec_decode(fine_tokens):
845
+ """Turn quantized audio codes into audio array using encodec."""
846
+ # load models if they haven't been loaded yet
847
+ global models
848
+ global models_devices
849
+ if "codec" not in models:
850
+ preload_models()
851
+ model = models["codec"]
852
+ if OFFLOAD_CPU:
853
+ model.to(models_devices["codec"])
854
+ device = next(model.parameters()).device
855
+ arr = torch.from_numpy(fine_tokens)[None]
856
+ arr = arr.to(device)
857
+ arr = arr.transpose(0, 1)
858
+ emb = model.quantizer.decode(arr)
859
+ out = model.decoder(emb)
860
+ audio_arr = out.detach().cpu().numpy().squeeze()
861
+ del arr, emb, out
862
+ if OFFLOAD_CPU:
863
+ model.to("cpu")
864
+ return audio_arr
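+ # Minimal end-to-end sketch of how the stages above chain together (the text and prompt
+ # name are placeholders, not values used elsewhere in this repo):
+ #     semantic = generate_text_semantic("Hello world", history_prompt="v2/en_speaker_1")
+ #     coarse = generate_coarse(semantic, history_prompt="v2/en_speaker_1")
+ #     fine = generate_fine(coarse, history_prompt="v2/en_speaker_1")
+ #     audio = codec_decode(fine)  # float waveform at 24 kHz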
bark/hubert/__init__.py ADDED
File without changes
bark/hubert/customtokenizer.py ADDED
@@ -0,0 +1,195 @@
1
+ """
2
+ Custom tokenizer model.
3
+ Author: https://www.github.com/gitmylo/
4
+ License: MIT
5
+ """
6
+
7
+ import json
8
+ import os.path
9
+ from zipfile import ZipFile
10
+
11
+ import numpy
12
+ import torch
13
+ from torch import nn, optim
14
+ from torch.serialization import MAP_LOCATION
15
+ from tqdm.auto import tqdm
16
+
17
+
18
+ class CustomTokenizer(nn.Module):
19
+ def __init__(self, hidden_size=1024, input_size=768, output_size=10000, version=0):
20
+ super(CustomTokenizer, self).__init__()
21
+ next_size = input_size
22
+ if version == 0:
23
+ self.lstm = nn.LSTM(input_size, hidden_size, 2, batch_first=True)
24
+ next_size = hidden_size
25
+ if version == 1:
26
+ self.lstm = nn.LSTM(input_size, hidden_size, 2, batch_first=True)
27
+ self.intermediate = nn.Linear(hidden_size, 4096)
28
+ next_size = 4096
29
+
30
+ self.fc = nn.Linear(next_size, output_size)
31
+ self.softmax = nn.LogSoftmax(dim=1)
32
+ self.optimizer: optim.Optimizer = None
33
+ self.lossfunc = nn.CrossEntropyLoss()
34
+ self.input_size = input_size
35
+ self.hidden_size = hidden_size
36
+ self.output_size = output_size
37
+ self.version = version
38
+
39
+ def forward(self, x):
40
+ x, _ = self.lstm(x)
41
+ if self.version == 1:
42
+ x = self.intermediate(x)
43
+ x = self.fc(x)
44
+ x = self.softmax(x)
45
+ return x
46
+
47
+ @torch.no_grad()
48
+ def get_token(self, x):
49
+ """
50
+ Used to get the discrete token for each input frame.
51
+ :param x: An array with shape (N, input_size) where N is a whole number greater or equal to 1, and input_size is the input size used when creating the model.
52
+ :return: An array with shape (N,) where N is the same as N from the input. Every number in the array is a whole number in range 0...output_size - 1 where output_size is the output size used when creating the model.
53
+ """
54
+ return torch.argmax(self(x), dim=1)
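+ # Hedged usage sketch: given HuBERT features of shape (N, input_size), e.g. the output of
+ # CustomHubert.forward(), this returns one discrete semantic token per frame:
+ #     tokens = tokenizer.get_token(features)  # LongTensor of shape (N,)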
55
+
56
+ def prepare_training(self):
57
+ self.optimizer = optim.Adam(self.parameters(), 0.001)
58
+
59
+ def train_step(self, x_train, y_train, log_loss=False):
60
+ # y_train = y_train[:-1]
61
+ # y_train = y_train[1:]
62
+
63
+ optimizer = self.optimizer
64
+ lossfunc = self.lossfunc
65
+ # Zero the gradients
66
+ self.zero_grad()
67
+
68
+ # Forward pass
69
+ y_pred = self(x_train)
70
+
71
+ y_train_len = len(y_train)
72
+ y_pred_len = y_pred.shape[0]
73
+
74
+ if y_train_len > y_pred_len:
75
+ diff = y_train_len - y_pred_len
76
+ y_train = y_train[diff:]
77
+ elif y_train_len < y_pred_len:
78
+ diff = y_pred_len - y_train_len
79
+ y_pred = y_pred[:-diff, :]
80
+
81
+ y_train_hot = torch.zeros(len(y_train), self.output_size)
82
+ y_train_hot[range(len(y_train)), y_train] = 1
83
+ y_train_hot = y_train_hot.to('cuda')
84
+
85
+ # Calculate the loss
86
+ loss = lossfunc(y_pred, y_train_hot)
87
+
88
+ # Print loss
89
+ if log_loss:
90
+ print('Loss', loss.item())
91
+
92
+ # Backward pass
93
+ loss.backward()
94
+
95
+ # Update the weights
96
+ optimizer.step()
97
+
98
+ def save(self, path):
99
+ info_path = '.'.join(os.path.basename(path).split('.')[:-1]) + '/.info'
100
+ torch.save(self.state_dict(), path)
101
+ data_from_model = Data(self.input_size, self.hidden_size, self.output_size, self.version)
102
+ with ZipFile(path, 'a') as model_zip:
103
+ model_zip.writestr(info_path, data_from_model.save())
104
+ model_zip.close()
105
+
106
+ @staticmethod
107
+ def load_from_checkpoint(path, map_location: MAP_LOCATION = None):
108
+ old = True
109
+ with ZipFile(path) as model_zip:
110
+ filesMatch = [file for file in model_zip.namelist() if file.endswith('/.info')]
111
+ file = filesMatch[0] if filesMatch else None
112
+ if file:
113
+ old = False
114
+ print(f"Loading Custom Hubert Tokenizer {path}")
115
+ data_from_model = Data.load(model_zip.read(file).decode('utf-8'))
116
+ model_zip.close()
117
+ if old:
118
+ model = CustomTokenizer()
119
+ else:
120
+ model = CustomTokenizer(data_from_model.hidden_size, data_from_model.input_size, data_from_model.output_size, data_from_model.version)
121
+ model.load_state_dict(torch.load(path))
122
+ if map_location:
123
+ model = model.to(map_location)
124
+ return model
125
+
126
+
127
+
128
+ class Data:
129
+ input_size: int
130
+ hidden_size: int
131
+ output_size: int
132
+ version: int
133
+
134
+ def __init__(self, input_size=768, hidden_size=1024, output_size=10000, version=0):
135
+ self.input_size = input_size
136
+ self.hidden_size = hidden_size
137
+ self.output_size = output_size
138
+ self.version = version
139
+
140
+ @staticmethod
141
+ def load(string):
142
+ data = json.loads(string)
143
+ return Data(data['input_size'], data['hidden_size'], data['output_size'], data['version'])
144
+
145
+ def save(self):
146
+ data = {
147
+ 'input_size': self.input_size,
148
+ 'hidden_size': self.hidden_size,
149
+ 'output_size': self.output_size,
150
+ 'version': self.version,
151
+ }
152
+ return json.dumps(data)
153
+
154
+
155
+ def auto_train(data_path, save_path='model.pth', load_model: str | None = None, save_epochs=1, max_epochs=14):
156
+ data_x, data_y = [], []
157
+
158
+ if load_model and os.path.isfile(load_model):
159
+ print('Loading model from', load_model)
160
+ model_training = CustomTokenizer.load_from_checkpoint(load_model, 'cuda')
161
+ else:
162
+ print('Creating new model.')
163
+ model_training = CustomTokenizer(version=1).to('cuda')  # version 1 adds an intermediate linear layer on top of the LSTM
164
+ save_path = os.path.join(data_path, save_path)
165
+ base_save_path = '.'.join(save_path.split('.')[:-1])
166
+
167
+ sem_string = '_semantic.npy'
168
+ feat_string = '_semantic_features.npy'
169
+
170
+ ready = os.path.join(data_path, 'ready')
171
+ for input_file in os.listdir(ready):
172
+ full_path = os.path.join(ready, input_file)
173
+ if input_file.endswith(sem_string):
174
+ data_y.append(numpy.load(full_path))
175
+ elif input_file.endswith(feat_string):
176
+ data_x.append(numpy.load(full_path))
177
+ model_training.prepare_training()
178
+
179
+ epoch = 1
180
+ with tqdm(total=((len(data_x) * len(data_y)) / 50) * save_epochs) as pbar1:
181
+ while epoch <= max_epochs:
182
+ for i in range(save_epochs):
183
+ j = 0
184
+ for x, y in zip(data_x, data_y):
185
+ model_training.train_step(torch.tensor(x).to('cuda'), torch.tensor(y).to('cuda'), j % 50 == 0) # Print loss every 50 steps
186
+ j += 1
187
+ pbar1.update()
188
+
189
+ save_p = save_path
190
+ save_p_2 = f'{base_save_path}_epoch_{epoch}.pth'
191
+ model_training.save(save_p)
192
+ model_training.save(save_p_2)
193
+ print(f'Epoch {epoch} completed')
194
+ epoch += 1
195
+ print(f'Done training for {max_epochs} epochs!')
bark/hubert/hubert_manager.py ADDED
@@ -0,0 +1,48 @@
1
+ import os.path
2
+ import shutil
3
+ import urllib.request
4
+
5
+ import huggingface_hub
6
+
7
+
8
+ class HuBERTManager:
9
+
10
+
11
+ @staticmethod
12
+ def make_sure_hubert_installed(download_url: str = 'https://dl.fbaipublicfiles.com/hubert/hubert_base_ls960.pt', file_name: str = 'hubert.pt'):
13
+ install_dir = os.path.join('models', 'hubert')
14
+ if not os.path.isdir(install_dir):
15
+ os.makedirs(install_dir, exist_ok=True)
16
+ install_file = os.path.join(install_dir, file_name)
17
+ if not os.path.isfile(install_file):
18
+ print(f'Downloading HuBERT base model from {download_url}')
19
+ urllib.request.urlretrieve(download_url, install_file)
20
+ print('Downloaded HuBERT')
21
+ return install_file
22
+
23
+
24
+ @staticmethod
25
+ def make_sure_tokenizer_installed(model: str = 'quantifier_hubert_base_ls960_14.pth', repo: str = 'GitMylo/bark-voice-cloning', tokenizer_lang: str = 'en'):
26
+ local_file = tokenizer_lang + '_tokenizer.pth'
27
+ install_dir = os.path.join('models', 'hubert')
28
+ if not os.path.isdir(install_dir):
29
+ os.makedirs(install_dir, exist_ok=True)
30
+ install_file = os.path.join(install_dir, local_file)
31
+ if not os.path.isfile(install_file):
32
+ # refactor to use lists
33
+ if tokenizer_lang == 'en':
34
+ repo = 'GitMylo/bark-voice-cloning'
35
+ model = 'quantifier_hubert_base_ls960_14.pth'
36
+ elif tokenizer_lang == 'de':
37
+ repo = 'CountFloyd/bark-voice-cloning-german-HuBERT-quantizer'
38
+ model = 'german-HuBERT-quantizer_14_epoch.pth'
39
+ elif tokenizer_lang == 'pl':
40
+ repo = 'Hobis/bark-voice-cloning-polish-HuBERT-quantizer'
41
+ model = 'polish-HuBERT-quantizer_8_epoch.pth'
42
+ else:
43
+ raise 'Unknown Tokenizer Language!'
44
+ print(f'{local_file} not found. Downloading HuBERT custom tokenizer')
45
+ huggingface_hub.hf_hub_download(repo, model, local_dir=install_dir, local_dir_use_symlinks=False)
46
+ shutil.move(os.path.join(install_dir, model), install_file)
47
+ print('Downloaded tokenizer')
48
+ return install_file
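+ # Hedged usage sketch: both helpers skip the download when the file already exists under
+ # models/hubert, so callers can simply run
+ #     hubert_path = HuBERTManager.make_sure_hubert_installed()
+ #     tokenizer_path = HuBERTManager.make_sure_tokenizer_installed(tokenizer_lang='en')
+ # and load the returned paths.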
bark/hubert/pre_kmeans_hubert.py ADDED
@@ -0,0 +1,107 @@
1
+ """
2
+ Modified HuBERT model without kmeans.
3
+ Original author: https://github.com/lucidrains/
4
+ Modified by: https://www.github.com/gitmylo/
5
+ License: MIT
6
+ """
7
+
8
+ # Modified code from https://github.com/lucidrains/audiolm-pytorch/blob/main/audiolm_pytorch/hubert_kmeans.py
9
+
10
+ from pathlib import Path
11
+
12
+ import torch
13
+ from torch import nn
14
+ from einops import pack, unpack
15
+
16
+ import fairseq
17
+
18
+ from torchaudio.functional import resample
19
+
20
+ from audiolm_pytorch.utils import curtail_to_multiple
21
+
22
+ import logging
23
+ logging.root.setLevel(logging.ERROR)
24
+
25
+
26
+ def exists(val):
27
+ return val is not None
28
+
29
+
30
+ def default(val, d):
31
+ return val if exists(val) else d
32
+
33
+
34
+ class CustomHubert(nn.Module):
35
+ """
36
+ checkpoint and kmeans can be downloaded at https://github.com/facebookresearch/fairseq/tree/main/examples/hubert
37
+ or you can train your own
38
+ """
39
+
40
+ def __init__(
41
+ self,
42
+ checkpoint_path,
43
+ target_sample_hz=16000,
44
+ seq_len_multiple_of=None,
45
+ output_layer=9,
46
+ device=None
47
+ ):
48
+ super().__init__()
49
+ self.target_sample_hz = target_sample_hz
50
+ self.seq_len_multiple_of = seq_len_multiple_of
51
+ self.output_layer = output_layer
52
+
53
+ if device is not None:
54
+ self.to(device)
55
+
56
+ model_path = Path(checkpoint_path)
57
+
58
+ assert model_path.exists(), f'path {checkpoint_path} does not exist'
59
+
60
+ print(f"Loading Hubert {checkpoint_path}")
61
+ checkpoint = torch.load(checkpoint_path)
62
+ load_model_input = {checkpoint_path: checkpoint}
63
+ model, *_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(load_model_input)
64
+
65
+ if device is not None:
66
+ model[0].to(device)
67
+
68
+ self.model = model[0]
69
+ self.model.eval()
70
+
71
+ @property
72
+ def groups(self):
73
+ return 1
74
+
75
+ @torch.no_grad()
76
+ def forward(
77
+ self,
78
+ wav_input,
79
+ flatten=True,
80
+ input_sample_hz=None
81
+ ):
82
+ device = wav_input.device
83
+
84
+ if exists(input_sample_hz):
85
+ wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz)
86
+
87
+ if exists(self.seq_len_multiple_of):
88
+ wav_input = curtail_to_multiple(wav_input, self.seq_len_multiple_of)
89
+
90
+ embed = self.model(
91
+ wav_input,
92
+ features_only=True,
93
+ mask=False, # thanks to @maitycyrus for noticing that mask is defaulted to True in the fairseq code
94
+ output_layer=self.output_layer
95
+ )
96
+
97
+ embed, packed_shape = pack([embed['x']], '* d')
98
+
99
+ # codebook_indices = self.kmeans.predict(embed.cpu().detach().numpy())
100
+
101
+ codebook_indices = torch.from_numpy(embed.cpu().detach().numpy()).to(device) # .long()
102
+
103
+ if flatten:
104
+ return codebook_indices
105
+
106
+ codebook_indices, = unpack(codebook_indices, packed_shape, '*')
107
+ return codebook_indices
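+ # Hedged usage sketch (paths and sample rate are illustrative assumptions):
+ #     hubert = CustomHubert(checkpoint_path='./models/hubert/hubert.pt')
+ #     features = hubert.forward(wav, input_sample_hz=24000)  # resampled to 16 kHz internally
+ # With flatten=True the result is roughly (frames, 768) for a HuBERT base checkpoint and can
+ # be fed straight into CustomTokenizer.get_token().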
bark/model.py ADDED
@@ -0,0 +1,218 @@
1
+ """
2
+ Much of this code is adapted from Andrej Karpathy's NanoGPT
3
+ (https://github.com/karpathy/nanoGPT)
4
+ """
5
+ import math
6
+ from dataclasses import dataclass
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ from torch.nn import functional as F
11
+
12
+ class LayerNorm(nn.Module):
13
+ """ LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False """
14
+
15
+ def __init__(self, ndim, bias):
16
+ super().__init__()
17
+ self.weight = nn.Parameter(torch.ones(ndim))
18
+ self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None
19
+
20
+ def forward(self, input):
21
+ return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5)
22
+
23
+ class CausalSelfAttention(nn.Module):
24
+
25
+ def __init__(self, config):
26
+ super().__init__()
27
+ assert config.n_embd % config.n_head == 0
28
+ # key, query, value projections for all heads, but in a batch
29
+ self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
30
+ # output projection
31
+ self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
32
+ # regularization
33
+ self.attn_dropout = nn.Dropout(config.dropout)
34
+ self.resid_dropout = nn.Dropout(config.dropout)
35
+ self.n_head = config.n_head
36
+ self.n_embd = config.n_embd
37
+ self.dropout = config.dropout
38
+ # flash attention make GPU go brrrrr but support is only in PyTorch nightly and still a bit scary
39
+ self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
40
+ if not self.flash:
41
+ # print("WARNING: using slow attention. Flash Attention atm needs PyTorch nightly and dropout=0.0")
42
+ # causal mask to ensure that attention is only applied to the left in the input sequence
43
+ self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
44
+ .view(1, 1, config.block_size, config.block_size))
45
+
46
+ def forward(self, x, past_kv=None, use_cache=False):
47
+ B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
48
+
49
+ # calculate query, key, values for all heads in batch and move head forward to be the batch dim
50
+ q, k ,v = self.c_attn(x).split(self.n_embd, dim=2)
51
+ k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
52
+ q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
53
+ v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
54
+
55
+ if past_kv is not None:
56
+ past_key = past_kv[0]
57
+ past_value = past_kv[1]
58
+ k = torch.cat((past_key, k), dim=-2)
59
+ v = torch.cat((past_value, v), dim=-2)
60
+
61
+ FULL_T = k.shape[-2]
62
+
63
+ if use_cache is True:
64
+ present = (k, v)
65
+ else:
66
+ present = None
67
+
68
+ # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
69
+ if self.flash:
70
+ # efficient attention using Flash Attention CUDA kernels
71
+ if past_kv is not None:
72
+ # When `past_kv` is provided, we're doing incremental decoding and `q.shape[2] == 1`: q only contains
73
+ # the query for the last token. scaled_dot_product_attention interprets this as the first token in the
74
+ # sequence, so if is_causal=True it will mask out all attention from it. This is not what we want, so
75
+ # to work around this we set is_causal=False.
76
+ is_causal = False
77
+ else:
78
+ is_causal = True
79
+
80
+ y = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=self.dropout, is_causal=is_causal)
81
+ else:
82
+ # manual implementation of attention
83
+ att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
84
+ att = att.masked_fill(self.bias[:,:,FULL_T-T:FULL_T,:FULL_T] == 0, float('-inf'))
85
+ att = F.softmax(att, dim=-1)
86
+ att = self.attn_dropout(att)
87
+ y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
88
+ y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
89
+
90
+ # output projection
91
+ y = self.resid_dropout(self.c_proj(y))
92
+ return (y, present)
93
+
94
+ class MLP(nn.Module):
95
+
96
+ def __init__(self, config):
97
+ super().__init__()
98
+ self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
99
+ self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)
100
+ self.dropout = nn.Dropout(config.dropout)
101
+ self.gelu = nn.GELU()
102
+
103
+ def forward(self, x):
104
+ x = self.c_fc(x)
105
+ x = self.gelu(x)
106
+ x = self.c_proj(x)
107
+ x = self.dropout(x)
108
+ return x
109
+
110
+ class Block(nn.Module):
111
+
112
+ def __init__(self, config, layer_idx):
113
+ super().__init__()
114
+ self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
115
+ self.attn = CausalSelfAttention(config)
116
+ self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
117
+ self.mlp = MLP(config)
118
+ self.layer_idx = layer_idx
119
+
120
+ def forward(self, x, past_kv=None, use_cache=False):
121
+ attn_output, prev_kvs = self.attn(self.ln_1(x), past_kv=past_kv, use_cache=use_cache)
122
+ x = x + attn_output
123
+ x = x + self.mlp(self.ln_2(x))
124
+ return (x, prev_kvs)
125
+
126
+ @dataclass
127
+ class GPTConfig:
128
+ block_size: int = 1024
129
+ input_vocab_size: int = 10_048
130
+ output_vocab_size: int = 10_048
131
+ n_layer: int = 12
132
+ n_head: int = 12
133
+ n_embd: int = 768
134
+ dropout: float = 0.0
135
+ bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster
136
+
137
+ class GPT(nn.Module):
138
+
139
+ def __init__(self, config):
140
+ super().__init__()
141
+ assert config.input_vocab_size is not None
142
+ assert config.output_vocab_size is not None
143
+ assert config.block_size is not None
144
+ self.config = config
145
+
146
+ self.transformer = nn.ModuleDict(dict(
147
+ wte = nn.Embedding(config.input_vocab_size, config.n_embd),
148
+ wpe = nn.Embedding(config.block_size, config.n_embd),
149
+ drop = nn.Dropout(config.dropout),
150
+ h = nn.ModuleList([Block(config, idx) for idx in range(config.n_layer)]),
151
+ ln_f = LayerNorm(config.n_embd, bias=config.bias),
152
+ ))
153
+ self.lm_head = nn.Linear(config.n_embd, config.output_vocab_size, bias=False)
154
+
155
+ def get_num_params(self, non_embedding=True):
156
+ """
157
+ Return the number of parameters in the model.
158
+ For non-embedding count (default), the position embeddings get subtracted.
159
+ The token embeddings would too, except due to the parameter sharing these
160
+ params are actually used as weights in the final layer, so we include them.
161
+ """
162
+ n_params = sum(p.numel() for p in self.parameters())
163
+ if non_embedding:
164
+ n_params -= self.transformer.wte.weight.numel()
165
+ n_params -= self.transformer.wpe.weight.numel()
166
+ return n_params
167
+
168
+ def forward(self, idx, merge_context=False, past_kv=None, position_ids=None, use_cache=False):
169
+ device = idx.device
170
+ b, t = idx.size()
171
+ if past_kv is not None:
172
+ assert t == 1
173
+ tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
174
+ else:
175
+ if merge_context:
176
+ assert(idx.shape[1] >= 256+256+1)
177
+ t = idx.shape[1] - 256
178
+ else:
179
+ assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
180
+
181
+ # forward the GPT model itself
182
+ if merge_context:
183
+ tok_emb = torch.cat([
184
+ self.transformer.wte(idx[:,:256]) + self.transformer.wte(idx[:,256:256+256]),
185
+ self.transformer.wte(idx[:,256+256:])
186
+ ], dim=1)
187
+ else:
188
+ tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
189
+
190
+ if past_kv is None:
191
+ past_length = 0
192
+ past_kv = tuple([None] * len(self.transformer.h))
193
+ else:
194
+ past_length = past_kv[0][0].size(-2)
195
+
196
+ if position_ids is None:
197
+ position_ids = torch.arange(past_length, t + past_length, dtype=torch.long, device=device)
198
+ position_ids = position_ids.unsqueeze(0) # shape (1, t)
199
+ assert position_ids.shape == (1, t)
200
+
201
+ pos_emb = self.transformer.wpe(position_ids) # position embeddings of shape (1, t, n_embd)
202
+
203
+ x = self.transformer.drop(tok_emb + pos_emb)
204
+
205
+ new_kv = () if use_cache else None
206
+
207
+ for i, (block, past_layer_kv) in enumerate(zip(self.transformer.h, past_kv)):
208
+ x, kv = block(x, past_kv=past_layer_kv, use_cache=use_cache)
209
+
210
+ if use_cache:
211
+ new_kv = new_kv + (kv,)
212
+
213
+ x = self.transformer.ln_f(x)
214
+
215
+ # inference-time mini-optimization: only forward the lm_head on the very last position
216
+ logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim
217
+
218
+ return (logits, new_kv)
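+ # Shape walkthrough (comment only, derived from the code above): for the semantic model the
+ # prompt is 256 text tokens + 256 semantic-history tokens + 1 infer token; with
+ # merge_context=True the two 256-token halves are embedded and summed into a single
+ # 256-step context, and because only the last position goes through lm_head the returned
+ # logits have shape (b, 1, output_vocab_size).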
bark/model_fine.py ADDED
@@ -0,0 +1,149 @@
1
+ """
2
+ Much of this code is adapted from Andrej Karpathy's NanoGPT
3
+ (https://github.com/karpathy/nanoGPT)
4
+ """
5
+ from dataclasses import dataclass
6
+ import math
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ from torch.nn import functional as F
11
+
12
+ from .model import GPT, GPTConfig, MLP
13
+
14
+
15
+ class NonCausalSelfAttention(nn.Module):
16
+ def __init__(self, config):
17
+ super().__init__()
18
+ assert config.n_embd % config.n_head == 0
19
+ # key, query, value projections for all heads, but in a batch
20
+ self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
21
+ # output projection
22
+ self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
23
+ # regularization
24
+ self.attn_dropout = nn.Dropout(config.dropout)
25
+ self.resid_dropout = nn.Dropout(config.dropout)
26
+ self.n_head = config.n_head
27
+ self.n_embd = config.n_embd
28
+ self.dropout = config.dropout
29
+ # flash attention make GPU go brrrrr but support is only in PyTorch nightly and still a bit scary
30
+ self.flash = (
31
+ hasattr(torch.nn.functional, "scaled_dot_product_attention") and self.dropout == 0.0
32
+ )
33
+
34
+ def forward(self, x):
35
+ B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
36
+
37
+ # calculate query, key, values for all heads in batch and move head forward to be the batch dim
38
+ q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
39
+ k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
40
+ q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
41
+ v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
42
+
43
+ # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
44
+ if self.flash:
45
+ # efficient attention using Flash Attention CUDA kernels
46
+ y = torch.nn.functional.scaled_dot_product_attention(
47
+ q, k, v, attn_mask=None, dropout_p=self.dropout, is_causal=False
48
+ )
49
+ else:
50
+ # manual implementation of attention
51
+ att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
52
+ att = F.softmax(att, dim=-1)
53
+ att = self.attn_dropout(att)
54
+ y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
55
+ y = (
56
+ y.transpose(1, 2).contiguous().view(B, T, C)
57
+ ) # re-assemble all head outputs side by side
58
+
59
+ # output projection
60
+ y = self.resid_dropout(self.c_proj(y))
61
+ return y
62
+
63
+
64
+ class FineBlock(nn.Module):
65
+ def __init__(self, config):
66
+ super().__init__()
67
+ self.ln_1 = nn.LayerNorm(config.n_embd)
68
+ self.attn = NonCausalSelfAttention(config)
69
+ self.ln_2 = nn.LayerNorm(config.n_embd)
70
+ self.mlp = MLP(config)
71
+
72
+ def forward(self, x):
73
+ x = x + self.attn(self.ln_1(x))
74
+ x = x + self.mlp(self.ln_2(x))
75
+ return x
76
+
77
+
78
+ class FineGPT(GPT):
79
+ def __init__(self, config):
80
+ super().__init__(config)
81
+ del self.lm_head
82
+ self.config = config
83
+ self.n_codes_total = config.n_codes_total
84
+ self.transformer = nn.ModuleDict(
85
+ dict(
86
+ wtes=nn.ModuleList(
87
+ [
88
+ nn.Embedding(config.input_vocab_size, config.n_embd)
89
+ for _ in range(config.n_codes_total)
90
+ ]
91
+ ),
92
+ wpe=nn.Embedding(config.block_size, config.n_embd),
93
+ drop=nn.Dropout(config.dropout),
94
+ h=nn.ModuleList([FineBlock(config) for _ in range(config.n_layer)]),
95
+ ln_f=nn.LayerNorm(config.n_embd),
96
+ )
97
+ )
98
+ self.lm_heads = nn.ModuleList(
99
+ [
100
+ nn.Linear(config.n_embd, config.output_vocab_size, bias=False)
101
+ for _ in range(config.n_codes_given, self.n_codes_total)
102
+ ]
103
+ )
104
+ for i in range(self.n_codes_total - config.n_codes_given):
105
+ self.transformer.wtes[i + 1].weight = self.lm_heads[i].weight
106
+
107
+ def forward(self, pred_idx, idx):
108
+ device = idx.device
109
+ b, t, codes = idx.size()
110
+ assert (
111
+ t <= self.config.block_size
112
+ ), f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
113
+ assert pred_idx > 0, "cannot predict 0th codebook"
114
+ assert codes == self.n_codes_total, (b, t, codes)
115
+ pos = torch.arange(0, t, dtype=torch.long, device=device).unsqueeze(0) # shape (1, t)
116
+
117
+ # forward the GPT model itself
118
+ tok_embs = [
119
+ wte(idx[:, :, i]).unsqueeze(-1) for i, wte in enumerate(self.transformer.wtes)
120
+ ] # token embeddings of shape (b, t, n_embd)
121
+ tok_emb = torch.cat(tok_embs, dim=-1)
122
+ pos_emb = self.transformer.wpe(pos) # position embeddings of shape (1, t, n_embd)
123
+ x = tok_emb[:, :, :, : pred_idx + 1].sum(dim=-1)
124
+ x = self.transformer.drop(x + pos_emb)
125
+ for block in self.transformer.h:
126
+ x = block(x)
127
+ x = self.transformer.ln_f(x)
128
+ logits = self.lm_heads[pred_idx - self.config.n_codes_given](x)
129
+ return logits
130
+
131
+ def get_num_params(self, non_embedding=True):
132
+ """
133
+ Return the number of parameters in the model.
134
+ For non-embedding count (default), the position embeddings get subtracted.
135
+ The token embeddings would too, except due to the parameter sharing these
136
+ params are actually used as weights in the final layer, so we include them.
137
+ """
138
+ n_params = sum(p.numel() for p in self.parameters())
139
+ if non_embedding:
140
+ for wte in self.transformer.wtes:
141
+ n_params -= wte.weight.numel()
142
+ n_params -= self.transformer.wpe.weight.numel()
143
+ return n_params
144
+
145
+
146
+ @dataclass
147
+ class FineGPTConfig(GPTConfig):
148
+ n_codes_total: int = 8
149
+ n_codes_given: int = 1
bark/settings.py ADDED
@@ -0,0 +1,7 @@
1
+ import os
2
+
3
+ def initenv(args):
4
+ os.environ['SUNO_USE_SMALL_MODELS'] = str("-smallmodels" in args)
5
+ os.environ['BARK_FORCE_CPU'] = str("-forcecpu" in args)
6
+ os.environ['SUNO_ENABLE_MPS'] = str("-enablemps" in args)
7
+ os.environ['SUNO_OFFLOAD_CPU'] = str("-offloadcpu" in args)
cloning/__init__.py ADDED
File without changes
cloning/clonevoice.py ADDED
@@ -0,0 +1,68 @@
1
+ from bark.generation import load_codec_model, generate_text_semantic, grab_best_device
2
+ from encodec.utils import convert_audio
3
+ from bark.hubert.hubert_manager import HuBERTManager
4
+ from bark.hubert.pre_kmeans_hubert import CustomHubert
5
+ from bark.hubert.customtokenizer import CustomTokenizer
6
+
7
+ import torchaudio
8
+ import torch
9
+ import os
10
+ import gradio
11
+
12
+
13
+ def clone_voice(audio_filepath, dest_filename, progress=gradio.Progress(track_tqdm=True)):
14
+ # if len(text) < 1:
15
+ # raise gradio.Error('No transcription text entered!')
16
+
17
+ use_gpu = False # not os.environ.get("BARK_FORCE_CPU", False)
18
+ progress(0, desc="Loading Codec")
19
+ model = load_codec_model(use_gpu=use_gpu)
20
+
21
+ # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer
22
+ hubert_manager = HuBERTManager()
23
+ hubert_manager.make_sure_hubert_installed()
24
+ hubert_manager.make_sure_tokenizer_installed()
25
+
26
+ # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer
27
+ # Load HuBERT for semantic tokens
28
+
29
+ # Load the HuBERT model
30
+ device = grab_best_device(use_gpu)
31
+ hubert_model = CustomHubert(checkpoint_path='./models/hubert/hubert.pt').to(device)
32
+
33
+ # Load the CustomTokenizer model
34
+ tokenizer = CustomTokenizer.load_from_checkpoint('./models/hubert/en_tokenizer.pth').to(device) # change to the correct path
35
+
36
+ progress(0.25, desc="Converting WAV")
37
+
38
+ # Load and pre-process the audio waveform
39
+ wav, sr = torchaudio.load(audio_filepath)
40
+ if wav.shape[0] == 2: # Stereo to mono if needed
41
+ wav = wav.mean(0, keepdim=True)
42
+
43
+ wav = convert_audio(wav, sr, model.sample_rate, model.channels)
44
+ wav = wav.to(device)
45
+ progress(0.5, desc="Extracting codes")
46
+
47
+ semantic_vectors = hubert_model.forward(wav, input_sample_hz=model.sample_rate)
48
+ semantic_tokens = tokenizer.get_token(semantic_vectors)
49
+
50
+ # Extract discrete codes from EnCodec
51
+ with torch.no_grad():
52
+ encoded_frames = model.encode(wav.unsqueeze(0))
53
+ codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1).squeeze() # [n_q, T]
54
+
55
+ # get seconds of audio
56
+ # seconds = wav.shape[-1] / model.sample_rate
57
+ # generate semantic tokens
58
+ # semantic_tokens = generate_text_semantic(text, max_gen_duration_s=seconds, top_k=50, top_p=.95, temp=0.7)
59
+
60
+ # move codes to cpu
61
+ codes = codes.cpu().numpy()
62
+ # move semantic tokens to cpu
63
+ semantic_tokens = semantic_tokens.cpu().numpy()
64
+
65
+ import numpy as np
66
+ output_path = dest_filename + '.npz'
67
+ np.savez(output_path, fine_prompt=codes, coarse_prompt=codes[:2, :], semantic_prompt=semantic_tokens)
68
+ return ["Finished", output_path]
config.yaml ADDED
@@ -0,0 +1,8 @@
1
+ input_text_desired_length: 110
2
+ input_text_max_length: 170
3
+ selected_theme: JohnSmith9982/small_and_pretty
4
+ server_name: ''
5
+ server_port: 0
6
+ server_share: false
7
+ silence_between_sentences: 250
8
+ silence_between_speakers: 500
packages.txt ADDED
@@ -0,0 +1 @@
1
+ ffmpeg
pyproject.toml ADDED
@@ -0,0 +1,69 @@
1
+ [build-system]
2
+ requires = ["setuptools"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "bark-ui-enhanced"
7
+ version = "0.7.0"
8
+ description = "Bark text to audio model with addition features and a Web UI"
9
+ readme = "README.md"
10
+ requires-python = ">=3.8"
11
+ authors = [
12
+ {name = "Suno Inc (original Bark)", email = "[email protected]"},
13
+ {name = "Count Floyd"},
14
+ ]
15
+ # MIT License
16
+ license = {file = "LICENSE"}
17
+
18
+ dependencies = [
19
+ "boto3",
20
+ "encodec",
21
+ "funcy",
22
+ "huggingface-hub>=0.14.1",
23
+ "numpy",
24
+ "scipy",
25
+ "tokenizers",
26
+ "torch",
27
+ "tqdm",
28
+ "transformers",
29
+ ]
30
+
31
+ [project.urls]
32
+ source = "https://github.com/C0untFloyd/bark-gui"
33
+
34
+ [project.optional-dependencies]
35
+ dev = [
36
+ "bandit",
37
+ "black",
38
+ "codecov",
39
+ "flake8",
40
+ "hypothesis>=6.14,<7",
41
+ "isort>=5.0.0,<6",
42
+ "jupyter",
43
+ "mypy",
44
+ "nbconvert",
45
+ "nbformat",
46
+ "pydocstyle",
47
+ "pylint",
48
+ "pytest",
49
+ "pytest-cov",
50
+ ]
51
+
52
+ [tool.setuptools]
53
+ packages = ["bark"]
54
+
55
+ [tool.setuptools.package-data]
56
+ bark = ["assets/prompts/*.npz", "assets/prompts/v2/*.npz"]
57
+
58
+
59
+ [tool.black]
60
+ line-length = 100
61
+ target-version = ["py37", "py38", "py39", "py310"]
62
+ include = '\.pyi?$'
63
+
64
+ [tool.isort]
65
+ profile = "black"
66
+ line_length = 100
67
+ skip_gitignore = true
68
+ known_first_party = ["df", "libdf", "libdfdata"]
69
+
requirements.txt ADDED
@@ -0,0 +1,18 @@
1
+ fairseq; platform_system != "Windows"
2
+ fairseq@https://github.com/Sharrnah/fairseq/releases/download/v0.12.4/fairseq-0.12.4-cp310-cp310-win_amd64.whl; platform_system == "Windows"
3
+ audiolm-pytorch
4
+ gradio
5
+ funcy
6
+ linkify
7
+ mutagen
8
+ pytorch_seed
9
+ pyyaml
10
+ sentencepiece
11
+ soundfile; platform_system == "Windows"
12
+ sox; platform_system != "Windows"
13
+ transformers
14
+ torch==1.13.0
15
+ torchaudio==0.13
16
+ deepfilternet
17
+ matplotlib
18
+ Pillow
samples/dkitchen.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6bc229639249ce876bd40bbff15eae4553b7c15cdcf0b720ed814062b4956af6
3
+ size 2880044
samples/dliving.wav ADDED
Binary file (960 kB). View file
 
samples/nriver.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1aee2a2cae1f4f0d88f77c5d1616e6fbbc23bad88d287a21700e5b8519426e75
3
+ size 2880044
samples/p232_013_clean.wav ADDED
Binary file (379 kB). View file
 
samples/p232_019_clean.wav ADDED
Binary file (647 kB). View file
 
samples/scafe.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52cf963076f6de7d7c837ad5303b9b6416854558111ac5d6bd23926b38da281e
3
+ size 2880044