|
import gradio as gr |
|
import os |
|
import shutil |
|
|
|
|
|
import numpy as np |
|
from scipy.io import wavfile |
|
""" |
|
model_ids = [ |
|
'suno/bark', |
|
] |
|
|
|
for model_id in model_ids: |
|
model_name = model_id.split('/')[-1] |
|
snapshot_download(model_id, local_dir=f'checkpoints/{model_name}') |
|
""" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from TTS.api import TTS

# Load the multilingual Bark model once at startup; gpu=True assumes a CUDA
# device is present on the host — TODO confirm for the deployment target.
tts = TTS("tts_models/multilingual/multi-dataset/bark", gpu=True)
|
|
|
def infer(prompt, input_wav_file):
    """Clone the voice in ``input_wav_file`` and speak ``prompt`` with it.

    The uploaded WAV is moved into ``bark_voices/<name>/<name>.wav`` — the
    directory layout Coqui TTS expects for Bark voice cloning — then the
    module-level ``tts`` model synthesizes the prompt to ``output.wav``.
    Bark also drops a ``<name>.npz`` speaker embedding in the voice folder.

    Parameters
    ----------
    prompt : str
        Text to synthesize.
    input_wav_file : str
        Path of the uploaded reference WAV (a Gradio temp file; it is MOVED,
        i.e. consumed, by this call).

    Returns
    -------
    tuple[str, str]
        Path of the synthesized ``output.wav`` and path of the generated
        ``.npz`` speaker-embedding file.
    """
    print("SAVING THE AUDIO FILE TO WHERE IT BELONGS")

    # Bark voice cloning expects bark_voices/<speaker_id>/<speaker_id>.wav
    file_name = os.path.splitext(os.path.basename(input_wav_file))[0]
    destination_path = os.path.join("bark_voices", file_name)
    os.makedirs(destination_path, exist_ok=True)
    shutil.move(input_wav_file, os.path.join(destination_path, f"{file_name}.wav"))

    # Synthesize with the cloned speaker; this also writes the .npz embedding
    # into the speaker's folder under voice_dir.
    tts.tts_to_file(text=prompt,
                    file_path="output.wav",
                    voice_dir="bark_voices/",
                    speaker=f"{file_name}")

    contents = os.listdir(f"bark_voices/{file_name}")
    for item in contents:
        print(item)

    # BUG FIX: the original returned contents[1], which relies on
    # os.listdir() ordering — documented as arbitrary. Select the .npz
    # embedding explicitly; fall back to the old positional pick only if
    # no .npz is present.
    npz_name = next(
        (item for item in contents if item.endswith(".npz")),
        contents[1] if len(contents) > 1 else contents[0],
    )
    return "output.wav", f"bark_voices/{file_name}/{npz_name}"
|
|
|
|
|
css = """ |
|
#col-container {max-width: 580px; margin-left: auto; margin-right: auto;} |
|
""" |
|
|
|
# UI layout: a single centered column with prompt text, voice upload,
# submit buttons, and the two outputs wired to infer().
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):

        # Static header / usage instructions.
        # NOTE(review): "Coqui TSS" in the HTML below looks like a typo for
        # "Coqui TTS" — runtime string, left untouched here.
        gr.HTML("""
        <h1 style="text-align: center;">Instant Voice Cloning</h1>
        <p style="text-align: center;">
          Clone any voice in less than 2 minutes with this <a href="https://tts.readthedocs.io/en/dev/models/bark.html" target="_blank">Coqui TSS + Bark</a> demo ! <br />
          Upload a clean 20 seconds WAV file of the voice you want to clone, then click submit ! <br />
        </p>
        """)

        # Text the cloned voice will speak.
        prompt = gr.Textbox(
            label="Text to speech prompt"
        )

        # Reference voice; type="filepath" hands infer() a temp-file path.
        # NOTE(review): source="upload" is Gradio 3.x API (removed in 4.x).
        audio_in = gr.Audio(
            label="WAV voice to clone",
            type="filepath",
            source="upload"
        )

        submit_btn = gr.Button("Submit")
        # Hidden second button — presumably a planned .npz-reuse path that
        # was never wired up; it has no click handler.
        submit_with_npz_btn = gr.Button("Submit 2", visible=False)

        # Synthesized speech in the cloned voice.
        cloned_out = gr.Audio(
            label="Text to speech output"
        )

        # Bark speaker embedding produced during cloning, offered for download.
        npz_file = gr.File(
            label=".npz file"
        )

        # Wire the only active button to infer().
        submit_btn.click(
            fn = infer,
            inputs = [
                prompt,
                audio_in
            ],
            outputs = [
                cloned_out,
                npz_file
            ]
        )

# Queue requests (synthesis is slow) and start the server.
demo.queue().launch()