import os
os.system('cd monotonic_align && python setup.py build_ext --inplace && cd ..')
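
# Note: the shell command above compiles the Cython extension in
# monotonic_align/ (used by VITS for monotonic alignment search). It has to
# finish before the model code below is imported, since that package loads
# the compiled core at import time.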
import librosa
import numpy as np
import torch
from torch import no_grad, LongTensor
import commons
import utils
import gradio as gr
from models import SynthesizerTrn
from text import text_to_sequence
from mel_processing import spectrogram_torch
def get_text(text, hps):
    # Convert raw text to a tensor of symbol ids using the model's text cleaners.
    text_norm = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
    if hps.data.add_blank:
        text_norm = commons.intersperse(text_norm, 0)
    text_norm = LongTensor(text_norm)
    return text_norm
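
# A hedged illustration of what get_text produces when add_blank is set:
# commons.intersperse inserts a blank id (0) between and around every symbol id,
#   [5, 3, 8] -> [0, 5, 0, 3, 0, 8, 0]
# roughly doubling the sequence length, matching how these configs were trained.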
def tts_fn(text, speaker):
    if len(text) > 150:
        return "Error: Text is too long", None
    model, hps = models[model_idx[speaker]]
    speaker_id = speaker_idx[speaker]
    stn_tst = get_text(text, hps)
    with no_grad():
        x_tst = stn_tst.unsqueeze(0)
        x_tst_lengths = LongTensor([stn_tst.size(0)])
        sid = LongTensor([speaker_id])
        audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667,
                            noise_scale_w=0.8, length_scale=1)[0][0, 0].data.cpu().float().numpy()
    return "Success", (hps.data.sampling_rate, audio)
def vc_fn(original_speaker, target_speaker, input_audio):
    if input_audio is None:
        return "Error: You need to upload an audio file", None
    sampling_rate, audio = input_audio
    duration = audio.shape[0] / sampling_rate
    if duration > 30:
        return "Error: Audio is too long", None
    if model_idx[original_speaker] != model_idx[target_speaker]:
        return "Error: Cannot convert voices between speakers of different models", None
    model, hps = models[model_idx[original_speaker]]
    original_speaker_id = speaker_idx[original_speaker]
    target_speaker_id = speaker_idx[target_speaker]
    # Normalize integer PCM to float32 in [-1, 1], downmix to mono, and resample
    # to the rate the model was trained on.
    audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
    if len(audio.shape) > 1:
        audio = librosa.to_mono(audio.transpose(1, 0))
    if sampling_rate != hps.data.sampling_rate:
        audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=hps.data.sampling_rate)
    y = torch.FloatTensor(audio)
    y = y.unsqueeze(0)
    spec = spectrogram_torch(y, hps.data.filter_length,
                             hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length,
                             center=False)
    spec_lengths = LongTensor([spec.size(-1)])
    sid_src = LongTensor([original_speaker_id])
    sid_tgt = LongTensor([target_speaker_id])
    with no_grad():
        audio = model.voice_conversion(spec, spec_lengths, sid_src=sid_src,
                                       sid_tgt=sid_tgt)[0][0, 0].data.cpu().float().numpy()
    return "Success", (hps.data.sampling_rate, audio)
if __name__ == '__main__':
    models = []
    model_idx = []
    speaker_idx = []
    speakers = []
    for i in range(2):
        config_path = f"saved_model/{i}/config.json"
        model_path = f"saved_model/{i}/model.pth"
        hps = utils.get_hparams_from_file(config_path)
        model = SynthesizerTrn(
            len(hps.symbols),
            hps.data.filter_length // 2 + 1,
            hps.train.segment_size // hps.data.hop_length,
            n_speakers=hps.data.n_speakers,
            **hps.model)
        utils.load_checkpoint(model_path, model, None)
        model.eval()
        models.append((model, hps))
        # Flatten every model's speaker list into one global dropdown list and
        # record, per entry, which model and which in-model speaker id it maps to.
        speakers += [f"model{i}/{x}" for x in hps.speakers]
        model_idx += [i] * len(hps.speakers)
        speaker_idx += list(range(len(hps.speakers)))
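
    # Hedged example of the mapping built above, with two hypothetical models
    # of 3 and 2 speakers respectively:
    #   speakers    = ["model0/A", "model0/B", "model0/C", "model1/D", "model1/E"]
    #   model_idx   = [0, 0, 0, 1, 1]    # which model serves each entry
    #   speaker_idx = [0, 1, 2, 0, 1]    # in-model speaker id per entry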
    app = gr.Blocks()
    with app:
        gr.Markdown("# Moe Japanese TTS And Voice Conversion Using VITS Model\n\n"
                    "Unofficial demo for [https://github.com/CjangCjengh/MoeGoe](https://github.com/CjangCjengh/MoeGoe)")
        with gr.Tabs():
            with gr.TabItem("TTS"):
                with gr.Column():
                    tts_input1 = gr.TextArea(label="Text (max 150 characters)", value="こんにちは。")
                    tts_input2 = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[0])
                    tts_submit = gr.Button("Generate", variant="primary")
                    tts_output1 = gr.Textbox(label="Output Message")
                    tts_output2 = gr.Audio(label="Output Audio")
            with gr.TabItem("Voice Conversion"):
                with gr.Column():
                    vc_input1 = gr.Dropdown(label="Original Speaker", choices=speakers, type="index",
                                            value=speakers[0])
                    vc_input2 = gr.Dropdown(label="Target Speaker", choices=speakers, type="index",
                                            value=speakers[1])
                    vc_input3 = gr.Audio(label="Input Audio (max 30 seconds)")
                    vc_submit = gr.Button("Convert", variant="primary")
                    vc_output1 = gr.Textbox(label="Output Message")
                    vc_output2 = gr.Audio(label="Output Audio")
        tts_submit.click(tts_fn, [tts_input1, tts_input2], [tts_output1, tts_output2])
        vc_submit.click(vc_fn, [vc_input1, vc_input2, vc_input3], [vc_output1, vc_output2])
    app.launch()