# Copyright 2022-2023 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from functools import lru_cache

import sherpa_onnx
from huggingface_hub import hf_hub_download


def get_file(
    repo_id: str,
    filename: str,
    subfolder: str = ".",
) -> str:
    """Download a file from a Hugging Face repo and return its local cached path."""
    model_filename = hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        subfolder=subfolder,
    )
    return model_filename


def _get_vits_vctk(repo_id: str, speed: float) -> sherpa_onnx.OfflineTts:
    # Multi-speaker English VITS model trained on VCTK.
    assert repo_id == "csukuangfj/vits-vctk"
    model = get_file(
        repo_id=repo_id,
        filename="vits-vctk.onnx",
        subfolder=".",
    )
    lexicon = get_file(
        repo_id=repo_id,
        filename="lexicon.txt",
        subfolder=".",
    )
    tokens = get_file(
        repo_id=repo_id,
        filename="tokens.txt",
        subfolder=".",
    )

    tts_config = sherpa_onnx.OfflineTtsConfig(
        model=sherpa_onnx.OfflineTtsModelConfig(
            vits=sherpa_onnx.OfflineTtsVitsModelConfig(
                model=model,
                lexicon=lexicon,
                tokens=tokens,
                length_scale=1.0 / speed,
            ),
            provider="cpu",
            debug=True,
            num_threads=2,
        )
    )

    tts = sherpa_onnx.OfflineTts(tts_config)
    return tts


def _get_vits_ljs(repo_id: str, speed: float) -> sherpa_onnx.OfflineTts:
    # Single-speaker English VITS model trained on LJ Speech.
    assert repo_id == "csukuangfj/vits-ljs"
    model = get_file(
        repo_id=repo_id,
        filename="vits-ljs.onnx",
        subfolder=".",
    )
    lexicon = get_file(
        repo_id=repo_id,
        filename="lexicon.txt",
        subfolder=".",
    )
    tokens = get_file(
        repo_id=repo_id,
        filename="tokens.txt",
        subfolder=".",
    )

    tts_config = sherpa_onnx.OfflineTtsConfig(
        model=sherpa_onnx.OfflineTtsModelConfig(
            vits=sherpa_onnx.OfflineTtsVitsModelConfig(
                model=model,
                lexicon=lexicon,
                tokens=tokens,
                length_scale=1.0 / speed,
            ),
            provider="cpu",
            debug=True,
            num_threads=2,
        )
    )

    tts = sherpa_onnx.OfflineTts(tts_config)
    return tts


def _get_vits_piper(repo_id: str, speed: float) -> sherpa_onnx.OfflineTts:
    # Piper/Coqui/MMS/Mimic3 VITS models: no lexicon; most of them need the
    # espeak-ng data directory for phonemization.
    data_dir = "/tmp/espeak-ng-data"
    if "coqui" in repo_id or "vits-mms" in repo_id:
        name = "model"
    elif "piper" in repo_id:
        # e.g. csukuangfj/vits-piper-es_ES-sharvard-medium -> es_ES-sharvard-medium
        n = len("vits-piper-")
        name = repo_id.split("/")[1][n:]
    elif "mimic3" in repo_id:
        n = len("vits-mimic3-")
        name = repo_id.split("/")[1][n:]
    else:
        raise ValueError(f"Unsupported {repo_id}")

    if "vits-coqui-uk-mai" in repo_id or "vits-mms" in repo_id:
        # These models do not use the espeak-ng data directory.
        data_dir = ""

    model = get_file(
        repo_id=repo_id,
        filename=f"{name}.onnx",
        subfolder=".",
    )
    tokens = get_file(
        repo_id=repo_id,
        filename="tokens.txt",
        subfolder=".",
    )

    tts_config = sherpa_onnx.OfflineTtsConfig(
        model=sherpa_onnx.OfflineTtsModelConfig(
            vits=sherpa_onnx.OfflineTtsVitsModelConfig(
                model=model,
                lexicon="",
                data_dir=data_dir,
                tokens=tokens,
                length_scale=1.0 / speed,
            ),
            provider="cpu",
            debug=True,
            num_threads=2,
        )
    )

    tts = sherpa_onnx.OfflineTts(tts_config)
    return tts


def _get_vits_mms(repo_id: str, speed: float) -> sherpa_onnx.OfflineTts:
    # MMS models are configured exactly like the piper-style models above.
    return _get_vits_piper(repo_id, speed)


def _get_vits_zh_aishell3(repo_id: str, speed: float) -> sherpa_onnx.OfflineTts:
    # Multi-speaker Chinese VITS model trained on aishell3; rule.fst provides
    # rule-based text normalization before synthesis.
    assert repo_id == "csukuangfj/vits-zh-aishell3"
    model = get_file(
        repo_id=repo_id,
        filename="vits-aishell3.onnx",
        subfolder=".",
    )
    lexicon = get_file(
        repo_id=repo_id,
        filename="lexicon.txt",
        subfolder=".",
    )
    tokens = get_file(
        repo_id=repo_id,
        filename="tokens.txt",
        subfolder=".",
    )
    rule_fst = get_file(
        repo_id=repo_id,
        filename="rule.fst",
        subfolder=".",
    )

    tts_config = sherpa_onnx.OfflineTtsConfig(
        model=sherpa_onnx.OfflineTtsModelConfig(
            vits=sherpa_onnx.OfflineTtsVitsModelConfig(
                model=model,
                lexicon=lexicon,
                tokens=tokens,
                length_scale=1.0 / speed,
            ),
            provider="cpu",
            debug=True,
            num_threads=2,
        ),
        rule_fsts=rule_fst,
    )

    tts = sherpa_onnx.OfflineTts(tts_config)
    return tts


def _get_vits_hf(repo_id: str, speed: float) -> sherpa_onnx.OfflineTts:
    # Derive the ONNX filename from the repo id: a few repos name the model
    # after the full repo name, the rest after the last dash-separated part.
    if "fanchen" in repo_id or "vits-cantonese-hf-xiaomaiiwn" in repo_id:
        model = repo_id.split("/")[-1]
    else:
        model = repo_id.split("-")[-1]

    model = get_file(
        repo_id=repo_id,
        filename=f"{model}.onnx",
        subfolder=".",
    )
    lexicon = get_file(
        repo_id=repo_id,
        filename="lexicon.txt",
        subfolder=".",
    )
    tokens = get_file(
        repo_id=repo_id,
        filename="tokens.txt",
        subfolder=".",
    )
    rule_fst = get_file(
        repo_id=repo_id,
        filename="rule.fst",
        subfolder=".",
    )

    tts_config = sherpa_onnx.OfflineTtsConfig(
        model=sherpa_onnx.OfflineTtsModelConfig(
            vits=sherpa_onnx.OfflineTtsVitsModelConfig(
                model=model,
                lexicon=lexicon,
                tokens=tokens,
                length_scale=1.0 / speed,
            ),
            provider="cpu",
            debug=True,
            num_threads=2,
        ),
        rule_fsts=rule_fst,
    )

    tts = sherpa_onnx.OfflineTts(tts_config)
    return tts


@lru_cache(maxsize=10)
def get_pretrained_model(repo_id: str, speed: float) -> sherpa_onnx.OfflineTts:
    # Cache constructed engines so repeated requests for the same
    # (repo_id, speed) do not rebuild the model.
    if repo_id in spanish_models:
        return spanish_models[repo_id](repo_id, speed)
    else:
        raise ValueError(f"Unsupported repo_id: {repo_id}")


spanish_models = {
    "csukuangfj/vits-piper-es_ES-sharvard-medium": _get_vits_piper,  # 2 speakers
}

language_to_models = {
    "Spanish": list(spanish_models.keys()),
}
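

# Minimal usage sketch: it assumes sherpa_onnx's OfflineTts.generate() API and
# the soundfile package (an extra dependency) for writing the result; the text
# and output filename below are illustrative only.
if __name__ == "__main__":
    import soundfile as sf

    tts = get_pretrained_model(
        "csukuangfj/vits-piper-es_ES-sharvard-medium", speed=1.0
    )
    # sid selects the speaker for multi-speaker models (this one has 2).
    audio = tts.generate("Hola, ¿cómo estás?", sid=0)
    sf.write("generated.wav", audio.samples, samplerate=audio.sample_rate)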