Update app.py
app.py
CHANGED
@@ -36,39 +36,54 @@ from TTS.utils.generic_utils import get_user_data_dir
 HF_TOKEN = os.environ.get("HF_TOKEN")

 from huggingface_hub import HfApi
+import os
+from TTS.utils.manage import get_user_data_dir  # Make sure this function is imported if it wasn't already

-#
+# Authentication with the Hugging Face API
 api = HfApi(token=HF_TOKEN)
-repo_id = "
+repo_id = "Blakus/XTTS_custom"

-#
-
-
-print("Make ffmpeg binary executable")
-st = os.stat("ffmpeg")
-os.chmod("ffmpeg", st.st_mode | stat.S_IEXEC)
+# Define the local directory using the same path as the original code
+model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
+local_dir = os.path.join(get_user_data_dir("tts"), model_name.replace("/", "--"))

-#
-
-from TTS.utils.manage import ModelManager
+# Create the directory if it does not exist
+os.makedirs(local_dir, exist_ok=True)

-
-
-
-
+# List of required files
+files_to_download = ["config.json", "model.pth", "vocab.json"]
+
+# Download each file from the repository
+for file_name in files_to_download:
+    print(f"Downloading {file_name} from {repo_id}")
+    api.download_file(
+        repo_id=repo_id,
+        filename=file_name,
+        local_dir=local_dir,
+        repo_type="model"  # Can be 'model', 'dataset' or 'space'
+    )
+
+# Load the configuration and model
+config_path = os.path.join(local_dir, "config.json")
+checkpoint_path = os.path.join(local_dir, "model.pth")
+vocab_path = os.path.join(local_dir, "vocab.json")

 config = XttsConfig()
-config.load_json(
+config.load_json(config_path)

 model = Xtts.init_from_config(config)
 model.load_checkpoint(
     config,
-    checkpoint_path=
-    vocab_path=
+    checkpoint_path=checkpoint_path,
+    vocab_path=vocab_path,
     eval=True,
-    use_deepspeed=
+    use_deepspeed=False,  # Make sure it does not try to use DeepSpeed
 )
-
+
+# Do not move the model to the GPU; remove or comment out this line:
+# model.cuda()
+
+print("Model loaded on CPU")

 # This is for debugging purposes only
 DEVICE_ASSERT_DETECTED = 0
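For reference, here is a minimal sketch of the download-and-load flow this commit introduces, assuming the Blakus/XTTS_custom repo contains config.json, model.pth, and vocab.json. It uses huggingface_hub's hf_hub_download helper rather than the HfApi call shown in the diff, reuses the cache path built from get_user_data_dir (as the commit does), and keeps the model on CPU; import paths follow the usual Coqui TTS layout and are not part of this diff.

import os

from huggingface_hub import hf_hub_download
from TTS.tts.configs.xtts_config import XttsConfig
from TTS.tts.models.xtts import Xtts
from TTS.utils.generic_utils import get_user_data_dir

HF_TOKEN = os.environ.get("HF_TOKEN")
repo_id = "Blakus/XTTS_custom"

# Reuse the cache layout that TTS would use for this model name
model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
local_dir = os.path.join(get_user_data_dir("tts"), model_name.replace("/", "--"))
os.makedirs(local_dir, exist_ok=True)

# Fetch each required file from the Hub into the local directory
for file_name in ("config.json", "model.pth", "vocab.json"):
    hf_hub_download(
        repo_id=repo_id,
        filename=file_name,
        local_dir=local_dir,
        token=HF_TOKEN,
    )

# Load the config and checkpoint on CPU (no model.cuda(), no DeepSpeed)
config = XttsConfig()
config.load_json(os.path.join(local_dir, "config.json"))

model = Xtts.init_from_config(config)
model.load_checkpoint(
    config,
    checkpoint_path=os.path.join(local_dir, "model.pth"),
    vocab_path=os.path.join(local_dir, "vocab.json"),
    eval=True,
    use_deepspeed=False,
)
print("Model loaded on CPU")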