Update app.py
app.py CHANGED
@@ -11,6 +11,7 @@ import numpy as np
 import spaces
 import logging
 from huggingface_hub import login
+import threading
 
 
 torch._dynamo.config.disable = True
@@ -28,36 +29,38 @@ if hf_token:
 tts_model = None
 speakers_dict = None
 model_initialized = False
+model_initialized_lock = threading.Lock()
 
 @spaces.GPU()
 def initialize_model():
     """Initialize the TTS model and speakers - called once with GPU context"""
-    global tts_model, speakers_dict, model_initialized
-
-    if not model_initialized:
-        logger.info("Initializing Bambara TTS model...")
-
-        try:
-            from maliba_ai.tts.inference import BambaraTTSInference
-            from maliba_ai.config.speakers import Adame, Moussa, Bourama, Modibo, Seydou
-
-
-            tts_model = BambaraTTSInference()
-
-            speakers_dict = {
-                "Adame": Adame,
-                "Moussa": Moussa,
-                "Bourama": Bourama,
-                "Modibo": Modibo,
-                "Seydou": Seydou
-            }
-
-            model_initialized = True
-            logger.info("Model initialized successfully!")
-
-        except Exception as e:
-            logger.error(f"Failed to initialize model: {e}")
-            raise e
+    global tts_model, speakers_dict, model_initialized, model_initialized_lock
+
+    with model_initialized_lock :
+        if not model_initialized:
+            logger.info("Initializing Bambara TTS model...")
+
+            try:
+                from maliba_ai.tts.inference import BambaraTTSInference
+                from maliba_ai.config.speakers import Adame, Moussa, Bourama, Modibo, Seydou
+
+
+                tts_model = BambaraTTSInference()
+
+                speakers_dict = {
+                    "Adame": Adame,
+                    "Moussa": Moussa,
+                    "Bourama": Bourama,
+                    "Modibo": Modibo,
+                    "Seydou": Seydou
+                }
+
+                model_initialized = True
+                logger.info("Model initialized successfully!")
+
+            except Exception as e:
+                logger.error(f"Failed to initialize model: {e}")
+                raise e
 
     return tts_model, speakers_dict
 
@@ -78,14 +81,14 @@ def validate_inputs(text, temperature, top_k, top_p, max_tokens):
 
 @spaces.GPU()
 def generate_speech(text, speaker_name, use_advanced, temperature, top_k, top_p, max_tokens):
+    global tts_model, speakers_dict, model_initialized, model_initialized_lock
     if not text.strip():
        return None, "Please enter some Bambara text."
 
     try:
+        with model_initialized_lock :
+            if not model_initialized :
+                tts, speakers = initialize_model()
 
         speaker = speakers[speaker_name]
 
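
The change guards the lazy model load with a module-level threading.Lock: both initialize_model() and generate_speech() now read and set model_initialized only while holding model_initialized_lock, so two requests arriving at the same time cannot both construct the TTS model. A minimal, self-contained sketch of the same pattern follows; load_model() is a hypothetical stand-in for BambaraTTSInference(), and the @spaces.GPU() context is omitted.

import threading

_model = None
_initialized = False
_init_lock = threading.Lock()

def load_model():
    # Hypothetical stand-in for the expensive load (BambaraTTSInference() in the app).
    return object()

def get_model():
    global _model, _initialized
    with _init_lock:          # only one thread at a time enters the init section
        if not _initialized:  # flag is checked under the lock, so the load runs once
            _model = load_model()
            _initialized = True
    return _model

# Concurrent callers all receive the same instance and trigger a single load.
threads = [threading.Thread(target=get_model) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()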