Update app.py
app.py CHANGED
@@ -6,7 +6,7 @@ import numpy as np
 import wave
 
 #tts
-from balacoon_tts import TTS
+from balacoon_tts import TTS
 from threading import Lock
 from huggingface_hub import hf_hub_download, list_repo_files
 from pydub import AudioSegment
@@ -28,9 +28,6 @@ for name in list_repo_files(repo_id="balacoon/tts"):
         local_dir=os.getcwd(),
     )
 
-# locker that disallow access to the tts object from more then one thread
-locker = Lock()
-
 #client
 client = Groq(
     api_key=os.getenv("GROQ_API_KEY"),
@@ -101,15 +98,14 @@ async def greet(product,description):
     if response.choices[0].message.content != "not moderated":
         a_list = ["Sorry, I can't proceed for generating marketing email. Your content needs to be moderated first. Thank you!"]
         tts = TTS(os.path.join(os.getcwd(), tts_model_str))
-
-
-
-
-
-
-
-
-        yield gr.Audio(value=(tts.get_sampling_rate(), samples))
+        speakers = tts.get_speakers()
+        if len(a_list[0]) > 1024:
+            # truncate the text
+            text_str = a_list[0][:1024]
+        else:
+            text_str = a_list[0]
+        samples = tts.synthesize(text_str, speakers[-1])
+        yield gr.Audio(value=(tts.get_sampling_rate(), samples))
     else:
         output = llm.create_chat_completion(
             messages=[
@@ -128,16 +124,11 @@ async def greet(product,description):
         for chunk in output:
             delta = chunk['choices'][0]['delta']
             if 'content' in delta:
-
-
-
-
-
-                samples = tts.synthesize_chunk(utterance, tts.get_speakers()[-1])
-                if len(samples) == 0:
-                    # all the samples were already generated
-                    break
-                yield gr.Audio(value=(tts.get_sampling_rate(), samples))
+                tts = TTS(os.path.join(os.getcwd(), tts_model_str))
+                speakers = tts.get_speakers()
+                samples = tts.synthesize(delta.get('content', ''), speakers[-1])
+                yield (tts.get_sampling_rate(), samples)
+
 
 demo = gr.Interface(fn=greet, inputs=["text","text"], outputs=gr.Audio(), concurrency_limit=10)
 demo.launch()
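
For context, a minimal sketch of the per-chunk synthesis pattern the updated greet() now follows: build a TTS instance from the downloaded addon, pick a speaker, synthesize each text fragment, and yield (sampling_rate, samples) tuples for the gr.Audio output. It only uses calls that appear in the diff (TTS, get_speakers, synthesize, get_sampling_rate); the addon file name and the stream_tts/fragments names are illustrative stand-ins, not taken from app.py.

import os
from balacoon_tts import TTS

# Placeholder addon name; app.py derives tts_model_str from the balacoon/tts repo listing.
tts_model_str = "en_us_hifi_jets_cpu.addon"

def stream_tts(fragments):
    # One TTS instance per call, mirroring the diff, which drops the shared Lock.
    tts = TTS(os.path.join(os.getcwd(), tts_model_str))
    speakers = tts.get_speakers()
    for fragment in fragments:
        if not fragment:
            continue
        # Mirror the 1024-character truncation app.py applies to the moderation message.
        samples = tts.synthesize(fragment[:1024], speakers[-1])
        yield (tts.get_sampling_rate(), samples)

# Example: feed it the text deltas produced by the LLM stream.
for rate, samples in stream_tts(["Hello there.", "This is the marketing email body."]):
    print(rate, len(samples))

Note that the commit also removes the module-level locker = Lock(); since each call now constructs its own TTS object, the handler no longer appears to need a cross-thread lock, and the sketch follows that per-call pattern.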