Update app.py
app.py
CHANGED
@@ -12,10 +12,9 @@ from huggingface_hub import hf_hub_download, list_repo_files
 #tts cpu model
 tts_model_str = "en_us_hifi_jets_cpu.addon"
 
-model_repo_dir = "/data"
 for name in list_repo_files(repo_id="balacoon/tts"):
     if name == tts_model_str:
-        if not os.path.isfile(os.path.join(model_repo_dir, name)):
+        if not os.path.isfile(os.path.join(os.getcwd(), name)):
             hf_hub_download(
                 repo_id="balacoon/tts",
                 filename=name,
@@ -97,7 +96,7 @@ async def greet(product,description):
     response = client.chat.completions.create(model=guard_llm, messages=messages, temperature=0)
     if response.choices[0].message.content != "not moderated":
         audio_stream = BytesIO()
-        tts = TTS(os.path.join(model_repo_dir, tts_model_str))
+        tts = TTS(os.path.join(os.getcwd(), tts_model_str))
         a_list = ["Sorry, I can't proceed for generating marketing email. Your content needs to be moderated first. Thank you!"]
         with locker:
             audio_stream.write(tts.synthesize(a_list[0], speaker_str))
@@ -105,7 +104,7 @@ async def greet(product,description):
         yield audio_stream
     else:
         audio_stream = BytesIO()
-        tts = TTS(os.path.join(model_repo_dir, tts_model_str))
+        tts = TTS(os.path.join(os.getcwd(), tts_model_str))
         output = llm.create_chat_completion(
             messages=[
                 {
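The change points both the download check and the TTS() constructor at the current working directory instead of the removed /data path. For reference, a minimal standalone sketch of that download step is below; the trailing hf_hub_download arguments are truncated in the diff, so the local_dir keyword here is an assumption for illustration, not the Space's confirmed call.

# Minimal sketch of the post-change download step (assumes the balacoon/tts repo
# layout shown above; local_dir is an assumed argument, since the remaining
# hf_hub_download arguments are cut off in the diff).
import os

from huggingface_hub import hf_hub_download, list_repo_files

tts_model_str = "en_us_hifi_jets_cpu.addon"  # tts cpu model

for name in list_repo_files(repo_id="balacoon/tts"):
    if name == tts_model_str:
        # Skip the download if the addon already sits in the working directory.
        if not os.path.isfile(os.path.join(os.getcwd(), name)):
            hf_hub_download(
                repo_id="balacoon/tts",
                filename=name,
                local_dir=os.getcwd(),  # assumption: fetch the file next to app.py
            )

With the addon stored in the working directory, the os.path.isfile check and TTS(os.path.join(os.getcwd(), tts_model_str)) both resolve to the same local file, which is what this commit relies on.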