Update app.py
app.py CHANGED

@@ -6,7 +6,7 @@ import numpy as np
 import wave
 
 #tts
-from balacoon_tts import TTS
+from balacoon_tts import TTS, SpeechUtterance
 from threading import Lock
 from huggingface_hub import hf_hub_download, list_repo_files
 from pydub import AudioSegment

@@ -100,16 +100,16 @@ async def greet(product,description):
     response = client.chat.completions.create(model=guard_llm, messages=messages, temperature=0)
     if response.choices[0].message.content != "not moderated":
         a_list = ["Sorry, I can't proceed for generating marketing email. Your content needs to be moderated first. Thank you!"]
-
-
-
-
-
-
-
-
-
-
+        tts = TTS(os.path.join(os.getcwd(), tts_model_str))
+        # create an utterance, which will hold the synthesis state
+        utterance = SpeechUtterance(a_list[0])
+        # run synthesis in a loop
+        while True:
+            samples = tts.synthesize_chunk(utterance, tts.get_speakers()[-1])
+            if len(samples) == 0:
+                # all the samples were already generated
+                break
+            yield gr.Audio(value=(tts.get_sampling_rate(), samples))
     else:
         output = llm.create_chat_completion(
             messages=[

@@ -124,14 +124,20 @@ async def greet(product,description):
             stream=True
         )
         partial_message = ""
+        tts = TTS(os.path.join(os.getcwd(), tts_model_str))
         for chunk in output:
             delta = chunk['choices'][0]['delta']
             if 'content' in delta:
-
-
-
-
-
+                partial_message = partial_message + delta.get('content', '')
+                # create an utterance, which will hold the synthesis state
+                utterance = SpeechUtterance(partial_message)
+                # run synthesis in a loop
+                while True:
+                    samples = tts.synthesize_chunk(utterance, tts.get_speakers()[-1])
+                    if len(samples) == 0:
+                        # all the samples were already generated
+                        break
+                    yield gr.Audio(value=(tts.get_sampling_rate(), samples))
 
 demo = gr.Interface(fn=greet, inputs=["text","text"], outputs=gr.Audio(), concurrency_limit=10)
 demo.launch()
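
For reference, the chunked-synthesis pattern this commit introduces can be exercised outside the Space. The sketch below is a minimal, assumption-laden example rather than the Space's actual app: "model.addon" is a placeholder for whichever balacoon voice addon tts_model_str points to, and it only uses calls that already appear in the diff (TTS, SpeechUtterance, synthesize_chunk, get_speakers, get_sampling_rate).

# Minimal sketch of the streaming TTS loop added in this commit.
# Assumption: "model.addon" stands in for the balacoon addon file that the
# Space downloads into its working directory (tts_model_str in app.py).
import os

import gradio as gr
from balacoon_tts import TTS, SpeechUtterance


def speak(text):
    # load the voice addon from the current working directory
    tts = TTS(os.path.join(os.getcwd(), "model.addon"))
    speaker = tts.get_speakers()[-1]   # same speaker choice as in the diff
    utterance = SpeechUtterance(text)  # holds the synthesis state
    while True:
        samples = tts.synthesize_chunk(utterance, speaker)
        if len(samples) == 0:
            # all the samples were already generated
            break
        # hand each chunk to Gradio as (sampling_rate, samples)
        yield gr.Audio(value=(tts.get_sampling_rate(), samples))


demo = gr.Interface(fn=speak, inputs="text", outputs=gr.Audio())
demo.launch()

Note that with a plain gr.Audio() output, each yielded chunk replaces the previous one in the UI; if the installed Gradio version supports it, setting streaming=True on the output Audio component should instead play the chunks back-to-back as they arrive.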