Update app.py
app.py CHANGED
@@ -45,26 +45,6 @@ llm = Llama.from_pretrained(
     verbose=False
 )
 
-def text_to_speech(text):
-    with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as temp_file:
-        with locker:
-            samples = tts.synthesize(text, "92")
-            output_file = temp_file.name
-            with wave.open(f"{output_file}", "w") as fp:
-                fp.setparams((1, 2, tts.get_sampling_rate(), len(samples), "NONE", "NONE"))
-                samples = np.ascontiguousarray(samples)
-                fp.writeframes(samples)
-            return output_file
-
-
-def combine_audio_files(audio_files):
-    combined = AudioSegment.empty()
-    for audio_file in audio_files:
-        segment = AudioSegment.from_wav(audio_file)
-        combined += segment
-        os.remove(audio_file)  # Remove temporary files
-    return combined
-
 #guardrail model
 guard_llm = "llama-3.1-8b-instant"
 
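The deleted text_to_speech helper wrote the synthesizer's int16 samples into a temporary WAV file with the standard-library wave module; the setparams tuple is (nchannels, sampwidth, framerate, nframes, comptype, compname), i.e. mono, 16-bit PCM at the engine's sampling rate. A minimal standalone sketch of the same wave-module pattern, for reference; the function name write_wav and the 16000 Hz rate are illustrative, not taken from app.py:

import wave
import numpy as np

def write_wav(path, samples, sample_rate):
    # samples: mono int16 PCM, as the synthesizer in app.py returns
    samples = np.ascontiguousarray(samples, dtype=np.int16)
    with wave.open(path, "w") as fp:
        # (nchannels, sampwidth, framerate, nframes, comptype, compname)
        fp.setparams((1, 2, sample_rate, len(samples), "NONE", "NONE"))
        fp.writeframes(samples.tobytes())

write_wav("/tmp/utterance.wav", np.zeros(16000, dtype=np.int16), 16000)
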
@@ -119,14 +99,11 @@ async def greet(product,description):
     ]
     response = client.chat.completions.create(model=guard_llm, messages=messages, temperature=0)
     if response.choices[0].message.content != "not moderated":
-        audio_files = []
         a_list = ["Sorry, I can't proceed for generating marketing email. Your content needs to be moderated first. Thank you!"]
-
-
-
-        yield (final_audio.frame_rate, final_audio)
+        with locker:
+            samples = tts.synthesize(a_list[0], "92")
+            yield (tts.get_sampling_rate(), samples)
     else:
-        audio_files = []
         output = llm.create_chat_completion(
             messages=[
                 {
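Both new branches synthesize inside "with locker:". The locker object itself is defined earlier in app.py and is outside this diff; presumably it is a module-level threading.Lock that serializes access to the shared TTS engine, since the interface is created with concurrency_limit=10 and several requests can reach the handler at once. A sketch of that arrangement under assumed names (the lock, the _FakeTTS stand-in, and synthesize_locked are illustrative, not part of app.py):

import threading
import numpy as np

locker = threading.Lock()  # assumed: one module-level lock shared by all requests

class _FakeTTS:
    # stand-in for the real engine in app.py, which exposes the same two methods
    def get_sampling_rate(self):
        return 24000
    def synthesize(self, text, speaker):
        return np.zeros(self.get_sampling_rate() // 2, dtype=np.int16)

tts = _FakeTTS()

def synthesize_locked(text, speaker="92"):
    # serialize calls: a single native synthesizer is typically not safe
    # to drive from several request threads at the same time
    with locker:
        samples = tts.synthesize(text, speaker)
    return tts.get_sampling_rate(), samples
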
@@ -143,10 +120,9 @@ async def greet(product,description):
         for chunk in output:
             delta = chunk['choices'][0]['delta']
             if 'content' in delta:
-
-
-
-                yield (final_audio.frame_rate, final_audio)
+                with locker:
+                    samples = tts.synthesize(delta.get('content', ''), "92")
+                    yield (tts.get_sampling_rate(), samples)
 
 demo = gr.Interface(fn=greet, inputs=["text","text"], outputs=gr.Audio(), concurrency_limit=10)
 demo.launch()
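After this change the handler no longer builds a WAV on disk and concatenates segments with pydub: each branch synthesizes and yields a (sampling_rate, samples) tuple directly to the gr.Audio output. A minimal sketch of that generator pattern, with hedged stand-ins: fake_synthesize replaces tts.synthesize(text, "92"), the 24000 Hz rate is illustrative, and the output here sets streaming=True so yielded chunks play as they arrive (the Space's own interface constructs gr.Audio() without that flag):

import gradio as gr
import numpy as np

SR = 24000  # illustrative sampling rate

def fake_synthesize(text):
    # stand-in for the synthesizer: returns mono int16 PCM samples
    return np.zeros(SR // 4, dtype=np.int16)

def speak(product, description):
    # generator handler: each yield is a (sampling_rate, samples) tuple,
    # the same shape of value the updated greet() yields
    for sentence in [f"{product}.", description]:
        yield SR, fake_synthesize(sentence)

demo = gr.Interface(
    fn=speak,
    inputs=["text", "text"],
    outputs=gr.Audio(streaming=True),  # plays chunks as they are yielded
)

if __name__ == "__main__":
    demo.launch()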