Update app.py
app.py
CHANGED
@@ -82,14 +82,19 @@ async def chat_with_llm(file: UploadFile = File(...)):
     print("user_message" + user_message.decode("utf-8") )
     # Send to LLM
     chat_completion = client.chat.completions.create(
-        messages=[
+        messages=[
+            {
+                "role": "system",
+                "content": "you are a helpful assistant. answer alway in one sentence"
+            },
+            {"role": "user", "content": user_message.decode("utf-8")}],
         model="llama-3.3-70b-versatile",
     )
     llm_response = chat_completion.choices[0].message.content
-
+    print(llm_response)
     # Convert response to audio
     """Convert text to a WAV audio file using ggwave and return as response."""
-    encoded_waveform = ggwave.encode(
+    encoded_waveform = ggwave.encode(llm_response , protocolId=1, volume=100)

     # Convert byte data into float32 array
     waveform_float32 = np.frombuffer(encoded_waveform, dtype=np.float32)
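Taken together, the hunk adds a one-sentence system prompt to the chat completion request, logs the LLM reply, and feeds that reply into ggwave with protocolId=1 and volume=100. A minimal sketch of that flow follows, assuming the Space uses the Groq Python client (suggested by the llama-3.3-70b-versatile model name) and writes the float32 samples to WAV with the soundfile package at ggwave's default 48 kHz rate; GROQ_API_KEY and the two helper names are illustrative, not taken from app.py.

import io
import os

import ggwave
import numpy as np
import soundfile as sf
from groq import Groq

# Assumption: the API key comes from the environment, as is typical for a Space secret.
client = Groq(api_key=os.environ["GROQ_API_KEY"])

def ask_llm(user_message: bytes) -> str:
    # Mirror the committed request: system prompt first, then the decoded user message.
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": "you are a helpful assistant. answer alway in one sentence",
            },
            {"role": "user", "content": user_message.decode("utf-8")},
        ],
        model="llama-3.3-70b-versatile",
    )
    return chat_completion.choices[0].message.content

def text_to_wav_bytes(text: str) -> bytes:
    # ggwave.encode returns raw float32 PCM bytes; protocolId and volume match the commit.
    encoded_waveform = ggwave.encode(text, protocolId=1, volume=100)
    waveform_float32 = np.frombuffer(encoded_waveform, dtype=np.float32)
    buf = io.BytesIO()
    # 48 kHz is ggwave's default output sample rate (an assumption about this Space's settings).
    sf.write(buf, waveform_float32, samplerate=48000, format="WAV", subtype="FLOAT")
    return buf.getvalue()

From the endpoint, the two helpers compose as text_to_wav_bytes(ask_llm(user_message)), and the resulting bytes can be returned as an audio/wav response.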