Spaces:
Sleeping
Sleeping
Sam
committed on
Commit
·
102b12e
1
Parent(s):
a6c6d28
Addressing `async for` error to enable streaming responses in Chainlit chatbot
Browse files
app.py
CHANGED
@@ -161,21 +161,11 @@ async def start_chat():
|
|
161 |
async def handle_message(message: cl.Message):
|
162 |
settings = cl.user_session.get("settings")
|
163 |
|
164 |
-
|
165 |
-
|
166 |
-
|
167 |
-
|
168 |
-
|
169 |
-
# Create a generator from the RAG chain
|
170 |
-
response_generator = retrieval_augmented_qa_chain.stream({"question": message.content})
|
171 |
-
|
172 |
-
async for response_chunk in response_generator:
|
173 |
-
# Extract the content from the chunk
|
174 |
-
chunk_content = response_chunk.get("response", {}).get("content", "")
|
175 |
-
if chunk_content:
|
176 |
-
# Append the chunk to the streaming message content
|
177 |
-
stream_msg.content += chunk_content
|
178 |
-
await stream_msg.update() # Update the message in Chainlit
|
179 |
|
180 |
## Remove to stream the response
|
181 |
# response = retrieval_augmented_qa_chain.invoke({"question": message.content})
|
|
|
161 |
async def handle_message(message: cl.Message):
|
162 |
settings = cl.user_session.get("settings")
|
163 |
|
164 |
+
# Stream the response as it is generated
|
165 |
+
async with cl.Message(content="") as msg:
|
166 |
+
async for token in retrieval_augmented_qa_chain.stream({"question": message.content}):
|
167 |
+
# Append each token to the message
|
168 |
+
await msg.stream_token(token)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
169 |
|
170 |
## Remove to stream the response
|
171 |
# response = retrieval_augmented_qa_chain.invoke({"question": message.content})
|