Update Chainlit output
app.py CHANGED

@@ -94,11 +94,12 @@ async def main(message):
     # Call the LLM with the formatted prompt
     # response = llm.invoke(formatted_prompt)
     #
+    MAX_PREVIEW_LENGTH = 100
+
     response = retrieval_augmented_qa_chain.invoke({"question" : message.content })
     answer_content = response["response"].content
     msg = cl.Message(content="")
-
-    # print(f"Number of found context: {len(response['context'])}")
+
     for i in range(0, len(answer_content), 50): # Adjust chunk size (e.g., 50 characters)
         chunk = answer_content[i:i+50]
         await msg.stream_token(chunk)
@@ -109,4 +110,23 @@ async def main(message):
     context_documents = response["context"]
     num_contexts = len(context_documents)
     context_msg = f"Number of found context: {num_contexts}"
-
+
+
+    await cl.Message(content=context_msg).send()
+
+    for doc in context_documents:
+        document_title = doc.metadata.get("source", "Unknown Document")
+        document_id = doc.metadata.get("document_id", "Unknown ID")
+        chunk_number = doc.metadata.get("chunk_number", "Unknown Chunk")
+
+        document_context = doc.page_content.strip()
+        truncated_context = document_context[:MAX_PREVIEW_LENGTH] + ("..." if len(document_context) > MAX_PREVIEW_LENGTH else "")
+        print("----------------------------------------")
+        print(truncated_context)
+
+        await cl.Message(
+            content=f"**{document_title} ( Chunk: {chunk_number})**",
+            elements=[
+                cl.Text(content=truncated_context, display="inline")
+            ]
+        ).send()