mainfile cgpt 6
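Moves the inline streaming loop out of app() into a dedicated stream_response coroutine and drives it with asyncio.run; the previous async for sat directly inside the synchronous app(), which is invalid Python.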
app.py CHANGED
@@ -1,5 +1,6 @@
 import streamlit as st
 import os
+import asyncio
 from langchain.vectorstores import Chroma
 from langchain.embeddings import HuggingFaceBgeEmbeddings
 from langchain_together import Together
@@ -81,6 +82,19 @@ def create_conversational_qa_chain(retriever, condense_llm, answer_llm):
         generate_answer=answer_chain
     )

+# Asynchronous function to handle streaming responses
+async def stream_response(conversational_qa_chain, prompts2, chistory):
+    response_chunks = []
+    async for chunk in conversational_qa_chain.astream(
+        {
+            "question": prompts2,
+            "chat_history": chistory,
+        }
+    ):
+        response_chunks.append(chunk['generate_answer'])
+        st.write("".join(response_chunks))
+    return "".join(response_chunks)
+
 # Define the Streamlit app
 def app():
     with st.sidebar:
@@ -130,17 +144,7 @@ def app():
         conversational_qa_chain = create_conversational_qa_chain(retriever, llmc, llm)
         with st.chat_message("assistant"):
             with st.spinner("Thinking..."):
-                response_chunks = []
-                async for chunk in conversational_qa_chain.astream(
-                    {
-                        "question": prompts2,
-                        "chat_history": chistory,
-                    }
-                ):
-                    response_chunks.append(chunk['generate_answer'])
-                    st.write("".join(response_chunks))
-
-                final_response = "".join(response_chunks)
+                final_response = asyncio.run(stream_response(conversational_qa_chain, prompts2, chistory))
                 message = {"role": "assistant", "content": final_response}
                 st.session_state.messages.append(message)

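For reference, the pattern this commit lands on is an async generator consumed by a coroutine and driven from synchronous code via asyncio.run. A minimal self-contained sketch of the same shape, with a hypothetical FakeChain standing in for the LangChain chain and print() standing in for st.write():

import asyncio

# Hypothetical stand-in for the conversational QA chain: astream() is an
# async generator yielding incremental {"generate_answer": ...} chunks,
# mirroring the shape stream_response() expects above.
class FakeChain:
    async def astream(self, inputs):
        for token in ["Hello", ", ", "world", "!"]:
            await asyncio.sleep(0)  # yield control, as a real token stream would
            yield {"generate_answer": token}

async def stream_response(chain, question, chat_history):
    response_chunks = []
    async for chunk in chain.astream({"question": question, "chat_history": chat_history}):
        response_chunks.append(chunk["generate_answer"])
        print("".join(response_chunks))  # the app writes partial output with st.write() here
    return "".join(response_chunks)

# Synchronous caller, mirroring the Streamlit handler above.
final_response = asyncio.run(stream_response(FakeChain(), "hi", []))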