mainfile cgpt 5
app.py
CHANGED
@@ -67,7 +67,7 @@ def store_chat_history(role: str, content: str):
 def create_conversational_qa_chain(retriever, condense_llm, answer_llm):
     condense_question_chain = RunnableLambda(
         lambda x: {"chat_history": chistory, "question": x['question']}
-    ) | CONDENSE_QUESTION_PROMPT |
+    ) | CONDENSE_QUESTION_PROMPT | RunnableLambda(lambda x: {"standalone_question": x})
 
     retrieval_chain = RunnableLambda(
         lambda x: x['standalone_question']
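As written, the changed line appears to pipe CONDENSE_QUESTION_PROMPT straight into the wrapper lambda without ever invoking condense_llm, so the value stored under "standalone_question" would be the formatted prompt rather than a condensed question. A minimal sketch of the more usual composition, assuming a ChatPromptTemplate and any LangChain chat model (everything except CONDENSE_QUESTION_PROMPT, chistory, and condense_llm is hypothetical):

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_core.output_parsers import StrOutputParser

# Assumed prompt; the real CONDENSE_QUESTION_PROMPT lives elsewhere in app.py.
CONDENSE_QUESTION_PROMPT = ChatPromptTemplate.from_template(
    "Given the chat history:\n{chat_history}\n"
    "Rewrite the follow-up question as a standalone question: {question}"
)

def build_condense_chain(condense_llm, chistory):
    # Map the incoming dict onto the prompt variables, run the condense LLM,
    # then wrap the text back into the key the retrieval step expects.
    return (
        RunnableLambda(lambda x: {"chat_history": chistory, "question": x["question"]})
        | CONDENSE_QUESTION_PROMPT
        | condense_llm
        | StrOutputParser()
        | RunnableLambda(lambda text: {"standalone_question": text})
    )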
@@ -130,14 +130,18 @@ def app():
     conversational_qa_chain = create_conversational_qa_chain(retriever, llmc, llm)
     with st.chat_message("assistant"):
         with st.spinner("Thinking..."):
-
-
-
-
-
-
-
-
+            response_chunks = []
+            async for chunk in conversational_qa_chain.astream(
+                {
+                    "question": prompts2,
+                    "chat_history": chistory,
+                }
+            ):
+                response_chunks.append(chunk['generate_answer'])
+                st.write("".join(response_chunks))
+
+            final_response = "".join(response_chunks)
+            message = {"role": "assistant", "content": final_response}
     st.session_state.messages.append(message)
 
 if __name__ == '__main__':
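Note that astream() returns an async iterator, so the async for loop added above can only run inside a coroutine, while a plain Streamlit script body is synchronous. A hedged sketch of one way to drive it, assuming the chain streams dicts carrying a 'generate_answer' piece as in the diff (stream_answer is a hypothetical helper; prompts2 and chistory are the names used in app.py):

import asyncio
import streamlit as st

async def stream_answer(chain, question, chat_history):
    # Collect chunks as they arrive and re-render the partial answer in place.
    placeholder = st.empty()
    response_chunks = []
    async for chunk in chain.astream(
        {"question": question, "chat_history": chat_history}
    ):
        # Assumes each streamed chunk is a dict with a 'generate_answer' key,
        # matching the loop body in the diff above.
        response_chunks.append(chunk["generate_answer"])
        placeholder.write("".join(response_chunks))
    return "".join(response_chunks)

# Inside app(), the coroutine could then be driven synchronously, e.g.:
# final_response = asyncio.run(stream_answer(conversational_qa_chain, prompts2, chistory))

Using st.empty() lets the partial answer overwrite itself on each chunk instead of printing a new line per st.write() call.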