mainfile cgpt 7
app.py CHANGED

@@ -1,14 +1,15 @@
 import streamlit as st
 import os
 import asyncio
-from
-from
+from langchain_community.vectorstores import Chroma
+from langchain_community.embeddings import HuggingFaceBgeEmbeddings
 from langchain_together import Together
 from langchain import hub
 from operator import itemgetter
 from langchain.schema import format_document
 from langchain.prompts import ChatPromptTemplate, PromptTemplate
-from
+from langchain_community.chat_message_histories import StreamlitChatMessageHistory
+from langchain.memory import ConversationBufferMemory
 from langchain_core.runnables import RunnableLambda, RunnableParallel, RunnablePassthrough
 
 # Load the embedding function
@@ -68,13 +69,13 @@ def store_chat_history(role: str, content: str):
 def create_conversational_qa_chain(retriever, condense_llm, answer_llm):
     condense_question_chain = RunnableLambda(
         lambda x: {"chat_history": chistory, "question": x['question']}
-    ) | CONDENSE_QUESTION_PROMPT | RunnableLambda(lambda x: {"standalone_question": x})
+    ) | CONDENSE_QUESTION_PROMPT | RunnableLambda(lambda x: {"standalone_question": x['standalone_question']})
 
     retrieval_chain = RunnableLambda(
-        lambda x: x['standalone_question']
+        lambda x: {"standalone_question": x['standalone_question']}
     ) | retriever | _combine_documents
 
-    answer_chain = ANSWER_PROMPT | answer_llm
+    answer_chain = ANSWER_PROMPT | RunnableLambda(lambda x: {"context": x, "question": x['standalone_question']}) | answer_llm
 
     return RunnableParallel(
         condense_question=condense_question_chain,
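Taken together, the new langchain_community imports and the reworked chain suggest a Chroma-backed retriever with BGE embeddings, a Streamlit-backed chat history, and two Together-hosted models. Below is a minimal sketch of how those pieces might be wired up and invoked; the persist directory, embedding and Together model names, and retriever settings are illustrative assumptions rather than values taken from this commit, and create_conversational_qa_chain is the function defined in app.py above.

# Sketch only: paths, model names, and retriever settings below are assumptions.
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
from langchain_together import Together

# Embedding function and retriever backed by a persisted Chroma index (assumed path and model).
embedding_function = HuggingFaceBgeEmbeddings(model_name="BAAI/bge-base-en-v1.5")
vectorstore = Chroma(persist_directory="./chroma_db", embedding_function=embedding_function)
retriever = vectorstore.as_retriever(search_kwargs={"k": 4})

# Streamlit-backed message history, matching the new StreamlitChatMessageHistory import.
history = StreamlitChatMessageHistory(key="chat_messages")

# Two Together-hosted LLMs (assumed model name; requires TOGETHER_API_KEY in the environment):
# one condenses the follow-up question against the chat history, the other answers
# over the retrieved context.
condense_llm = Together(model="mistralai/Mixtral-8x7B-Instruct-v0.1", temperature=0.0)
answer_llm = Together(model="mistralai/Mixtral-8x7B-Instruct-v0.1", temperature=0.2)

# create_conversational_qa_chain is the function shown in the diff above;
# it returns a RunnableParallel, so invoke() yields a dict of branch outputs.
chain = create_conversational_qa_chain(retriever, condense_llm, answer_llm)
result = chain.invoke({"question": "What does the indexed document say about refunds?"})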