Update app.py
app.py
CHANGED
@@ -19,7 +19,7 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
 
 
 from langchain import HuggingFacePipeline
-
+
 from langchain.prompts import PromptTemplate
 from langchain.memory import ConversationBufferWindowMemory
 
@@ -93,16 +93,9 @@ def load_conversational_qa_memory_retriever():
                                                         get_chat_history=lambda h :h)
     return conversational_qa_memory_retriever, question_generator
 
-def load_retriever(llm, db):
-    qa_retriever = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff",
-                                               retriever=db.as_retriever(),
-                                               chain_type_kwargs= chain_type_kwargs)
 
-    return qa_retriever
 
-
-    related_doc = vector_database.similarity_search(query_input)
-    return related_doc
+
 
 
 
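For context on what the hunk above deletes: load_retriever wrapped the FAISS store in a LangChain RetrievalQA chain, and the two orphaned similarity_search lines look like leftovers of an earlier helper. Below is a minimal sketch of that retriever pattern under the legacy langchain API this app imports; passing chain_type_kwargs explicitly is my tweak (the deleted code read it as a module-level name), and llm / db stand in for the LLM pipeline and FAISS index built elsewhere in app.py.

# Sketch only: the RetrievalQA pattern removed above, not the app's current code.
from langchain.chains import RetrievalQA

def load_retriever(llm, db, chain_type_kwargs):
    # "stuff" chain type: concatenate every retrieved document into one prompt.
    qa_retriever = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=db.as_retriever(),
        chain_type_kwargs=chain_type_kwargs,
    )
    return qa_retriever

# Usage with the legacy chain interface:
# result = qa_retriever({"query": "your question"})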
@@ -200,13 +193,6 @@ datetime_format= "%Y-%m-%d %H:%M:%S"
 
 
 
-load_scraped_web_info()
-embedding_model = load_embedding_model()
-vector_database = load_faiss_index()
-llm_model = load_llm_model()
-qa_retriever = load_retriever(llm= llm_model, db= vector_database)
-conversational_qa_memory_retriever, question_generator = load_conversational_qa_memory_retriever()
-print("all load done")
 
 
 # Try adding this to set to clear the memory in each session
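The hunk above drops the module-level start-up sequence (scraped-data load, embedding model, FAISS index, LLM, and the two retrievers). The diff does not show where this initialisation now happens; the sketch below merely regroups the deleted calls into one hypothetical helper, init_pipeline, so the removed flow reads in one place. All of the load_* names come from app.py itself.

# Illustration only: the deleted start-up calls, regrouped into a single helper.
# init_pipeline is a hypothetical name; app.py ran these calls at module level.
def init_pipeline():
    load_scraped_web_info()                    # refresh the scraped source data
    embedding_model = load_embedding_model()   # embedding model backing the FAISS index
    vector_database = load_faiss_index()       # FAISS vector store
    llm_model = load_llm_model()               # the LLM (app.py imports HuggingFacePipeline)
    qa_retriever = load_retriever(llm=llm_model, db=vector_database)
    conversational_qa_memory_retriever, question_generator = (
        load_conversational_qa_memory_retriever()
    )
    print("all load done")
    return (embedding_model, vector_database, qa_retriever,
            conversational_qa_memory_retriever, question_generator)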