Update app.py
app.py CHANGED

@@ -73,6 +73,9 @@ def get_vectorstore(text_chunks):
 
 
 def get_conversation_chain(vectorstore):
+    if vectorstore is None:
+        return None
+
     model_name_or_path = 'TheBloke/Llama-2-7B-chat-GGUF'
     model_basename = 'llama-2-7b-chat.Q2_K.gguf'
     model_path = hf_hub_download(repo_id=model_name_or_path, filename=model_basename)
@@ -85,6 +88,12 @@ def get_conversation_chain(vectorstore):
     memory = ConversationBufferMemory(
         memory_key='chat_history', return_messages=True)
     # Create the conversational retrieval chain.
+
+    retriever = vectorstore.as_retriever() if hasattr(vectorstore, 'as_retriever') else None
+
+    if retriever is None:
+        return None
+
     conversation_chain = ConversationalRetrievalChain.from_llm(
         llm=llm,
         retriever=vectorstore.as_retriever(),
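With this change, get_conversation_chain can return None, either when no vector store was built or when the object passed in does not expose as_retriever, so callers now need to check the result before using it. A minimal sketch of such a caller follows, assuming the rest of app.py produces text_chunks and user_question elsewhere; those names and the error handling shown are illustrative assumptions, not part of this commit.

    # Sketch only: build the chain and guard against the None return added above.
    vectorstore = get_vectorstore(text_chunks)
    conversation_chain = get_conversation_chain(vectorstore)

    if conversation_chain is None:
        # The vector store was missing or had no retriever; stop before querying.
        raise RuntimeError("could not build the conversation chain")

    # A ConversationalRetrievalChain is called with a question and returns a dict
    # containing the generated answer (and, with memory attached, the chat history).
    result = conversation_chain({'question': user_question})
    print(result['answer'])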