Update app.py
app.py (CHANGED)
@@ -203,7 +203,7 @@ def manage_conversation_history(question, answer, history, max_history=5):
        history.pop(0)
    return history

-def is_related_to_history(question, history, threshold=0.3):
+def is_related_to_history(question, history, threshold=0.5): # Increased threshold from 0.3 to 0.5
    if not history:
        return False
    history_text = " ".join([f"{h['question']} {h['answer']}" for h in history])
@@ -632,7 +632,6 @@ def ask_question(question, temperature, top_p, repetition_penalty, web_search, g
    model = get_model(temperature, top_p, repetition_penalty)
    embed = get_embeddings()

-    # Check if the FAISS database exists
    if os.path.exists("faiss_database"):
        database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
    else:
@@ -686,12 +685,13 @@ def ask_question(question, temperature, top_p, repetition_penalty, web_search, g

    history_str = "\n".join([f"Q: {item['question']}\nA: {item['answer']}" for item in conversation_history])

-
-
-
-
-
-
+    # Always retrieve relevant documents
+    retriever = database.as_retriever()
+    relevant_docs = retriever.get_relevant_documents(question)
+    doc_context = "\n".join([doc.page_content for doc in relevant_docs])
+
+    # Combine document context with conversation history
+    context_str = f"Document context:\n{doc_context}\n\nConversation history:\n{history_str}"

    prompt_val = ChatPromptTemplate.from_template(prompt)
    formatted_prompt = prompt_val.format(history=history_str, context=context_str, question=question)
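For readers following the change, here is a minimal, self-contained sketch of the retrieval path the last hunk adds: load the saved FAISS index, turn it into a retriever, fetch documents for the current question, and fold them into the context string together with the conversation history. It assumes the faiss_database folder already exists on disk; the embedding model name, the sample question and history, and the prompt template string are illustrative placeholders, not values taken from app.py.

# Sketch of the retrieval flow introduced in the last hunk. The embedding model,
# sample data, and prompt template below are placeholders, not app.py's values.
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import ChatPromptTemplate

embed = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Same load call as in the diff; assumes the index was built and saved earlier.
database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)

question = "What does the document say about pricing?"
history_str = "Q: earlier question\nA: earlier answer"

# Always retrieve relevant documents for the current question.
retriever = database.as_retriever()
relevant_docs = retriever.get_relevant_documents(question)
doc_context = "\n".join(doc.page_content for doc in relevant_docs)

# Combine document context with conversation history, as the new code does.
context_str = f"Document context:\n{doc_context}\n\nConversation history:\n{history_str}"

# Placeholder template using the same variables the diff formats.
prompt = "History:\n{history}\n\nContext:\n{context}\n\nQuestion: {question}\nAnswer:"
prompt_val = ChatPromptTemplate.from_template(prompt)
formatted_prompt = prompt_val.format(history=history_str, context=context_str, question=question)
print(formatted_prompt)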
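The first hunk only raises the default threshold in is_related_to_history from 0.3 to 0.5; the similarity computation itself is not visible in this diff. As a purely hypothetical illustration of how such a threshold is commonly applied, the sketch below scores the question against the concatenated history with cosine similarity over embeddings. Reusing the same embedding model and this exact scoring method are assumptions, not app.py's confirmed implementation.

# Hypothetical illustration only: app.py's real similarity check is not shown in
# this diff. Cosine similarity over embeddings is one common way to apply a
# relatedness threshold like the 0.5 default introduced in the first hunk.
import numpy as np
from langchain_community.embeddings import HuggingFaceEmbeddings

embed = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

def is_related_to_history(question, history, threshold=0.5):
    if not history:
        return False
    history_text = " ".join(f"{h['question']} {h['answer']}" for h in history)
    q_vec, h_vec = (np.array(v) for v in embed.embed_documents([question, history_text]))
    similarity = float(np.dot(q_vec, h_vec) / (np.linalg.norm(q_vec) * np.linalg.norm(h_vec)))
    # Treat the question as related only if it clears the (stricter) threshold.
    return similarity >= threshold

history = [{"question": "What is FAISS?", "answer": "A library for vector similarity search."}]
print(is_related_to_history("How do I build a FAISS index?", history))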