girishwangikar commited on
Commit
aa8c099
·
verified ·
1 Parent(s): b9fe0c7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -6
app.py CHANGED
@@ -8,6 +8,7 @@ from langchain.chains import create_retrieval_chain
8
  from langchain_community.vectorstores import FAISS
9
  from langchain_community.document_loaders import PyPDFLoader
10
  from langchain_community.embeddings import HuggingFaceEmbeddings
 
11
  from dotenv import load_dotenv
12
 
13
  # Load environment variables
@@ -52,8 +53,6 @@ def clear_knowledge_base():
52
  vectors = None # Reset the vector store
53
  return "Knowledge base cleared."
54
 
55
- # Function to process questions
56
- # Function to process questions
57
  def process_question(question):
58
  global vectors
59
  if vectors is None:
@@ -62,7 +61,7 @@ def process_question(question):
62
  # Create document retrieval chain
63
  retriever = vectors.as_retriever(search_type="similarity", search_kwargs={"k": 5})
64
 
65
- # Use the invoke method instead of get_relevant_documents
66
  documents = retriever.invoke(question)
67
 
68
  if not documents:
@@ -70,16 +69,18 @@ def process_question(question):
70
 
71
  # Create context from retrieved documents
72
  context = "\n\n".join([doc.page_content for doc in documents])
 
 
 
73
 
74
- # Use the invoke method for the LLM
75
- response = llm.invoke({"context": context, "input": question})
76
 
77
  # Confidence score as average relevance
78
  confidence_score = sum([doc.metadata.get('score', 0) for doc in documents]) / len(documents)
79
 
80
  return response, context, round(confidence_score, 2)
81
 
82
-
83
  # CSS styling
84
  CSS = """
85
  .duplicate-button { margin: auto !important; color: white !important; background: black !important; border-radius: 100vh !important;}
 
8
  from langchain_community.vectorstores import FAISS
9
  from langchain_community.document_loaders import PyPDFLoader
10
  from langchain_community.embeddings import HuggingFaceEmbeddings
11
+ from langchain.prompts import PromptTemplate
12
  from dotenv import load_dotenv
13
 
14
  # Load environment variables
 
53
  vectors = None # Reset the vector store
54
  return "Knowledge base cleared."
55
 
 
 
56
  def process_question(question):
57
  global vectors
58
  if vectors is None:
 
61
  # Create document retrieval chain
62
  retriever = vectors.as_retriever(search_type="similarity", search_kwargs={"k": 5})
63
 
64
+ # Use the invoke method for retrieving relevant documents
65
  documents = retriever.invoke(question)
66
 
67
  if not documents:
 
69
 
70
  # Create context from retrieved documents
71
  context = "\n\n".join([doc.page_content for doc in documents])
72
+
73
+ # Combine context and question into a single string (formatted input for LLM)
74
+ prompt = f"Answer the question based on the provided context.\n\nContext: {context}\n\nQuestion: {question}"
75
 
76
+ # Pass the string to llm.invoke
77
+ response = llm.invoke(prompt)
78
 
79
  # Confidence score as average relevance
80
  confidence_score = sum([doc.metadata.get('score', 0) for doc in documents]) / len(documents)
81
 
82
  return response, context, round(confidence_score, 2)
83
 
 
84
  # CSS styling
85
  CSS = """
86
  .duplicate-button { margin: auto !important; color: white !important; background: black !important; border-radius: 100vh !important;}