Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -8,10 +8,13 @@ from langchain_google_genai import ChatGoogleGenerativeAI
|
|
8 |
import google.generativeai as genai
|
9 |
from langchain.chains.question_answering import load_qa_chain # Import load_qa_chain
|
10 |
|
11 |
-
# Initialize an empty list to store chat history
|
12 |
chat_history = []
|
|
|
13 |
|
14 |
async def initialize(file_path, question):
|
|
|
|
|
15 |
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
|
16 |
model = genai.GenerativeModel('gemini-pro')
|
17 |
model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
|
@@ -41,8 +44,11 @@ async def initialize(file_path, question):
|
|
41 |
# Load the question-answering chain
|
42 |
stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
|
43 |
|
|
|
|
|
|
|
44 |
# Get the answer from the model
|
45 |
-
stuff_answer = await stuff_chain.ainvoke({"input_documents": pages, "question": question, "context": context})
|
46 |
answer = stuff_answer.get('output_text', '').strip()
|
47 |
|
48 |
# Identify key sentences or phrases
|
@@ -82,6 +88,9 @@ async def initialize(file_path, question):
|
|
82 |
'document_link': source_link
|
83 |
})
|
84 |
|
|
|
|
|
|
|
85 |
return f"Answer: {answer}\n{source_str}\n{source_link}"
|
86 |
else:
|
87 |
return "Error: Unable to process the document. Please ensure the PDF file is valid."
|
|
|
8 |
import google.generativeai as genai
|
9 |
from langchain.chains.question_answering import load_qa_chain # Import load_qa_chain
|
10 |
|
11 |
+
# Initialize an empty list to store chat history and context
|
12 |
chat_history = []
|
13 |
+
context_history = ""
|
14 |
|
15 |
async def initialize(file_path, question):
|
16 |
+
global context_history
|
17 |
+
|
18 |
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
|
19 |
model = genai.GenerativeModel('gemini-pro')
|
20 |
model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
|
|
|
44 |
# Load the question-answering chain
|
45 |
stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
|
46 |
|
47 |
+
# Combine previous context with the new context
|
48 |
+
combined_context = context_history + "\n" + context
|
49 |
+
|
50 |
# Get the answer from the model
|
51 |
+
stuff_answer = await stuff_chain.ainvoke({"input_documents": pages, "question": question, "context": combined_context})
|
52 |
answer = stuff_answer.get('output_text', '').strip()
|
53 |
|
54 |
# Identify key sentences or phrases
|
|
|
88 |
'document_link': source_link
|
89 |
})
|
90 |
|
91 |
+
# Update context history
|
92 |
+
context_history += f"\nQ: {question}\nA: {answer}"
|
93 |
+
|
94 |
return f"Answer: {answer}\n{source_str}\n{source_link}"
|
95 |
else:
|
96 |
return "Error: Unable to process the document. Please ensure the PDF file is valid."
|