saritha committed
Commit f1ba16e · verified · 1 Parent(s): e20e8c6

Update app.py

Files changed (1)
  1. app.py +25 -84
app.py CHANGED
@@ -2,133 +2,74 @@ import os
 import gradio as gr
 import asyncio
 from langchain_core.prompts import PromptTemplate
+from langchain_community.output_parsers.rail_parser import GuardrailsOutputParser
 from langchain_community.document_loaders import PyPDFLoader
 from langchain_google_genai import ChatGoogleGenerativeAI
 import google.generativeai as genai
-from langchain.chains.question_answering import load_qa_chain
+from langchain.chains.question_answering import load_qa_chain  # Import load_qa_chain
 
-# Initialize a dictionary to store chat history and context per session
-session_contexts = {}
-
-async def initialize(file_path, question, session_id):
+async def initialize(file_path, question):
     genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
     model = genai.GenerativeModel('gemini-pro')
     model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
 
     # Refined prompt template to encourage precise and concise answers
-    prompt_template = """You are a helpful assistant. Use the context provided below to answer the question precisely and concisely.
+    prompt_template = """Answer the question precisely and concisely using the provided context. Avoid any additional commentary or system messages.
    If the answer is not contained in the context, respond with "answer not available in context".
 
    Context:
    {context}
 
-   Conversation History:
-   {history}
-
    Question:
    {question}
 
    Answer:
    """
-    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "history", "question"])
-
-    # Get or initialize the context and history for the current session
-    context_history = session_contexts.get(session_id, {"context": "", "history": ""})
-    combined_context = context_history["context"]
-    conversation_history = context_history["history"]
+    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
 
     if os.path.exists(file_path):
         pdf_loader = PyPDFLoader(file_path)
         pages = pdf_loader.load_and_split()
 
         # Extract content from each page and store along with page number
-        page_contexts = [page.page_content for i, page in enumerate(pages)]
+        page_contexts = [f"Page {i+1}: {page.page_content}" for i, page in enumerate(pages)]
         context = "\n".join(page_contexts[:30])  # Using the first 30 pages for context
 
         # Load the question-answering chain
         stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
 
-        # Combine previous context and conversation history with the new context
-        full_context = combined_context + "\n" + context
-        full_history = conversation_history + f"\nQ: {question}\nA: {answer}"
-
         # Get the answer from the model
-        stuff_answer = await stuff_chain.ainvoke({"input_documents": pages, "question": question, "context": full_context, "history": full_history})
+        stuff_answer = await stuff_chain.ainvoke({"input_documents": pages, "question": question, "context": context})
         answer = stuff_answer.get('output_text', '').strip()
 
         # Identify key sentences or phrases
         key_phrases = answer.split(". ")  # Split answer into sentences for more precise matching
 
-        # Score each page based on the presence of key phrases
-        page_scores = [0] * len(pages)
+        relevant_pages = set()
         for i, page in enumerate(pages):
             for phrase in key_phrases:
                 if phrase.lower() in page.page_content.lower():
-                    page_scores[i] += 1
-
-        # Determine the top pages based on highest scores
-        top_pages_with_scores = sorted(enumerate(page_scores), key=lambda x: x[1], reverse=True)
-        top_pages = [i + 1 for i, score in top_pages_with_scores if score > 0][:2]  # Get top 2 pages
+                    relevant_pages.add(i+1)  # Add page number if phrase is found
 
-        # Generate links for each top page
-        file_name = os.path.basename(file_path)
-        page_links = [f"[Page {p}](file://{os.path.abspath(file_path)})" for p in top_pages]
-        page_links_str = ', '.join(page_links)
-
-        if top_pages:
-            source_str = f"Top relevant page(s): {page_links_str}"
+        if relevant_pages:
+            page_numbers = ', '.join(str(p) for p in sorted(relevant_pages))
+            return f"Relevant pages: {page_numbers}"
         else:
-            source_str = "Top relevant page(s): Not found in specific page"
-
-        # Create a clickable link for the document
-        source_link = f"[Document: {file_name}](file://{os.path.abspath(file_path)})"
-
-        # Update session context with the new question and answer
-        session_contexts[session_id] = {
-            "context": full_context,
-            "history": full_history + f"\nQ: {question}\nA: {answer}"
-        }
-
-        return f"Answer: {answer}\n{source_str}\n{source_link}"
+            return "No relevant pages found in the document."
     else:
         return "Error: Unable to process the document. Please ensure the PDF file is valid."
 
-# Define Gradio Interface for QA and Chat History
+# Define Gradio Interface
 input_file = gr.File(label="Upload PDF File")
 input_question = gr.Textbox(label="Ask about the document")
-output_text = gr.Textbox(label="Answer and Top Pages", lines=10, max_lines=10)
-
-def get_chat_history(session_id):
-    if session_id in session_contexts:
-        return session_contexts[session_id]["history"]
-    else:
-        return "No history available for this session."
+output_text = gr.Textbox(label="Relevant Pages")
 
-async def pdf_qa(file, question, session_id):
+async def pdf_qa(file, question):
     if file is None:
         return "Error: No file uploaded. Please upload a PDF document."
 
-    answer = await initialize(file.name, question, session_id)
+    answer = await initialize(file.name, question)
     return answer
 
-# Create Gradio Interfaces
-qa_interface = gr.Interface(
-    fn=lambda file, question, session_id: asyncio.run(pdf_qa(file, question, session_id)),
-    inputs=[input_file, input_question, gr.Textbox(label="Session ID", placeholder="Enter a session ID to track your conversation")],
-    outputs=output_text,
-    title="PDF Question Answering System",
-    description="Upload a PDF file and ask questions about the content. Provide a session ID to maintain conversation context."
-)
-
-history_interface = gr.Interface(
-    fn=lambda session_id: get_chat_history(session_id),
-    inputs=gr.Textbox(label="Session ID", placeholder="Enter a session ID to view chat history"),
-    outputs=gr.Textbox(label="Chat History", lines=20, max_lines=20),
-    title="Chat History",
-    description="View the history of interactions for a specific session."
-)
-
-# Launch both interfaces
-qa_interface.launch(share=True)
-history_interface.launch(share=True)
+# Create Gradio Interface with share=True to enable a public link
+gr.Interface(fn=pdf_qa, inputs=[input_file, input_question], outputs=output_text, title="PDF Question Answering System", description="Upload a PDF file and ask questions about the content.").launch(share=True)
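Note: the page-matching heuristic this commit introduces maps answer sentences back to pages by plain case-insensitive substring search. A minimal self-contained sketch of that logic (standalone function and fake page texts, not part of the commit) that can be tested without Gradio or a real PDF:

# Sketch of the new phrase-to-page matching: split the model's answer into
# sentences and report every page whose text contains one of them.
def find_relevant_pages(answer, page_texts):
    key_phrases = [p for p in answer.split(". ") if p.strip()]
    relevant = set()
    for i, text in enumerate(page_texts):
        lowered = text.lower()
        for phrase in key_phrases:
            if phrase.lower() in lowered:
                relevant.add(i + 1)  # 1-based page numbers, as in the commit
    return relevant

# Hypothetical usage with fake page contents:
pages = ["Alpha beta gamma.", "The answer sentence lives here. Filler.", "Unrelated text."]
print(find_relevant_pages("The answer sentence lives here", pages))  # prints {2}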
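Note: the removed session-history path was broken as written: full_history interpolated answer before the chain call that defines it, so any request would raise NameError. If the feature is restored later, the history update has to move after the ainvoke call. A minimal sketch of the corrected ordering, using hypothetical names (answer_with_history is not part of either version of app.py):

# Hypothetical fix for the removed session-history feature: append the Q/A
# pair to the stored history only *after* the chain has produced the answer.
session_contexts = {}

async def answer_with_history(stuff_chain, pages, question, session_id):
    state = session_contexts.setdefault(session_id, {"history": ""})
    result = await stuff_chain.ainvoke({
        "input_documents": pages,
        "question": question,
        "history": state["history"],  # prior turns feed the prompt
    })
    answer = result.get("output_text", "").strip()
    state["history"] += f"\nQ: {question}\nA: {answer}"  # update after, not before
    return answer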