Create app.py
app.py
ADDED
@@ -0,0 +1,59 @@
import gradio as gr
import os
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain import FAISS
from gradio_pdf import PDF  # Import PDF from gradio_pdf

# Function to process uploaded PDF and generate responses based on the document and user input
def chat_with_pdf(pdf_file, api_key, user_question):
    # Set the Google API key
    os.environ["GOOGLE_API_KEY"] = api_key

    # Load the document
    loader = PyPDFLoader(pdf_file.name)
    pages = loader.load_and_split()

    # Create a vector db index
    embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
    db = FAISS.from_documents(pages, embeddings)

    # Search relevant docs based on user question
    docs = db.similarity_search(user_question)

    # Prepare the context for the API request
    content = "\n".join([x.page_content for x in docs])
    qa_prompt = "Use the following pieces of context to answer the user's question. If you don't know the answer, just say that you don't know, don't try to make up an answer.----------------"
    input_text = qa_prompt + "\nContext:" + content + "\nUser question:\n" + user_question

    # Call Gemini API (ChatGoogleGenerativeAI) to generate a response
    llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash")
    result = llm.invoke(input_text)

    # Return the bot's response (without chat history)
    return result.content

# Create a Gradio interface with a split layout
with gr.Blocks() as iface:
    with gr.Row():
        with gr.Column(scale=1):
            pdf_input = gr.File(label="Upload PDF")  # Upload PDF file
            pdf_display = PDF(label="PDF Preview")  # PDF preview using gradio_pdf
        with gr.Column(scale=1):
            response_output = gr.Textbox(label="Bot Response")  # Output for the bot response
            question_box = gr.Textbox(label="Ask a question", placeholder="Enter your question here")
            api_key_box = gr.Textbox(label="API Key", type="password", placeholder="Enter your Google API Key here")

    # Directly display the PDF once uploaded without using the 'upload' method
    pdf_input.change(lambda pdf_file: pdf_file.name, inputs=pdf_input, outputs=pdf_display)

    # When the user submits a question, process it and return the bot's response
    question_box.submit(
        chat_with_pdf,
        inputs=[pdf_input, api_key_box, question_box],
        outputs=response_output
    )

# Launch the Gradio app
iface.launch()
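This commit adds only app.py. For the Space to build, the repository would also need a requirements.txt listing the imported packages; the following is a plausible sketch inferred from the imports above (the exact dependency set and any version pins are assumptions, not part of this commit):

# Inferred dependencies for app.py (unpinned; assumed, not from the commit)
gradio
gradio_pdf
langchain
langchain-community        # the langchain.* import paths may resolve here, depending on the LangChain version
langchain-google-genai
sentence-transformers      # embedding backend used by HuggingFaceEmbeddings
faiss-cpu                  # vector index backend for FAISS
pypdf                      # PDF parser used by PyPDFLoader

On a Gradio-SDK Space, app.py is run automatically at startup; locally, running "python app.py" launches the same UI (by default at http://127.0.0.1:7860).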