# docreader/app.py
import gradio as gr
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
import os
import fitz  # PyMuPDF, used to render PDF pages as images
from PIL import Image
import streamlit as st
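# Assumed environment for the imports above (dependencies are not pinned in the original file):
# gradio, streamlit, langchain (pre-0.1 module layout), openai, chromadb, pypdf (for PyPDFLoader),
# pymupdf (for fitz) and pillow, plus an OPENAI_API_KEY set in the environment or entered in the UI.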
# Global state shared across the Gradio callbacks
COUNT, N = 0, 0      # COUNT: whether a PDF has been processed yet; N: page index cited by the last answer
chat_history = []    # running (question, answer) pairs fed back to the retrieval chain
chain = None         # ConversationalRetrievalChain, built lazily on the first query

# Store the OpenAI API key and lock the input box
def set_apikey(api_key):
    os.environ['OPENAI_API_KEY'] = api_key
    return gr.update(value='OpenAI API key is Set', interactive=False)

# Re-enable the API key input box so a new key can be entered
def enable_api_box():
    return gr.update(value=None, placeholder='Enter OpenAI API key', interactive=True)
# Process the uploaded PDF and build a conversational retrieval chain over it
def process_file(file):
    global chain  # the chain is reused for follow-up questions
    if 'OPENAI_API_KEY' not in os.environ:
        raise gr.Error('Upload your OpenAI API key')
    loader = PyPDFLoader(file.name)
    documents = loader.load()
    embeddings = OpenAIEmbeddings()
    pdfsearch = Chroma.from_documents(documents, embeddings)
    chain = ConversationalRetrievalChain.from_llm(
        ChatOpenAI(temperature=0.3),
        retriever=pdfsearch.as_retriever(search_kwargs={"k": 1}),
        return_source_documents=True,
    )
    return chain
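# CharacterTextSplitter is imported above but never used. A minimal sketch of how the loaded
# pages could be chunked before indexing, should finer-grained retrieval be wanted
# (the chunk_size/chunk_overlap values are assumptions, not settings from the original app):
def split_documents(documents):
    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    return splitter.split_documents(documents)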
# Generate a response to the latest query and stream it into the chat history
def generate_response(history, query, btn):
    global COUNT, N, chat_history, chain
    if not btn:
        raise gr.Error(message='Upload a PDF')
    if COUNT == 0:  # build the retrieval chain once, on the first query
        chain = process_file(btn)
        COUNT += 1
    result = chain({"question": query, 'chat_history': chat_history}, return_only_outputs=True)
    chat_history += [(query, result["answer"])]
    N = result['source_documents'][0].metadata['page']  # page cited by the top source chunk
    for char in result['answer']:
        history[-1][-1] += char  # append one character to the pending answer
        yield history, ''        # stream the updated chat and clear the query box
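# A minimal sketch of exercising the pipeline outside Gradio, e.g. as a quick smoke test.
# `pdf_path` and `question` are placeholders; the SimpleNamespace stands in for the value
# Gradio's UploadButton passes to the handlers (anything exposing a `.name` path attribute).
def _smoke_test(pdf_path, question):
    from types import SimpleNamespace
    btn = SimpleNamespace(name=pdf_path)  # stand-in for the uploaded-file object
    history = add_text([], question)      # seed the chat with an unanswered entry
    for history, _ in generate_response(history, question, btn):
        pass                              # drain the streaming generator
    return history[-1][1]                 # the fully streamed answer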
# Render page N of the uploaded PDF (the page cited by the last answer) as a PIL image
def render_file(file):
    global N
    doc = fitz.open(file.name)
    page = doc[N]
    pix = page.get_pixmap(matrix=fitz.Matrix(300 / 72, 300 / 72))  # render at ~300 dpi
    image = Image.frombytes('RGB', [pix.width, pix.height], pix.samples)
    return image
# Append the user's query to the chat history as a new, not-yet-answered entry
def add_text(history, text):
    if not text:
        raise gr.Error('Enter text')
    history = history + [[text, '']]  # a mutable list, so generate_response can stream into the answer slot
    return history
# Render a preview for a freshly uploaded PDF.
# The intended logic (process the PDF, build a summary, render the first page) is not
# implemented yet; a blank placeholder image is returned for now.
def render_first(pdf_file):
    image = Image.new('RGB', (600, 400), color='white')  # blank placeholder
    return image
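# A minimal sketch of what render_first could do instead of returning a blank placeholder:
# render page 0 of the uploaded PDF with PyMuPDF, mirroring render_file above
# (an assumed behaviour, not part of the original app).
def render_first_page(pdf_file):
    doc = fitz.open(pdf_file.name)
    page = doc[0]
    pix = page.get_pixmap(matrix=fitz.Matrix(300 / 72, 300 / 72))  # ~300 dpi
    return Image.frombytes('RGB', [pix.width, pix.height], pix.samples)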
st.title("PDF-Powered Chatbot")  # Streamlit page title

# Gradio interface with Streamlit containers
with st.container():
    with gr.Blocks() as demo:  # components and event listeners must live inside a Blocks context
        gr.Markdown("""
        <style>
        .image-container { height: 680px; }
        </style>
        """)
        with gr.Row():
            enable_box = gr.Textbox(placeholder='Enter OpenAI API key',
                                    show_label=False, interactive=True)
            disable_box = gr.Textbox(value='OpenAI API key is Set', interactive=False)
            change_api_key = gr.Button('Change Key')
        with gr.Row():
            chatbot = gr.Chatbot(value=[], elem_id='chatbot')
            show_img = gr.Image(label='Upload PDF')
        pdf_upload = gr.UploadButton("πŸ“ Upload a PDF", file_types=[".pdf"])
        txt = gr.Textbox(label="Enter your query", placeholder="Ask a question...")
        submit_btn = gr.Button('Submit')

        # Event handlers
        enable_box.submit(fn=set_apikey, inputs=[enable_box], outputs=[disable_box])
        change_api_key.click(fn=enable_api_box, outputs=[enable_box])
        pdf_upload.upload(fn=render_first, inputs=[pdf_upload], outputs=[show_img])
        submit_btn.click(
            fn=add_text, inputs=[chatbot, txt], outputs=[chatbot], queue=False
        ).success(
            fn=generate_response, inputs=[chatbot, txt, pdf_upload], outputs=[chatbot, txt]
        ).success(
            fn=render_file, inputs=[pdf_upload], outputs=[show_img]
        )
# If you only want the Gradio interface, launch Gradio directly
if __name__ == "__main__":
    demo.queue()   # queuing is needed for the streaming (generator) response handler
    demo.launch()