import gradio as gr
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
import os
import fitz
from PIL import Image

# Global variables
COUNT, N = 0, 0      # COUNT: queries processed so far; N: page of the last source document
chat_history = []
chain = None         # Initialize chain as None; it is built on the first query

# Function to set the OpenAI API key
def set_apikey(api_key):
    os.environ['OPENAI_API_KEY'] = api_key
    # Update the confirmation box to show the key is set
    return gr.update(value='OpenAI API key is set', interactive=False)

# Function to re-enable the API key input box
def enable_api_box():
    return gr.update(value='', placeholder='Enter OpenAI API key', interactive=True)

# Function to add text to the chat history
def add_text(history, text):
    if not text:
        raise gr.Error('Enter text')
    # Use a list (not a tuple) so the streamed answer can be appended in place
    history = history + [[text, '']]
    return history

# Function to process the PDF file and create a conversation chain
def process_file(file):
    global chain  # Access the global 'chain' variable
    if 'OPENAI_API_KEY' not in os.environ:
        raise gr.Error('Upload your OpenAI API key')
    loader = PyPDFLoader(file.name)
    documents = loader.load()
    embeddings = OpenAIEmbeddings()
    pdfsearch = Chroma.from_documents(documents, embeddings)
    chain = ConversationalRetrievalChain.from_llm(
        ChatOpenAI(temperature=0.3),
        retriever=pdfsearch.as_retriever(search_kwargs={"k": 1}),
        return_source_documents=True,
    )
    return chain

# Function to generate a response based on the chat history and query
def generate_response(history, query, btn):
    global COUNT, N, chat_history, chain
    if not btn:
        raise gr.Error(message='Upload a PDF')
    if COUNT == 0:
        # Build the chain once, on the first query
        chain = process_file(btn)
        COUNT += 1

    result = chain({"question": query, 'chat_history': chat_history},
                   return_only_outputs=True)
    chat_history += [(query, result["answer"])]
    # Page number of the top source document, used for the PDF preview
    N = result['source_documents'][0].metadata['page']

    for char in result['answer']:
        history[-1][-1] += char  # Stream the answer into the last response
        yield history, ''

# Function to render a specific page of a PDF file as an image
def render_file(file):
    global N
    doc = fitz.open(file.name)
    page = doc[N]
    # Render the page at 300 DPI (PDF native resolution is 72 DPI)
    pix = page.get_pixmap(matrix=fitz.Matrix(300 / 72, 300 / 72))
    image = Image.frombytes('RGB', [pix.width, pix.height], pix.samples)
    return image

# Function to render the first page of a newly uploaded PDF
def render_first(pdf_file):
    doc = fitz.open(pdf_file.name)
    page = doc[0]
    pix = page.get_pixmap(matrix=fitz.Matrix(300 / 72, 300 / 72))
    image = Image.frombytes('RGB', [pix.width, pix.height], pix.samples)
    return image

# Gradio application setup
with gr.Blocks() as demo:
    with gr.Column():
        with gr.Row():
            enable_box = gr.Textbox(placeholder='Enter OpenAI API key',
                                    show_label=False, interactive=True)
            disable_box = gr.Textbox(value='OpenAI API key is set', interactive=False)
            change_api_key = gr.Button('Change Key')
        with gr.Row():
            chatbot = gr.Chatbot(value=[], elem_id='chatbot')
            show_img = gr.Image(label='Upload PDF')
        with gr.Row():
            txt = gr.Textbox(show_label=False, placeholder='Enter text and press Submit')
            submit_btn = gr.Button('Submit')
            btn = gr.UploadButton("📁 Upload a PDF", file_types=[".pdf"])

    # Event handler for submitting the OpenAI API key
    enable_box.submit(fn=set_apikey, inputs=[enable_box], outputs=[disable_box])

    # Event handler for changing the API key
    change_api_key.click(fn=enable_api_box, outputs=[enable_box])

    # Event handler for uploading a PDF: show its first page
    btn.upload(fn=render_first, inputs=[btn], outputs=[show_img])

    # Event handler for submitting text and generating a response
    submit_btn.click(
        fn=add_text,
        inputs=[chatbot, txt],
        outputs=[chatbot],
        queue=False,
    ).success(
        fn=generate_response,
        inputs=[chatbot, txt, btn],
        outputs=[chatbot, txt],
    ).success(
        fn=render_file,
        inputs=[btn],
        outputs=[show_img],
    )

demo.queue()
if __name__ == "__main__":
    demo.launch()
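As a sanity check, the retrieval chain can also be exercised outside the Gradio UI. The sketch below is illustrative only: `sample.pdf` is a hypothetical local path, `SimpleNamespace` merely mimics the `.name` attribute that Gradio's upload button provides, and it assumes `OPENAI_API_KEY` is already exported in the environment.

from types import SimpleNamespace

# Hypothetical stand-in for Gradio's upload object, which exposes a .name path
fake_upload = SimpleNamespace(name='sample.pdf')  # 'sample.pdf' is a placeholder

qa_chain = process_file(fake_upload)
result = qa_chain({"question": "What is this document about?", "chat_history": []},
                  return_only_outputs=True)
print(result["answer"])
print(result["source_documents"][0].metadata["page"])  # page used for the answer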