# NOTE: this file was recovered from a Hugging Face Spaces web page; the
# scraper chrome (Space banner, "Runtime error" status, file size, commit
# hashes, and the line-number gutter) has been removed — it was not code.
import gradio as gr
import streamlit as st
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
import os
import fitz
from PIL import Image
# Global mutable state shared by the handlers below.
# COUNT: number of queries handled so far; the chain is built on the first one.
# N: page index of the last answer's source document, used by render_file.
COUNT, N = 0, 0
chat_history = []  # list of (question, answer) tuples fed back into the chain
chain = None  # ConversationalRetrievalChain, created lazily by process_file
# Function to set the OpenAI API key
def set_apikey(api_key):
    """Store *api_key* in the environment and lock the key input box.

    Returns a Gradio component update that disables the textbox. The
    previous version returned the undefined name ``disable_box`` (a
    NameError at runtime); ``gr.update`` achieves the presumed intent.
    """
    os.environ['OPENAI_API_KEY'] = api_key
    return gr.update(interactive=False)
# Function to enable the API key input box
def enable_api_box():
    """Re-enable the API-key textbox.

    Returns a Gradio component update. The previous version returned the
    undefined name ``enable_box`` (a NameError at runtime).
    """
    return gr.update(interactive=True)
# Function to add text to the chat history
def add_text(history, text):
    """Return *history* extended with a new (text, '') turn.

    Raises gr.Error when *text* is empty. The input list is not mutated;
    a new list is returned.
    """
    if text:
        return history + [(text, '')]
    raise gr.Error('Enter text')
# Function to process the PDF file and create a conversation chain
def process_file(file):
    """Build and return a ConversationalRetrievalChain over *file*.

    Requires OPENAI_API_KEY in the environment (raises gr.Error otherwise).
    The chain is also stored in the module-level ``chain`` global.
    """
    global chain
    if 'OPENAI_API_KEY' not in os.environ:
        raise gr.Error('Upload your OpenAI API key')
    docs = PyPDFLoader(file.name).load()
    vector_store = Chroma.from_documents(docs, OpenAIEmbeddings())
    chain = ConversationalRetrievalChain.from_llm(
        ChatOpenAI(temperature=0.3),
        retriever=vector_store.as_retriever(search_kwargs={"k": 1}),
        return_source_documents=True,
    )
    return chain
# Function to generate a response based on the chat history and query
def generate_response(history, query, pdf_upload):
    """Answer *query* against the uploaded PDF and stream it into *history*.

    Builds the retrieval chain lazily on the first call, appends the
    (query, answer) pair to the global chat_history, records the source
    page index in the global N, and returns (history, '') so the caller
    can clear the input textbox.
    """
    global COUNT, N, chat_history, chain
    if not pdf_upload:
        raise gr.Error(message='Upload a PDF')
    # Lazy one-time initialization: the chain is built only on the very
    # first query. NOTE(review): a later upload of a different PDF will
    # NOT rebuild the chain — confirm whether that is intended.
    if COUNT == 0:
        chain = process_file(pdf_upload)
        COUNT += 1
    # Replace with your LangChain logic to generate a response
    result = chain({"question": query, 'chat_history': chat_history}, return_only_outputs=True)
    chat_history += [(query, result["answer"])]
    # Extract the page number of the top source document for render_file.
    # NOTE(review): this relies on the tuple/field layout of LangChain's
    # Document objects and looks fragile — verify against the installed
    # langchain version.
    N = list(result['source_documents'][0])[1][1]['page']  # Adjust as needed
    # Append the answer char-by-char to the last chat turn (streaming-style).
    for char in result['answer']:
        history[-1][-1] += char
    return history, ''
# Function to render a specific page of a PDF file as an image
def render_file(file):
    """Render page N (global source-page index) of *file* as a PIL image."""
    global N
    zoom = fitz.Matrix(300 / 72, 300 / 72)  # scale 72-dpi points up to 300 DPI
    pixmap = fitz.open(file.name)[N].get_pixmap(matrix=zoom)
    return Image.frombytes('RGB', [pixmap.width, pixmap.height], pixmap.samples)
# Function to render initial content from the PDF
def render_first(pdf_file):
    """Return a placeholder image for the initial view of *pdf_file*.

    The argument is currently unused; replace with real first-page
    rendering logic.
    """
    placeholder = Image.new('RGB', (600, 400), color = 'white')  # Placeholder
    return placeholder
# Streamlit & Gradio Interface
# NOTE(review): mixing Streamlit (`st.*`) and Gradio in one script is unusual;
# the Streamlit calls only take effect under `streamlit run` — confirm which
# framework this app is actually served with.
st.title("PDF-Powered Chatbot")
with st.container():
    gr.Markdown("""
    <style>
    .image-container { height: 680px; }
    </style>
    """)
with gr.Blocks() as demo:
    pdf_upload1 = gr.UploadButton("π Upload PDF 1", file_types=[".pdf"])  # Define pdf_upload1
    # Components the handlers read/write. `chatbot` was previously referenced
    # in the click handler without ever being defined (NameError at click time).
    chatbot = gr.Chatbot(label="Conversation")
    show_img = gr.Image(label="PDF page")
    txt = gr.Textbox(label="Enter your query", placeholder="Ask a question...")
    submit_btn = gr.Button('Submit')
    # Chain the three steps and wire component inputs/outputs explicitly —
    # the old zero-argument handler passed components as plain values and
    # discarded every return value, so nothing updated in the UI.
    submit_btn.click(
        add_text, inputs=[chatbot, txt], outputs=[chatbot]
    ).then(
        generate_response, inputs=[chatbot, txt, pdf_upload1], outputs=[chatbot, txt]
    ).then(
        render_file, inputs=[pdf_upload1], outputs=[show_img]
    )
if __name__ == "__main__":
    # Launch the Blocks app assembled above. The previous code built a
    # second, unwired gr.Interface whose inputs did not match
    # generate_response's signature and referenced the undefined name
    # `free_port` (NameError before the server ever started); a stray `|`
    # after the call was also a syntax error.
    demo.launch()