import os

import fitz  # PyMuPDF
import openai
import streamlit as st
from dotenv import load_dotenv
from pinecone import Pinecone, ServerlessSpec

# Load the environment variables from the .env file
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
pinecone_api_key = os.getenv("PINECONE_API_KEY")
pinecone_environment = os.getenv("PINECONE_ENVIRONMENT")

# Initialize the clients. The openai.Embedding / openai.Completion calls below
# follow the pre-1.0 openai SDK interface.
openai.api_key = openai_api_key
pc = Pinecone(api_key=pinecone_api_key)

EMBEDDING_MODEL = "text-embedding-ada-002"  # produces 1536-dimensional vectors
EMBEDDING_DIMENSION = 1536
CHUNK_SIZE = 1000  # characters per chunk sent to the embedding model

# Streamlit app
st.title("Chat with Your Document")
st.write("Upload a PDF file to chat with its content using Pinecone and OpenAI.")

# File upload
uploaded_file = st.file_uploader("Choose a PDF file", type="pdf")

if uploaded_file is not None:
    # Extract the text from every page of the uploaded PDF
    pdf_document = fitz.open(stream=uploaded_file.read(), filetype="pdf")
    pdf_text = ""
    for page_num in range(pdf_document.page_count):
        page = pdf_document.load_page(page_num)
        pdf_text += page.get_text()

    # Create the Pinecone index if it does not exist yet. The dimension must
    # match the embedding model, and cosine is the usual metric for OpenAI
    # embeddings. PINECONE_ENVIRONMENT should hold a serverless region such
    # as "us-east-1".
    index_name = "pdf-analysis"
    if index_name not in pc.list_indexes().names():
        pc.create_index(
            name=index_name,
            dimension=EMBEDDING_DIMENSION,
            metric="cosine",
            spec=ServerlessSpec(cloud="aws", region=pinecone_environment),
        )
    vector_store = pc.Index(index_name)

    # Chunk the PDF text, embed each chunk once, and upsert the vectors along
    # with the chunk text as metadata. Streamlit reruns this script on every
    # interaction, so remember which file has already been indexed to avoid
    # re-embedding it on each question.
    if st.session_state.get("indexed_file") != uploaded_file.name:
        chunks = [pdf_text[i:i + CHUNK_SIZE] for i in range(0, len(pdf_text), CHUNK_SIZE)]
        embeddings = openai.Embedding.create(model=EMBEDDING_MODEL, input=chunks)["data"]
        vector_store.upsert(
            [
                {
                    "id": str(item["index"]),
                    "values": item["embedding"],
                    "metadata": {"text": chunks[item["index"]]},
                }
                for item in embeddings
            ]
        )
        st.session_state["indexed_file"] = uploaded_file.name

    # Chat with the document
    user_input = st.text_input("Ask a question about the document:")
    if st.button("Ask"):
        if user_input:
            # Embed the question and retrieve the most relevant chunks
            query_embedding = openai.Embedding.create(
                model=EMBEDDING_MODEL, input=user_input
            )["data"][0]["embedding"]
            results = vector_store.query(
                vector=query_embedding, top_k=3, include_metadata=True
            )
            context = "\n\n".join(
                match["metadata"]["text"] for match in results["matches"]
            )

            # Answer the question from the retrieved context
            response = openai.Completion.create(
                model="gpt-3.5-turbo-instruct",
                prompt=(
                    "Answer the question using the following excerpts from the document.\n\n"
                    f"{context}\n\nQuestion: {user_input}\nAnswer:"
                ),
                max_tokens=150,
            )
            st.write(response.choices[0].text.strip())
        else:
            st.write("Please enter a question to ask.")

    # Display the PDF text
    st.write("Extracted Text from PDF:")
    st.write(pdf_text)
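
# To run the app locally (assuming this script is saved as app.py, an arbitrary
# filename, and a .env file defining OPENAI_API_KEY, PINECONE_API_KEY, and
# PINECONE_ENVIRONMENT sits next to it):
#
#     streamlit run app.py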