|
import os |
|
import time |
|
|
|
import openai |
|
import streamlit as st |
|
from dotenv import load_dotenv |
|
|
|
from extract import extract_text_from_pdfs |
|
from generate import generate_response |
|
from preprocess import preprocess_text |
|
from retrieve import create_vectorizer, retrieve |
|
|
|
|
|
# Pull variables from a local .env file into the process environment.
load_dotenv()

# Hand the API key to the OpenAI client.
# NOTE(review): the conventional variable name is OPENAI_API_KEY — confirm
# the .env file really defines a key called 'api_key'.
openai.api_key = os.environ.get('api_key')
|
|
|
|
|
# Seed the per-session stores that must survive Streamlit reruns:
# chat history, paths of PDFs written to disk, and preprocessed texts.
# (Item access on st.session_state is equivalent to attribute access.)
for _store in ("messages", "pdf_files", "processed_texts"):
    if _store not in st.session_state:
        st.session_state[_store] = []

st.title("RAG-based PDF Query System")
|
|
|
|
|
# PDF upload widget; returns a (possibly empty) list of UploadedFile objects.
uploaded_files = st.file_uploader("Upload PDFs", type=["pdf"], accept_multiple_files=True)

if uploaded_files:
    # Re-run the expensive pipeline only when the selection changed since
    # the last rerun.
    # NOTE(review): this compares UploadedFile lists with `!=` — presumably
    # Streamlit returns stable objects across reruns; verify the equality
    # semantics, otherwise this reprocesses on every rerun.
    if "uploaded_files" not in st.session_state or uploaded_files != st.session_state.uploaded_files:
        st.session_state.uploaded_files = uploaded_files
        # A new upload invalidates the previous chat and extracted texts.
        st.session_state.messages = []
        st.session_state.pdf_files = []
        st.session_state.processed_texts = []

        with st.status("Processing the uploaded PDFs...", state="running") as status:

            # Persist each upload to the working directory so the extractor
            # can read it from a filesystem path.
            # NOTE(review): uploaded_file.name is user-controlled — confirm it
            # cannot contain path separators before writing to that path.
            for uploaded_file in uploaded_files:
                with open(uploaded_file.name, "wb") as f:
                    f.write(uploaded_file.getbuffer())
                st.session_state.pdf_files.append(uploaded_file.name)

            # Extract raw text one file at a time so progress can be
            # reported per file inside the status box.
            num_files = len(st.session_state.pdf_files)
            texts = []
            for i, pdf_file in enumerate(st.session_state.pdf_files):
                st.write(f"Extracting text from file {i + 1} of {num_files}...")
                text = extract_text_from_pdfs([pdf_file])
                texts.extend(text)
                time.sleep(0.1)  # brief pause so each status message is visible

            st.write("Preprocessing text...")
            st.session_state.processed_texts = preprocess_text(texts)
            time.sleep(0.1)

            st.write("Creating vectorizer and transforming texts...")
            # X is the fitted document matrix that retrieve() queries later.
            st.session_state.vectorizer, st.session_state.X = create_vectorizer(st.session_state.processed_texts)
            time.sleep(0.1)

            status.update(label="Processing complete!", state="complete")

else:
    # Nothing uploaded yet: halt the script here so the chat UI below never
    # runs without a vectorizer in session state.
    st.stop()
|
|
|
|
|
st.write("### Ask a question about the uploaded PDFs")

# Replay the stored conversation so it survives Streamlit reruns.
for entry in st.session_state.messages:
    role, content = entry["role"], entry["content"]
    with st.chat_message(role):
        st.write(content)
|
|
|
|
|
# One chat turn: capture the question, retrieve supporting passages,
# generate an answer, and render both sides of the exchange.
if prompt := st.chat_input("Ask something about the uploaded PDFs"):

    st.session_state.messages.append({"role": "user", "content": prompt})

    # Look up the most relevant preprocessed chunks for this question.
    hit_indices = retrieve(prompt, st.session_state.X, st.session_state.vectorizer)
    context_passages = [
        " ".join(st.session_state.processed_texts[idx]) for idx in hit_indices
    ]

    # Ask the model to answer grounded in the retrieved passages, and
    # record the answer so the history replay shows it on later reruns.
    response = generate_response(context_passages, prompt)
    st.session_state.messages.append({"role": "assistant", "content": response})

    # Render this turn immediately (the history loop above already ran).
    with st.chat_message("user"):
        st.write(prompt)

    with st.chat_message("assistant"):
        st.write(response)
|
|
|
|
|
# Remove the temporary PDF copies written during processing.
# EAFP fix: the original checked os.path.exists() before os.remove(),
# which is a check-then-act race (TOCTOU) — the file can disappear between
# the two calls. Deleting directly and ignoring FileNotFoundError is the
# idiomatic, race-free form.
for pdf_file in st.session_state.pdf_files:
    try:
        os.remove(pdf_file)
    except FileNotFoundError:
        pass  # already gone (e.g. deleted on a previous rerun)
|
|