# docling_rag/app.py — "Update app.py" by NEXAS, commit d68fe02 (verified), 2.11 kB (Hugging Face file header)
"""Streamlit front-end for AI-powered document question answering.

A PDF uploaded in the sidebar is saved to ./temp and ingested into the
vector store exactly once per file. Questions are then answered against
the ingested document when one is available, or from the LLM's general
knowledge as an explicit fallback.
"""
import streamlit as st
import os
import json
from utils.ingestion import DocumentProcessor
from utils.llm import LLMProcessor
from utils.qa import QAEngine

st.set_page_config(page_title="AI-Powered Document QA", layout="wide")
st.title("📄 AI-Powered Document QA")

# Initialize processing back-ends (run on every Streamlit rerun).
document_processor = DocumentProcessor()
llm_processor = LLMProcessor()
qa_engine = QAEngine()

# Scratch directory for uploaded PDFs.
os.makedirs("temp", exist_ok=True)

# --- Sidebar: file upload & ingestion -----------------------------------
st.sidebar.header("📂 Upload a PDF")
uploaded_file = st.sidebar.file_uploader("Choose a PDF file", type=["pdf"])

if uploaded_file:
    pdf_path = os.path.join("temp", uploaded_file.name)

    # Streamlit reruns this whole script on every widget interaction.
    # Guard on the filename so the (slow) ingestion pipeline runs once
    # per uploaded file instead of on every button click.
    if st.session_state.get("processed_file") != uploaded_file.name:
        with open(pdf_path, "wb") as f:
            f.write(uploaded_file.read())
        st.sidebar.success("✅ File uploaded successfully!")

        with st.spinner("🔄 Processing document..."):
            document_processor.process_document(pdf_path)
        st.sidebar.success("✅ Document processed successfully!")
        st.session_state["processed_file"] = uploaded_file.name

    st.session_state["document_uploaded"] = True
else:
    st.session_state["document_uploaded"] = False

# Divider between sections
st.markdown("---")

# --- Q&A section --------------------------------------------------------
st.header("🔍 Ask a Question")
question = st.text_input("Ask a question:", placeholder="What are the key insights?")

if st.button("💡 Get Answer"):
    if question:
        with st.spinner("🧠 Generating response..."):
            if st.session_state.get("document_uploaded", False):
                # Document-based QA against the ingested PDF.
                answer = qa_engine.query(question)
            else:
                # No document available: fall back to general LLM knowledge.
                answer = llm_processor.generate_answer("", question)
                st.warning("⚠️ No document uploaded. This response is generated from general AI knowledge and may not be document-specific.")

        st.subheader("📝 Answer:")
        # NOTE(review): assumes both qa_engine.query and generate_answer
        # return an object exposing .content — confirm in utils/.
        st.write(answer.content)
    else:
        st.warning("⚠️ Please enter a question.")

st.markdown("---")
st.caption("🤖 Powered by ChromaDB + Groq LLM | Built with ❤️ using Streamlit")