# app.py
import os
import tempfile
from pathlib import Path
import base64
import fitz # PyMuPDF
from PIL import Image
import io
import gradio as gr
from huggingface_hub import InferenceClient
# Import vectorstore and embeddings from updated packages
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
# ── Globals ───────────────────────────────────────────────────────────────────
index = None              # FAISS vector store over text + image-description chunks
retriever = None          # retriever view over the index
current_pdf_name = None   # basename of the currently processed PDF
extracted_content = None  # concatenated page text and image descriptions
extracted_images = []     # paths of images saved from the current PDF
# ── Single Multimodal Model ──────────────────────────────────────────────────
multimodal_client = InferenceClient(model="microsoft/Phi-3.5-vision-instruct")
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/clip-ViT-B-32")
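# One vision-language model handles image description, Q&A, summaries, and keywords;
# CLIP's text encoder embeds the text chunks and image descriptions into a single index.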
# Create temp dirs
temp_dir = tempfile.mkdtemp()
figures_dir = os.path.join(temp_dir, "figures")
os.makedirs(figures_dir, exist_ok=True)
def encode_image_to_base64(image_path):
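    """Read an image file from disk and return its contents as a base64 string."""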
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode('utf-8')
def extract_images_from_pdf_pymupdf(pdf_path):
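    """Extract embedded images from a PDF via PyMuPDF, save them as PNGs in
    figures_dir, and return (image_paths, model-generated descriptions)."""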
extracted_images = []
image_descriptions = []
try:
pdf_document = fitz.open(pdf_path)
for page_num in range(len(pdf_document)):
page = pdf_document.load_page(page_num)
for img_index, img in enumerate(page.get_images()):
xref = img[0]
pix = fitz.Pixmap(pdf_document, xref)
                if pix.n - pix.alpha < 4:  # only GRAY/RGB; CMYK and other colorspaces are skipped
img_data = pix.tobytes("png")
img_pil = Image.open(io.BytesIO(img_data))
image_filename = f"page_{page_num}_img_{img_index}.png"
image_path = os.path.join(figures_dir, image_filename)
img_pil.save(image_path)
desc = analyze_image_with_multimodal_model(image_path)
extracted_images.append(image_path)
image_descriptions.append(desc)
pix = None
pdf_document.close()
return extracted_images, image_descriptions
except Exception as e:
print(f"Error extracting images: {e}")
return [], []
def analyze_image_with_multimodal_model(image_path):
    """Describe one extracted image with the vision model.

    The image is sent as a base64 data URI in an OpenAI-style chat message so the
    model actually receives the pixels; a plain text_generation prompt cannot carry
    the image, which left the encoded data unused before.
    """
    try:
        b64 = encode_image_to_base64(image_path)
        messages = [
            {"role": "user", "content": [
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64}"}},
                {"type": "text", "text": (
                    "Analyze this image and provide a detailed description. Include any text, data, "
                    "charts, diagrams, tables, or important visual elements you can see."
                )},
            ]}
        ]
        resp = multimodal_client.chat_completion(
            messages=messages, max_tokens=200, temperature=0.3
        )
        return "[IMAGE CONTENT]: " + resp.choices[0].message.content.strip()
    except Exception as e:
        return f"[IMAGE CONTENT]: Could not analyze image - {e}"
def process_pdf_multimodal(pdf_file):
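    """Extract text and images from the uploaded PDF, describe the images,
    chunk everything, and build the FAISS index and retriever."""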
global current_pdf_name, index, retriever, extracted_content, extracted_images
if pdf_file is None:
return None, "❌ Please upload a PDF file.", gr.update(interactive=False)
current_pdf_name = os.path.basename(pdf_file.name)
extracted_images.clear()
for f in os.listdir(figures_dir):
os.remove(os.path.join(figures_dir, f))
try:
# Text extraction
pdf_document = fitz.open(pdf_file.name)
text_elements = []
for i in range(len(pdf_document)):
p = pdf_document.load_page(i)
t = p.get_text().strip()
if t:
text_elements.append(f"[PAGE {i+1}]\n{t}")
pdf_document.close()
# Image extraction & analysis
imgs, img_descs = extract_images_from_pdf_pymupdf(pdf_file.name)
extracted_images.extend(imgs)
# Combine content and split
all_content = text_elements + img_descs
extracted_content = "\n\n".join(all_content)
if not extracted_content:
return current_pdf_name, "❌ No content extracted.", gr.update(interactive=False)
splitter = RecursiveCharacterTextSplitter(
chunk_size=1000, chunk_overlap=200, add_start_index=True
)
chunks = splitter.split_text(extracted_content)
index = FAISS.from_texts(chunks, embeddings)
        retriever = index.as_retriever(search_kwargs={"k": 3})  # top-3 chunks per query
status = (
f"βœ… Processed '{current_pdf_name}' β€” "
f"{len(chunks)} chunks "
f"({len(text_elements)} pages, {len(img_descs)} images analyzed)"
)
return current_pdf_name, status, gr.update(interactive=True)
except Exception as e:
return current_pdf_name, f"❌ Error processing PDF: {e}", gr.update(interactive=False)
def ask_multimodal_question(pdf_name, question):
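    """Answer a question using chunks retrieved from the indexed document."""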
global retriever
if not retriever:
return "❌ Please upload and process a PDF first."
if not question.strip():
return "❌ Please enter a question."
try:
docs = retriever.invoke(question)
context = "\n\n".join(d.page_content for d in docs)
prompt = (
"You are an AI assistant analyzing a document that contains both text and visual elements.\n\n"
f"RETRIEVED CONTEXT:\n{context}\n\n"
f"QUESTION: {question}\n"
"Please provide a comprehensive answer based on the retrieved context above. "
"If you reference visual elements, mention them explicitly.\nANSWER:"
)
resp = multimodal_client.text_generation(
prompt=prompt, max_new_tokens=300, temperature=0.5
)
return resp.strip()
except Exception as e:
return f"❌ Error generating answer: {e}"
def generate_multimodal_summary():
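    """Summarize the first ~4000 characters of the extracted content."""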
if not extracted_content:
return "❌ Please upload and process a PDF first."
try:
preview = extracted_content[:4000]
messages = [
{"role":"user","content":[{"type":"text","text":
"Please provide a comprehensive summary of this document content. The content includes both textual "
f"information and descriptions of visual elements.\n\nDOCUMENT CONTENT:\n{preview}\n\nSUMMARY:"
}]}
]
resp = multimodal_client.chat_completion(
messages=messages, max_tokens=250, temperature=0.3
)
        return resp.choices[0].message.content.strip()
except Exception as e:
return f"❌ Error generating summary: {e}"
def extract_multimodal_keywords():
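    """Extract 12-15 key terms from the first ~3000 characters of the extracted content."""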
if not extracted_content:
return "❌ Please upload and process a PDF first."
try:
preview = extracted_content[:3000]
messages = [
{"role":"user","content":[{"type":"text","text":
"Analyze the following document content and extract 12-15 key terms, concepts, and important phrases. "
f"DOCUMENT CONTENT:\n{preview}\n\nKEY TERMS:"
}]}
]
resp = multimodal_client.chat_completion(
messages=messages, max_tokens=120, temperature=0.3
)
        return resp.choices[0].message.content.strip()
except Exception as e:
return f"❌ Error extracting keywords: {e}"
def clear_multimodal_interface():
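    """Reset all globals and delete saved figure files so a new PDF can be processed."""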
global index, retriever, current_pdf_name, extracted_content, extracted_images
    for f in os.listdir(figures_dir):
        try:
            os.remove(os.path.join(figures_dir, f))
        except OSError:
            pass
index = retriever = None
current_pdf_name = extracted_content = None
extracted_images.clear()
return None, "", gr.update(interactive=False)
# ── Gradio UI ────────────────────────────────────────────────────────────────
theme = gr.themes.Soft(primary_hue="indigo", secondary_hue="blue")
with gr.Blocks(theme=theme, css="""
.container { border-radius: 10px; padding: 15px; }
.pdf-active { border-left: 3px solid #6366f1; padding-left: 10px; background-color: rgba(99,102,241,0.1); }
.footer { text-align: center; margin-top: 30px; font-size: 0.8em; color: #666; }
.main-title { text-align: center; font-size: 64px; font-weight: bold; margin-bottom: 20px; }
.multimodal-badge { background: linear-gradient(45deg, #6366f1, #8b5cf6); color: white; padding: 5px 15px; border-radius: 20px; font-size: 14px; display: inline-block; margin: 10px auto; }
.model-info { background: #f8fafc; border: 1px solid #e2e8f0; border-radius: 8px; padding: 10px; margin: 10px 0; font-size: 12px; color: #64748b; }
""") as demo:
    gr.Markdown("<div class='main-title'>Unified Multimodal RAG</div>")
gr.Markdown("<div style='text-align:center;'><span class='multimodal-badge'>🧠 Single Model β€’ Text + Vision</span></div>")
gr.Markdown("""
<div class='model-info'>
<strong>πŸ€– Powered by:</strong> Microsoft Phi-3.5-Vision + CLIP Embeddings + PyMuPDF (HF Spaces Compatible)
</div>
""")
with gr.Row():
with gr.Column():
gr.Markdown("## πŸ“„ Document Input")
pdf_display = gr.Textbox(label="Active Document", interactive=False, elem_classes="pdf-active")
pdf_file = gr.File(file_types=[".pdf"], type="filepath", label="Upload PDF (with images/charts)")
upload_button = gr.Button("πŸ”„ Process with Multimodal AI", variant="primary")
status_box = gr.Textbox(label="Processing Status", interactive=False)
with gr.Column():
gr.Markdown("## ❓ Ask Questions")
question_input = gr.Textbox(lines=3, placeholder="Ask about text or visual content...", interactive=False)
ask_button = gr.Button("πŸ” Ask Multimodal AI", variant="primary")
answer_output = gr.Textbox(label="AI Response", lines=8, interactive=False)
with gr.Row():
with gr.Column():
summary_button = gr.Button("πŸ“‹ Generate Summary", variant="secondary")
summary_output = gr.Textbox(label="Document Summary", lines=4, interactive=False)
with gr.Column():
keywords_button = gr.Button("🏷️ Extract Keywords", variant="secondary")
keywords_output = gr.Textbox(label="Key Terms", lines=4, interactive=False)
clear_button = gr.Button("πŸ—‘οΈ Clear All", variant="secondary")
gr.Markdown("""
<div class='footer'>
<strong>Unified Multimodal Pipeline:</strong> One model handles text, images, charts, tables, diagrams, and mixed content queries
</div>
""")
upload_button.click(process_pdf_multimodal, [pdf_file], [pdf_display, status_box, question_input])
ask_button.click(ask_multimodal_question, [pdf_display, question_input], answer_output)
summary_button.click(generate_multimodal_summary, [], summary_output)
keywords_button.click(extract_multimodal_keywords, [], keywords_output)
clear_button.click(clear_multimodal_interface, [], [pdf_file, pdf_display, question_input])
if __name__ == "__main__":
demo.launch(debug=True)