# app.py
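# Suggested dependencies (assumed, not pinned in the original file; adjust as needed):
#   pip install gradio huggingface_hub pymupdf pillow langchain-community \
#       langchain-huggingface langchain-text-splitters sentence-transformers faiss-cpu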
import os
import tempfile
from pathlib import Path
import base64
import fitz  # PyMuPDF
from PIL import Image
import io

import gradio as gr
from huggingface_hub import InferenceClient

# Vectorstore, embeddings, and text splitter from the current LangChain package split
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

# ── Globals ───────────────────────────────────────────────────────────────────
index = None
retriever = None
current_pdf_name = None
extracted_content = None
extracted_images = []

# ── Single Multimodal Model ──────────────────────────────────────────────────
multimodal_client = InferenceClient(model="microsoft/Phi-3.5-vision-instruct")
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/clip-ViT-B-32")
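# clip-ViT-B-32 (via sentence-transformers) embeds the text chunks using CLIP's text encoder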

# Create temp dirs
temp_dir = tempfile.mkdtemp()
figures_dir = os.path.join(temp_dir, "figures")
os.makedirs(figures_dir, exist_ok=True)

def encode_image_to_base64(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

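# Export every embedded raster image to PNG and caption it with the vision model,
# so visual content becomes retrievable alongside the page text.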
def extract_images_from_pdf_pymupdf(pdf_path):
    extracted_images = []
    image_descriptions = []
    try:
        pdf_document = fitz.open(pdf_path)
        for page_num in range(len(pdf_document)):
            page = pdf_document.load_page(page_num)
            for img_index, img in enumerate(page.get_images()):
                xref = img[0]
                pix = fitz.Pixmap(pdf_document, xref)
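                # pix.n - pix.alpha < 4 keeps grayscale/RGB images; CMYK and
                # other wide colorspaces are skipped (they don't convert cleanly to PNG)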
                if pix.n - pix.alpha < 4:
                    img_data = pix.tobytes("png")
                    img_pil = Image.open(io.BytesIO(img_data))
                    image_filename = f"page_{page_num}_img_{img_index}.png"
                    image_path = os.path.join(figures_dir, image_filename)
                    img_pil.save(image_path)
                    desc = analyze_image_with_multimodal_model(image_path)
                    extracted_images.append(image_path)
                    image_descriptions.append(desc)
                pix = None
        pdf_document.close()
        return extracted_images, image_descriptions
    except Exception as e:
        print(f"Error extracting images: {e}")
        return [], []

def analyze_image_with_multimodal_model(image_path):
    # The caption model must actually receive the pixels: embed the image as a
    # base64 data URI in a chat message (a plain text_generation prompt cannot carry it).
    try:
        b64 = encode_image_to_base64(image_path)
        messages = [
            {"role": "user", "content": [
                {"type": "image_url",
                 "image_url": {"url": f"data:image/png;base64,{b64}"}},
                {"type": "text", "text":
                    "Analyze this image and provide a detailed description. Include any text, data, "
                    "charts, diagrams, tables, or important visual elements you can see."},
            ]}
        ]
        resp = multimodal_client.chat_completion(
            messages=messages, max_tokens=200, temperature=0.3
        )
        return "[IMAGE CONTENT]: " + resp["choices"][0]["message"]["content"].strip()
    except Exception as e:
        return f"[IMAGE CONTENT]: Could not analyze image - {e}"

def process_pdf_multimodal(pdf_file):
    global current_pdf_name, index, retriever, extracted_content, extracted_images
    if pdf_file is None:
        return None, "❌ Please upload a PDF file.", gr.update(interactive=False)

    current_pdf_name = os.path.basename(pdf_file.name)
    extracted_images.clear()
    for f in os.listdir(figures_dir):
        os.remove(os.path.join(figures_dir, f))

    try:
        # Text extraction
        pdf_document = fitz.open(pdf_file.name)
        text_elements = []
        for i in range(len(pdf_document)):
            p = pdf_document.load_page(i)
            t = p.get_text().strip()
            if t:
                text_elements.append(f"[PAGE {i+1}]\n{t}")
        pdf_document.close()

        # Image extraction & analysis
        imgs, img_descs = extract_images_from_pdf_pymupdf(pdf_file.name)
        extracted_images.extend(imgs)

        # Combine content and split
        all_content = text_elements + img_descs
        extracted_content = "\n\n".join(all_content)
        if not extracted_content:
            return current_pdf_name, "❌ No content extracted.", gr.update(interactive=False)

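        # Split the merged text + caption stream into overlapping chunks so
        # related context survives chunk boundaries.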
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000, chunk_overlap=200, add_start_index=True
        )
        chunks = splitter.split_text(extracted_content)

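        # Build an in-memory FAISS index over the chunks; queries retrieve the top-3 matches.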
        index = FAISS.from_texts(chunks, embeddings)
        retriever = index.as_retriever(search_kwargs={"k": 3})

        status = (
            f"βœ… Processed '{current_pdf_name}' β€” "
            f"{len(chunks)} chunks "
            f"({len(text_elements)} pages, {len(img_descs)} images analyzed)"
        )
        return current_pdf_name, status, gr.update(interactive=True)

    except Exception as e:
        return current_pdf_name, f"❌ Error processing PDF: {e}", gr.update(interactive=False)

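# Standard RAG flow: retrieve the most relevant chunks, then answer strictly from them.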
def ask_multimodal_question(pdf_name, question):
    global retriever
    if not retriever:
        return "❌ Please upload and process a PDF first."
    if not question.strip():
        return "❌ Please enter a question."

    try:
        docs = retriever.invoke(question)
        context = "\n\n".join(d.page_content for d in docs)
        prompt = (
            "You are an AI assistant analyzing a document that contains both text and visual elements.\n\n"
            f"RETRIEVED CONTEXT:\n{context}\n\n"
            f"QUESTION: {question}\n"
            "Please provide a comprehensive answer based on the retrieved context above. "
            "If you reference visual elements, mention them explicitly.\nANSWER:"
        )
        resp = multimodal_client.text_generation(
            prompt=prompt, max_new_tokens=300, temperature=0.5
        )
        return resp.strip()
    except Exception as e:
        return f"❌ Error generating answer: {e}"

def generate_multimodal_summary():
    if not extracted_content:
        return "❌ Please upload and process a PDF first."
    try:
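        # Cap the preview at ~4,000 characters to keep the prompt within the model's context window.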
        preview = extracted_content[:4000]
        messages = [
            {"role":"user","content":[{"type":"text","text":
                "Please provide a comprehensive summary of this document content. The content includes both textual "
                f"information and descriptions of visual elements.\n\nDOCUMENT CONTENT:\n{preview}\n\nSUMMARY:"
            }]}
        ]
        resp = multimodal_client.chat_completion(
            messages=messages, max_tokens=250, temperature=0.3
        )
        return resp["choices"][0]["message"]["content"].strip()
    except Exception as e:
        return f"❌ Error generating summary: {e}"

def extract_multimodal_keywords():
    if not extracted_content:
        return "❌ Please upload and process a PDF first."
    try:
        preview = extracted_content[:3000]
        messages = [
            {"role":"user","content":[{"type":"text","text":
                "Analyze the following document content and extract 12-15 key terms, concepts, and important phrases. "
                f"DOCUMENT CONTENT:\n{preview}\n\nKEY TERMS:"
            }]}
        ]
        resp = multimodal_client.chat_completion(
            messages=messages, max_tokens=120, temperature=0.3
        )
        return resp["choices"][0]["message"]["content"].strip()
    except Exception as e:
        return f"❌ Error extracting keywords: {e}"

def clear_multimodal_interface():
    global index, retriever, current_pdf_name, extracted_content, extracted_images
    for f in os.listdir(figures_dir):
        try:
            os.remove(os.path.join(figures_dir, f))
        except OSError:
            pass
    index = retriever = None
    current_pdf_name = extracted_content = None
    extracted_images.clear()
    return None, "", gr.update(interactive=False)

# ── Gradio UI ────────────────────────────────────────────────────────────────
theme = gr.themes.Soft(primary_hue="indigo", secondary_hue="blue")

with gr.Blocks(theme=theme, css="""
    .container { border-radius: 10px; padding: 15px; }
    .pdf-active { border-left: 3px solid #6366f1; padding-left: 10px; background-color: rgba(99,102,241,0.1); }
    .footer { text-align: center; margin-top: 30px; font-size: 0.8em; color: #666; }
    .main-title { text-align: center; font-size: 64px; font-weight: bold; margin-bottom: 20px; }
    .multimodal-badge { background: linear-gradient(45deg, #6366f1, #8b5cf6); color: white; padding: 5px 15px; border-radius: 20px; font-size: 14px; display: inline-block; margin: 10px auto; }
    .model-info { background: #f8fafc; border: 1px solid #e2e8f0; border-radius: 8px; padding: 10px; margin: 10px 0; font-size: 12px; color: #64748b; }
""") as demo:
    gr.Markdown("<div class='main-title'>Unified MultiModal RAG</div>")
    gr.Markdown("<div style='text-align:center;'><span class='multimodal-badge'>🧠 Single Model β€’ Text + Vision</span></div>")
    gr.Markdown("""
    <div class='model-info'>
    <strong>🤖 Powered by:</strong> Microsoft Phi-3.5-Vision + CLIP Embeddings + PyMuPDF (HF Spaces Compatible)
    </div>
    """)

    with gr.Row():
        with gr.Column():
            gr.Markdown("## πŸ“„ Document Input")
            pdf_display = gr.Textbox(label="Active Document", interactive=False, elem_classes="pdf-active")
            pdf_file = gr.File(file_types=[".pdf"], type="filepath", label="Upload PDF (with images/charts)")
            upload_button = gr.Button("🔄 Process with Multimodal AI", variant="primary")
            status_box = gr.Textbox(label="Processing Status", interactive=False)
        with gr.Column():
            gr.Markdown("## ❓ Ask Questions")
            question_input = gr.Textbox(lines=3, placeholder="Ask about text or visual content...", interactive=False)
            ask_button = gr.Button("🔍 Ask Multimodal AI", variant="primary")
            answer_output = gr.Textbox(label="AI Response", lines=8, interactive=False)

    with gr.Row():
        with gr.Column():
            summary_button = gr.Button("📋 Generate Summary", variant="secondary")
            summary_output = gr.Textbox(label="Document Summary", lines=4, interactive=False)
        with gr.Column():
            keywords_button = gr.Button("🏷️ Extract Keywords", variant="secondary")
            keywords_output = gr.Textbox(label="Key Terms", lines=4, interactive=False)

    clear_button = gr.Button("🗑️ Clear All", variant="secondary")
    gr.Markdown("""
    <div class='footer'>
        <strong>Unified Multimodal Pipeline:</strong> One model handles text, images, charts, tables, diagrams, and mixed content queries
    </div>
    """)

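    # Wire UI events; process_pdf_multimodal also returns a gr.update() that
    # enables the question box once a document has been indexed.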
    upload_button.click(process_pdf_multimodal, [pdf_file], [pdf_display, status_box, question_input])
    ask_button.click(ask_multimodal_question, [pdf_display, question_input], answer_output)
    summary_button.click(generate_multimodal_summary, [], summary_output)
    keywords_button.click(extract_multimodal_keywords, [], keywords_output)
    clear_button.click(clear_multimodal_interface, [], [pdf_file, pdf_display, question_input])

if __name__ == "__main__":
    demo.launch(debug=True)