import os
import gradio as gr
import tempfile
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from unstructured.partition.pdf import partition_pdf
from unstructured.partition.utils.constants import PartitionStrategy
from huggingface_hub import InferenceClient
import base64
from PIL import Image
import io
import requests
from getpass import getpass
import PyPDF2
import fitz  # PyMuPDF
import pytesseract

# Step 2: Set up Hugging Face Token (required by the InferenceClient calls below)
print("πŸ”‘ Setting up Hugging Face Token...")
HF_TOKEN = os.environ.get("HUGGINGFACE_HUB_TOKEN")
if not HF_TOKEN:
    print("Please enter your Hugging Face token (get it from: https://huggingface.co/settings/tokens)")
    HF_TOKEN = getpass("Hugging Face Token: ")
    os.environ["HUGGINGFACE_HUB_TOKEN"] = HF_TOKEN

# Step 3: Initialize Hugging Face components
print("πŸš€ Initializing models...")

# Initialize embeddings model (runs locally for better performance)
embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2",
    model_kwargs={'device': 'cpu'}
)

# Initialize vector store
vector_store = InMemoryVectorStore(embeddings)
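
# Hedged sanity check: sentence-transformers/all-MiniLM-L6-v2 produces
# 384-dimensional vectors. This helper is optional and never called by the app;
# it only illustrates what the vector store indexes.
def _embedding_sanity_check():
    vec = embeddings.embed_query("sanity check")
    print(f"Embedding dimension: {len(vec)}")  # expected: 384 for MiniLM-L6-v2
    return vec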

# Initialize Hugging Face Inference clients with proper multimodal support
def initialize_multimodal_clients():
    """Initialize clients with proper multimodal capabilities"""
    
    # Vision-Language Models (can understand images AND text together)
    multimodal_models = [
        "microsoft/git-large-coco",           # Best for image+text understanding
        "Salesforce/blip2-opt-2.7b",         # Strong multimodal model
        "microsoft/git-base-coco",            # Lighter alternative
        "Salesforce/blip-image-captioning-large"  # Good image understanding
    ]
    
    # Text-only models for when no images are involved
    text_models = [
        "google/flan-t5-base",                # Excellent for Q&A
        "microsoft/DialoGPT-medium",          # Conversational
        "facebook/blenderbot-400M-distill",   # Another option
    ]
    
    vision_client = None
    text_client = None
    
    # Try to initialize multimodal/vision client
    for model_name in multimodal_models:
        try:
            vision_client = InferenceClient(model=model_name, token=HF_TOKEN)
            print(f"βœ… Multimodal client initialized: {model_name}")
            break
        except Exception as e:
            print(f"⚠️  Failed to initialize {model_name}: {e}")
            continue
    
    # Try to initialize text client
    for model_name in text_models:
        try:
            text_client = InferenceClient(model=model_name, token=HF_TOKEN)
            print(f"βœ… Text client initialized: {model_name}")
            break
        except Exception as e:
            print(f"⚠️  Failed to initialize {model_name}: {e}")
            continue
    
    return vision_client, text_client

vision_client, text_client = initialize_multimodal_clients()

template = """
You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.

Question: {question} 
Context: {context} 
Answer:
"""

def extract_text_with_multiple_methods(pdf_path):
    """Try multiple methods to extract text from PDF"""
    extracted_text = ""
    methods_tried = []
    
    # Method 1: PyPDF2
    try:
        print("πŸ” Trying PyPDF2...")
        with open(pdf_path, 'rb') as file:
            pdf_reader = PyPDF2.PdfReader(file)
            text_parts = []
            for page_num, page in enumerate(pdf_reader.pages):
                page_text = page.extract_text()
                if page_text.strip():
                    text_parts.append(f"Page {page_num + 1}:\n{page_text}")
            
            if text_parts:
                extracted_text = "\n\n".join(text_parts)
                methods_tried.append("PyPDF2")
                print(f"βœ… PyPDF2 extracted {len(extracted_text)} characters")
    except Exception as e:
        print(f"⚠️ PyPDF2 failed: {e}")
    
    # Method 2: PyMuPDF (fitz) - often better for complex PDFs
    if not extracted_text.strip():
        try:
            print("πŸ” Trying PyMuPDF...")
            doc = fitz.open(pdf_path)
            text_parts = []
            for page_num in range(len(doc)):
                page = doc.load_page(page_num)
                page_text = page.get_text()
                if page_text.strip():
                    text_parts.append(f"Page {page_num + 1}:\n{page_text}")
            
            if text_parts:
                extracted_text = "\n\n".join(text_parts)
                methods_tried.append("PyMuPDF")
                print(f"βœ… PyMuPDF extracted {len(extracted_text)} characters")
            doc.close()
        except Exception as e:
            print(f"⚠️ PyMuPDF failed: {e}")
    
    # Method 3: OCR with PyMuPDF for image-based PDFs
    if not extracted_text.strip():
        try:
            print("πŸ” Trying OCR with PyMuPDF...")
            doc = fitz.open(pdf_path)
            text_parts = []
            for page_num in range(min(len(doc), 5)):  # Limit to first 5 pages for OCR
                page = doc.load_page(page_num)
                # Convert page to image
                pix = page.get_pixmap()
                img_data = pix.tobytes("png")
                img = Image.open(io.BytesIO(img_data))
                
                # Apply OCR
                ocr_text = pytesseract.image_to_string(img)
                if ocr_text.strip():
                    text_parts.append(f"Page {page_num + 1} (OCR):\n{ocr_text}")
            
            if text_parts:
                extracted_text = "\n\n".join(text_parts)
                methods_tried.append("OCR")
                print(f"βœ… OCR extracted {len(extracted_text)} characters")
            doc.close()
        except Exception as e:
            print(f"⚠️ OCR failed: {e}")
    
    return extracted_text, methods_tried
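
# Hedged usage sketch for the extraction cascade above (the path below is
# hypothetical; in the app this is called from upload_and_process_pdf):
# text, methods = extract_text_with_multiple_methods("/tmp/example.pdf")
# print(f"Extracted {len(text)} characters via: {', '.join(methods) or 'none'}")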

def upload_and_process_pdf(pdf_file):
    """Process uploaded PDF file with enhanced error handling"""
    if pdf_file is None:
        return "Please upload a PDF file first."
    
    try:
        # Create temporary directories
        with tempfile.TemporaryDirectory() as temp_dir:
            figures_dir = os.path.join(temp_dir, "figures")
            os.makedirs(figures_dir, exist_ok=True)
            
            # Save uploaded file temporarily
            temp_pdf_path = os.path.join(temp_dir, "uploaded.pdf")
            with open(temp_pdf_path, "wb") as f:
                f.write(pdf_file)
            
            # Check file size and validity
            file_size = os.path.getsize(temp_pdf_path)
            print(f"πŸ“„ Processing PDF: {file_size} bytes")
            
            if file_size == 0:
                return "❌ The uploaded file is empty. Please check your PDF file."
            
            if file_size > 50 * 1024 * 1024:  # 50MB limit
                return "❌ File too large (>50MB). Please upload a smaller PDF."
            
            # Try multiple extraction methods
            text, methods = extract_text_with_multiple_methods(temp_pdf_path)
            
            # Process with unstructured as backup/additional method
            unstructured_text = ""
            try:
                print("πŸ” Trying unstructured...")
                elements = partition_pdf(
                    temp_pdf_path,
                    strategy=PartitionStrategy.FAST,
                    extract_image_block_types=["Image", "Table"],
                    extract_image_block_output_dir=figures_dir,
                    infer_table_structure=True
                )
                
                # Extract text elements
                text_elements = []
                for element in elements:
                    if hasattr(element, 'text') and element.text and element.category not in ["Image", "Table"]:
                        text_elements.append(element.text)
                
                if text_elements:
                    unstructured_text = "\n\n".join(text_elements)
                    print(f"βœ… Unstructured extracted {len(unstructured_text)} characters")
                    
                    # Combine with existing text if available
                    if text.strip():
                        text = f"{text}\n\n--- Additional Content ---\n\n{unstructured_text}"
                    else:
                        text = unstructured_text
                        methods.append("unstructured")
                
            except Exception as unstructured_error:
                print(f"⚠️ Unstructured processing failed: {unstructured_error}")
            
            # Process images
            image_text = ""
            image_count = 0
            if os.path.exists(figures_dir):
                for file in os.listdir(figures_dir):
                    if file.lower().endswith(('.png', '.jpg', '.jpeg')):
                        try:
                            extracted_image_text = extract_text_from_image(os.path.join(figures_dir, file))
                            image_text += f"\n\n{extracted_image_text}"
                            image_count += 1
                        except Exception as e:
                            print(f"⚠️ Error processing image {file}: {e}")
            
            # Also try to extract images directly from PDF using PyMuPDF
            try:
                doc = fitz.open(temp_pdf_path)
                for page_num in range(min(len(doc), 10)):  # Process first 10 pages
                    page = doc.load_page(page_num)
                    image_list = page.get_images(full=True)
                    
                    for img_index, img in enumerate(image_list[:3]):  # Max 3 images per page
                        try:
                            xref = img[0]
                            pix = fitz.Pixmap(doc, xref)
                            if pix.n - pix.alpha < 4:  # GRAY or RGB
                                img_data = pix.tobytes("png")
                                img_path = os.path.join(figures_dir, f"page_{page_num}_img_{img_index}.png")
                                with open(img_path, "wb") as img_file:
                                    img_file.write(img_data)
                                
                                extracted_image_text = extract_text_from_image(img_path)
                                image_text += f"\n\n{extracted_image_text}"
                                image_count += 1
                            pix = None
                        except Exception as img_error:
                            print(f"⚠️ Error extracting image: {img_error}")
                            continue
                doc.close()
            except Exception as e:
                print(f"⚠️ Error extracting images from PDF: {e}")
            
            # Combine all text
            full_text = text
            if image_text.strip():
                full_text += f"\n\n--- Image Content ---\n{image_text}"
            
            if not full_text.strip():
                return (f"⚠️ No text could be extracted from the PDF using any method. "
                       f"This might be a scanned PDF without OCR text, or the file might be corrupted. "
                       f"Methods tried: {', '.join(['PyPDF2', 'PyMuPDF', 'OCR', 'unstructured']) if not methods else ', '.join(methods)}")
            
            # Split and index the text
            chunked_texts = split_text(full_text)
            
            if not chunked_texts:
                return "⚠️ Text was extracted but could not be split into chunks."
            
            # Clear existing vector store and add new documents
            global vector_store
            vector_store = InMemoryVectorStore(embeddings)
            index_docs(chunked_texts)
            
            success_msg = (f"βœ… PDF processed successfully!\n"
                          f"πŸ“Š Statistics:\n"
                          f"- Text chunks: {len(chunked_texts)}\n"
                          f"- Images processed: {image_count}\n"
                          f"- Methods used: {', '.join(methods)}\n"
                          f"- Total characters: {len(full_text)}")
            
            return success_msg
    
    except Exception as e:
        return f"❌ Error processing PDF: {str(e)}\n\nTroubleshooting tips:\n- Ensure the PDF is not password protected\n- Try a different PDF file\n- Check if the file is corrupted"

def load_pdf(file_path, figures_directory):
    """Legacy function - now handled by upload_and_process_pdf"""
    return extract_text_with_multiple_methods(file_path)[0]

def extract_text_from_image(image_path):
    """Extract text description from image using Hugging Face Vision model"""
    try:
        # First try OCR for any text in the image
        ocr_text = ""
        try:
            img = Image.open(image_path)
            ocr_text = pytesseract.image_to_string(img)
            if ocr_text.strip():
                ocr_text = f"Text in image: {ocr_text.strip()}"
        except Exception as ocr_error:
            print(f"⚠️ OCR failed for image: {ocr_error}")
        
        # Then use vision model for description
        vision_description = ""
        if vision_client:
            try:
                with open(image_path, "rb") as img_file:
                    image_data = img_file.read()
                
                response = vision_client.image_to_text(image_data)
                
                if isinstance(response, list) and len(response) > 0:
                    vision_description = response[0].get('generated_text', '')
                elif isinstance(response, dict):
                    vision_description = response.get('generated_text', '')
                else:
                    vision_description = str(response)
                    
            except Exception as vision_error:
                print(f"⚠️ Vision model failed: {vision_error}")
        
        # Combine OCR and vision results
        combined_result = []
        if ocr_text:
            combined_result.append(ocr_text)
        if vision_description:
            combined_result.append(f"Image description: {vision_description}")
        
        if combined_result:
            return "\n".join(combined_result)
        else:
            return "Image content: Visual element present but could not be processed"
            
    except Exception as e:
        print(f"⚠️ Error extracting text from image: {e}")
        return "Image content: Visual element present but could not be processed"

def split_text(text):
    """Split text into chunks"""
    if not text or not text.strip():
        return []
    
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
        add_start_index=True
    )
    return text_splitter.split_text(text)
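
# Chunking illustration (hedged; exact boundaries depend on the separators in
# the input). With chunk_size=1000 and chunk_overlap=200, consecutive chunks
# share up to 200 characters so retrieval does not lose context at cut points.
def _chunking_demo():
    chunks = split_text("word " * 600)  # ~3,000 characters of dummy text
    print(f"{len(chunks)} chunks; first chunk length: {len(chunks[0])}")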

def index_docs(texts):
    """Index documents in vector store"""
    if texts:
        vector_store.add_texts(texts)
        print(f"πŸ“š Indexed {len(texts)} text chunks")

def retrieve_docs(query, k=4):
    """Retrieve relevant documents"""
    try:
        return vector_store.similarity_search(query, k=k)
    except Exception as e:
        print(f"⚠️ Error retrieving documents: {e}")
        return []
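
# Retrieval usage sketch (hedged): similarity_search returns Document objects
# whose .page_content holds the chunk text. Only meaningful after index_docs
# has populated the vector store.
def _retrieval_demo(query="main topic"):
    for doc in retrieve_docs(query, k=2):
        print(doc.page_content[:100])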

def answer_question_hf(question):
    """Answer question using Hugging Face multimodal models"""
    try:
        # Retrieve relevant documents
        related_documents = retrieve_docs(question)
        
        if not related_documents:
            return "❓ No relevant documents found. Please upload and process a PDF first."
        
        # Prepare context
        context = "\n\n".join([doc.page_content for doc in related_documents])
        
        # Limit context length for better performance
        if len(context) > 1500:
            context = context[:1500] + "..."
        
        # Check if we have image content in the context
        has_image_content = "Image content:" in context or "Image description:" in context
        
        if has_image_content and vision_client:
            # Use multimodal approach for questions involving images
            try:
                # For multimodal models, we can send both text and image context
                multimodal_prompt = f"""
                Based on the document content below (including text and image descriptions), answer this question: {question}
                
                Document content:
                {context}
                
                Please provide a clear, concise answer in 2-3 sentences.
                """
                
                response = vision_client.text_generation(
                    multimodal_prompt,
                    max_new_tokens=150,
                    temperature=0.7,
                    do_sample=True,
                    return_full_text=False,
                    stop=["Question:", "Document content:", "\n\n\n"]
                )
                
                if isinstance(response, dict):
                    answer = response.get('generated_text', '')
                elif isinstance(response, str):
                    answer = response
                else:
                    answer = str(response)
                
                if answer.strip():
                    return f"πŸ–ΌοΈ {answer.strip()}"
                    
            except Exception as multimodal_error:
                print(f"⚠️ Multimodal model failed: {multimodal_error}")
        
        # Fall back to text-only approach
        if text_client:
            try:
                text_prompt = f"""
                Question: {question}
                
                Based on the following information from the document, provide a clear and concise answer:
                
                {context}
                
                Answer:"""
                
                response = text_client.text_generation(
                    text_prompt,
                    max_new_tokens=150,
                    temperature=0.7,
                    do_sample=True,
                    return_full_text=False,
                    stop=["Question:", "Answer:", "\n\n\n"]
                )
                
                if isinstance(response, dict):
                    answer = response.get('generated_text', '')
                elif isinstance(response, str):
                    answer = response
                else:
                    answer = str(response)
                
                # Clean up the answer
                answer = answer.strip()
                if answer:
                    return f"πŸ“„ {answer}"
                    
            except Exception as text_error:
                print(f"⚠️ Text model failed: {text_error}")
        
        # Last resort: Return extracted context
        if context:
            return f"πŸ“‹ Based on the document, here's the relevant information:\n\n{context[:500]}{'...' if len(context) > 500 else ''}"
        else:
            return "❌ Unable to find relevant information in the document."
    
    except Exception as e:
        return f"❌ Error generating answer: {str(e)}"

def create_colab_interface():
    """Create Gradio interface optimized for Colab"""
    
    with gr.Blocks(
        title="Enhanced Multimodal RAG with Hugging Face", 
        theme=gr.themes.Soft(),
        css="""
        .gradio-container {
            max-width: 1200px !important;
        }
        """
    ) as iface:
        
        gr.HTML("""
        <div style="text-align: center; padding: 20px;">
            <h1>πŸ“š Enhanced Multimodal RAG with Hugging Face</h1>
            <p>Upload a PDF document and ask questions about its content, including images and tables!</p>
            <p><em>Now with improved PDF processing and multiple extraction methods</em></p>
        </div>
        """)
        
        with gr.Row():
            with gr.Column(scale=1):
                # PDF Upload Section
                gr.Markdown("### πŸ“€ Upload Document")
                pdf_input = gr.File(
                    label="Upload PDF Document",
                    file_types=[".pdf"],
                    type="binary",
                    height=100
                )
                
                upload_btn = gr.Button("πŸ”„ Process PDF", variant="primary", size="lg")
                upload_status = gr.Textbox(
                    label="Processing Status",
                    interactive=False,
                    lines=6,
                    placeholder="Upload a PDF and click 'Process PDF' to begin..."
                )
            
            with gr.Column(scale=2):
                # Chat Interface
                gr.Markdown("### πŸ’¬ Chat Interface")
                chatbot = gr.Chatbot(
                    label="Chat with your document",
                    height=400,
                    show_label=False
                )
                
                with gr.Row():
                    question_input = gr.Textbox(
                        label="Ask a question",
                        placeholder="What is this document about?",
                        lines=1,
                        scale=4
                    )
                    ask_btn = gr.Button("Ask", variant="secondary", scale=1)
        
        # Example questions
        gr.Markdown("### πŸ’‘ Example Questions")
        example_questions = [
            "What is the main topic of this document?",
            "Can you summarize the key points?",
            "What information is shown in the images or tables?",
            "What are the conclusions or recommendations?"
        ]
        
        with gr.Row():
            for i, eq in enumerate(example_questions):
                example_btn = gr.Button(eq, size="sm")
                example_btn.click(
                    lambda x=eq: x,
                    outputs=[question_input]
                )
        
        # Event handlers
        def process_pdf_and_update(pdf_file):
            if pdf_file is None:
                return "Please select a PDF file first."
            return upload_and_process_pdf(pdf_file)
        
        def ask_and_update_chat(question, chat_history):
            if not question.strip():
                return chat_history, ""
            
            # Get answer
            answer = answer_question_hf(question)
            
            # Update chat history
            if chat_history is None:
                chat_history = []
            
            chat_history.append([question, answer])
            
            return chat_history, ""
        
        def clear_chat():
            return []
        
        # Connect events
        upload_btn.click(
            fn=process_pdf_and_update,
            inputs=[pdf_input],
            outputs=[upload_status]
        )
        
        ask_btn.click(
            fn=ask_and_update_chat,
            inputs=[question_input, chatbot],
            outputs=[chatbot, question_input]
        )
        
        question_input.submit(
            fn=ask_and_update_chat,
            inputs=[question_input, chatbot],
            outputs=[chatbot, question_input]
        )
        
        # Clear chat button
        clear_btn = gr.Button("πŸ—‘οΈ Clear Chat", variant="stop", size="sm")
        clear_btn.click(
            fn=clear_chat,
            outputs=[chatbot]
        )
        
        # Enhanced Instructions
        gr.Markdown("""
        ---
        ### πŸ“‹ Instructions:
        1. **Get HF Token**: Visit [Hugging Face Settings](https://huggingface.co/settings/tokens) to get your token
        2. **Upload PDF**: Click "Choose File" and select your PDF document
        3. **Process Document**: Click "Process PDF" and wait for confirmation
        4. **Ask Questions**: Type questions or use example prompts
        
        ### ✨ Enhanced Features:
        - πŸ“„ **Multiple Text Extraction Methods**: PyPDF2, PyMuPDF, OCR, and Unstructured
        - πŸ–ΌοΈ **Advanced Image Processing**: Direct PDF image extraction + vision models
        - πŸ” **Robust PDF Handling**: Works with scanned PDFs, complex layouts, and image-heavy documents
        - πŸ’¬ **Interactive Chat**: Conversation history with multimodal understanding
        - ⚑ **Error Recovery**: Graceful fallbacks when one extraction method fails
        - πŸ“Š **Processing Statistics**: Detailed feedback on what was extracted
        
        ### πŸ”§ Models Used:
        - **🎭 Multimodal**: Microsoft GIT-Large (understands images + text together)
        - **πŸ“ Text Generation**: Google FLAN-T5-Base (optimized for Q&A)
        - **πŸ‘οΈ Vision**: Salesforce BLIP (image captioning and understanding)
        - **πŸ” Embeddings**: Sentence Transformers all-MiniLM-L6-v2
        - **πŸ“– OCR**: Tesseract for text recognition in images
        
        ### 🎯 Multimodal Capabilities:
        - **Text + Images**: Can answer questions about both text content and visual elements
        - **Image Understanding**: Describes charts, diagrams, photos in your PDFs
        - **OCR Integration**: Extracts text from images within PDFs
        - **Context Awareness**: Combines text and visual information for comprehensive answers
        - **Fallback Strategy**: Uses multiple methods to ensure successful text extraction
        
        ### πŸ› οΈ Troubleshooting:
        - **No text extracted**: Try a different PDF file and make sure it is not password protected
        - **Large files**: Keep PDFs under 50MB for optimal performance
        - **Scanned PDFs**: OCR will automatically process image-based text
        - **Complex layouts**: Multiple extraction methods handle various PDF formats
        """)
    
    return iface

# Step 4: Launch the application
print("βœ… Setup complete! Launching Enhanced Gradio interface...")

# Create and launch interface
iface = create_colab_interface()

# Launch with public link for Colab
iface.launch(
    debug=True,
    share=True,  # Creates public link
    server_name="0.0.0.0",
    server_port=7860,
    show_error=True
)

print("πŸŽ‰ Enhanced Application launched successfully!")
print("πŸ“± Use the public link above to access your app from anywhere!")