shukdevdattaEX committed
Commit c1546de · verified
1 Parent(s): 3c93b28

Update app.py

Files changed (1)
  1. app.py +602 -42
app.py CHANGED
@@ -1,42 +1,602 @@
- from Crypto.Cipher import AES
- from Crypto.Protocol.KDF import PBKDF2
- import os
- import tempfile
- from dotenv import load_dotenv
-
- load_dotenv()  # Load all environment variables
-
- def unpad(data):
-     return data[:-data[-1]]
-
- def decrypt_and_run():
-     # Get password from Hugging Face Secrets environment variable
-     password = os.getenv("PASSWORD")
-     if not password:
-         raise ValueError("PASSWORD secret not found in environment variables")
-
-     password = password.encode()
-
-     with open("code.enc", "rb") as f:
-         encrypted = f.read()
-
-     salt = encrypted[:16]
-     iv = encrypted[16:32]
-     ciphertext = encrypted[32:]
-
-     key = PBKDF2(password, salt, dkLen=32, count=1000000)
-     cipher = AES.new(key, AES.MODE_CBC, iv)
-
-     plaintext = unpad(cipher.decrypt(ciphertext))
-
-     with tempfile.NamedTemporaryFile(suffix=".py", delete=False, mode='wb') as tmp:
-         tmp.write(plaintext)
-         tmp.flush()
-         print(f"[INFO] Running decrypted code from {tmp.name}")
-         os.system(f"python {tmp.name}")
-
- if __name__ == "__main__":
-     decrypt_and_run()
-
- # This script decrypts the encrypted code and runs it.
- # Ensure you have the PASSWORD secret set in your Hugging Face Secrets
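For context, the removed launcher expects code.enc to be laid out as a 16-byte salt, a 16-byte IV, and the AES-CBC ciphertext of a PKCS#7-padded Python file, with the key derived via PBKDF2 (dkLen=32, count=1000000) from the PASSWORD secret. A minimal sketch of the companion encryption step is shown below; it is not part of this commit, and the source and destination filenames are hypothetical.

# sketch: produce code.enc in the layout the removed decryptor expects
import os
from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Random import get_random_bytes

def pkcs7_pad(data: bytes, block_size: int = 16) -> bytes:
    # mirror image of unpad() above: append N bytes, each equal to N
    pad_len = block_size - len(data) % block_size
    return data + bytes([pad_len]) * pad_len

def encrypt_code(src="plain_app.py", dst="code.enc"):  # hypothetical filenames
    password = os.environ["PASSWORD"].encode()
    salt = get_random_bytes(16)
    iv = get_random_bytes(16)
    key = PBKDF2(password, salt, dkLen=32, count=1000000)
    cipher = AES.new(key, AES.MODE_CBC, iv)
    with open(src, "rb") as f:
        ciphertext = cipher.encrypt(pkcs7_pad(f.read()))
    with open(dst, "wb") as f:
        f.write(salt + iv + ciphertext)  # salt | iv | ciphertext, as read by decrypt_and_run()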
+ import gradio as gr
+ import PyPDF2
+ import fitz  # PyMuPDF
+ import io
+ import os
+ from together import Together
+ from openai import OpenAI
+ import tempfile
+ import traceback
+ import base64
+ from io import BytesIO
+ from PIL import Image
+
+ def extract_text_from_pdf(pdf_file):
+     """Extract text from uploaded PDF file"""
+     try:
+         if pdf_file is None:
+             return None, "No PDF file provided"
+
+         # Read the PDF file
+         with open(pdf_file.name, 'rb') as file:
+             pdf_reader = PyPDF2.PdfReader(file)
+             text = ""
+
+             # Extract text from all pages
+             for page_num in range(len(pdf_reader.pages)):
+                 page = pdf_reader.pages[page_num]
+                 text += page.extract_text() + "\n"
+
+         if not text.strip():
+             return None, "Could not extract text from PDF. The PDF might be image-based or encrypted."
+
+         return text, None
+
+     except Exception as e:
+         return None, f"Error reading PDF: {str(e)}"
+
+ def extract_images_from_pdf(pdf_file_path):
+     """Extract all images from PDF and return them as PIL Images with page info"""
+     images = []
+
+     # Open the PDF
+     pdf_document = fitz.open(pdf_file_path)
+
+     for page_num in range(len(pdf_document)):
+         page = pdf_document.load_page(page_num)
+
+         # Get image list from the page
+         image_list = page.get_images(full=True)
+
+         for img_index, img in enumerate(image_list):
+             # Get the XREF of the image
+             xref = img[0]
+
+             # Extract the image bytes
+             base_image = pdf_document.extract_image(xref)
+             image_bytes = base_image["image"]
+
+             # Convert to PIL Image
+             pil_image = Image.open(BytesIO(image_bytes))
+
+             images.append({
+                 'image': pil_image,
+                 'page': page_num + 1,
+                 'index': img_index + 1,
+                 'format': base_image["ext"]
+             })
+
+     pdf_document.close()
+     return images
+
+ def convert_pdf_pages_to_images(pdf_file_path, dpi=150):
+     """Convert each PDF page to an image for comprehensive analysis"""
+     images = []
+
+     # Open the PDF
+     pdf_document = fitz.open(pdf_file_path)
+
+     for page_num in range(len(pdf_document)):
+         page = pdf_document.load_page(page_num)
+
+         # Convert page to image
+         mat = fitz.Matrix(dpi/72, dpi/72)  # zoom factor
+         pix = page.get_pixmap(matrix=mat)
+
+         # Convert to PIL Image
+         img_data = pix.tobytes("png")
+         pil_image = Image.open(BytesIO(img_data))
+
+         images.append({
+             'image': pil_image,
+             'page': page_num + 1,
+             'type': 'full_page'
+         })
+
+     pdf_document.close()
+     return images
+
+ def encode_image_to_base64(pil_image):
+     """Convert PIL Image to base64 string"""
+     buffered = BytesIO()
+     pil_image.save(buffered, format="PNG")
+     img_str = base64.b64encode(buffered.getvalue()).decode()
+     return f"data:image/png;base64,{img_str}"
+
+ def summarize_with_together(api_key, text):
+     """Generate summary using Together API"""
+     try:
+         if not api_key or not api_key.strip():
+             return "Please enter your Together API key"
+
+         if not text or not text.strip():
+             return "No text to summarize"
+
+         # Initialize Together client
+         client = Together(api_key=api_key.strip())
+
+         # System prompt for summarization
+         system_prompt = """You are a highly capable language model specialized in document summarization. Your task is to read and understand the full content of a multi-page PDF document and generate a clear, accurate, and detailed summary of the entire document.
+ Focus on capturing all main ideas, key points, arguments, findings, and conclusions presented throughout the document. If the document is technical, legal, academic, or contains structured sections (e.g., introduction, methods, results, discussion), maintain the logical flow and structure while expressing the content in a comprehensive and accessible manner.
+ Avoid unnecessary simplification. Include important details, supporting evidence, and nuanced insights that reflect the depth of the original material. Do not copy the text verbatim.
+ Output only the summary. Do not explain your process. Use a neutral, professional, and informative tone. The summary should provide a full understanding of the document to someone who has not read it."""
+
+         # Create completion with optimal hyperparameters
+         completion = client.chat.completions.create(
+             model="lgai/exaone-3-5-32b-instruct",
+             messages=[
+                 {
+                     "role": "system",
+                     "content": system_prompt
+                 },
+                 {
+                     "role": "user",
+                     "content": f"Please summarize the following document:\n\n{text}"
+                 }
+             ],
+             temperature=0.3,  # Low randomness for factual, focused summaries
+             max_tokens=2048,  # Increased to allow longer summaries
+             top_p=0.9,  # Allows some diversity while still grounded
+             stream=False
+         )
+
+         summary = completion.choices[0].message.content
+         return summary
+
+     except Exception as e:
+         error_msg = f"Error generating summary: {str(e)}"
+         if "authentication" in str(e).lower() or "api" in str(e).lower():
+             error_msg += "\n\nPlease check your Together API key and ensure it's valid."
+         return error_msg
+
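The summarizer above sends the entire extracted text in a single request, so very long PDFs can exceed the model's context window. A minimal chunked variant is sketched below; it is not part of this commit, chunk_chars is an arbitrary illustrative limit, and it simply reuses the summarize_with_together function defined above.

# sketch: two-pass summarization for documents too long for one request
def summarize_long_text(api_key, text, chunk_chars=20000):
    # short documents go straight through the existing single-call path
    if len(text) <= chunk_chars:
        return summarize_with_together(api_key, text)
    # split into fixed-size character chunks and summarize each one
    chunks = [text[i:i + chunk_chars] for i in range(0, len(text), chunk_chars)]
    partial_summaries = [summarize_with_together(api_key, chunk) for chunk in chunks]
    # second pass: condense the per-chunk summaries into one overall summary
    return summarize_with_together(api_key, "\n\n".join(partial_summaries))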
+ def analyze_images_with_together(together_api_key, openrouter_api_key, images):
+     """Analyze images using OpenRouter API with mistralai/mistral-small-3.2-24b-instruct:free model"""
+     if not openrouter_api_key:
+         return "❌ Please enter your OpenRouter API key."
+
+     try:
+         # Initialize OpenRouter client
+         client = OpenAI(
+             base_url="https://openrouter.ai/api/v1",
+             api_key=openrouter_api_key.strip(),
+         )
+
+         results = []
+
+         for idx, img_data in enumerate(images):
+             # Encode image to base64
+             base64_image = encode_image_to_base64(img_data['image'])
+
+             # Prepare messages for the API call
+             messages = [
+                 {
+                     "role": "system",
+                     "content": """You are an advanced language model with strong capabilities in visual and textual understanding. Your task is to analyze all images, diagrams, and flowcharts within a PDF document. This includes:
+ 1. Extracting and interpreting text from images and flowcharts.
+ 2. Understanding the visual structure, logic, and relationships depicted in diagrams.
+ 3. Summarizing the content and purpose of each visual element in a clear and informative manner.
+ After processing, be ready to answer user questions about any of the images or flowcharts, including their meaning, structure, dedication, process flows, or relationships shown.
+ Be accurate, concise, and visually aware. Clearly explain visual content in text form. Do not guess if visual data is unclear or ambiguous; instead, state what is observable.
+ Use a neutral, helpful tone. Do not include irrelevant information or commentary unrelated to the visual content. When summarizing or answering questions, assume the user may not have access to the original image or diagram."""
+                 },
+                 {
+                     "role": "user",
+                     "content": [
+                         {
+                             "type": "text",
+                             "text": f"Please analyze this image from page {img_data.get('page', 'unknown')} of the PDF document. Provide a detailed analysis of all visual elements, text, diagrams, flowcharts, and their relationships."
+                         },
+                         {
+                             "type": "image_url",
+                             "image_url": {
+                                 "url": base64_image
+                             }
+                         }
+                     ]
+                 }
+             ]
+
+             # Make API call with optimal parameters
+             try:
+                 completion = client.chat.completions.create(
+                     model="mistralai/mistral-small-3.2-24b-instruct:free",
+                     messages=messages,
+                     temperature=0.2,
+                     max_tokens=2048,
+                     top_p=0.85,
+                     extra_headers={
+                         "HTTP-Referer": "https://example.com",  # Replace with your site URL if applicable
+                         "X-Title": "Advanced PDF Analyzer"  # Replace with your site title if applicable
+                     },
+                     stream=False
+                 )
+
+                 # Check if completion.choices is valid
+                 if not completion.choices or len(completion.choices) == 0:
+                     results.append(f"## 📄 Page {img_data.get('page', 'N/A')} - Error\n\nNo valid response received from OpenRouter API for this image.\n\n---\n")
+                     continue
+
+                 analysis = completion.choices[0].message.content
+
+                 page_info = f"Page {img_data.get('page', 'N/A')}"
+                 if 'index' in img_data:
+                     page_info += f", Image {img_data['index']}"
+                 elif 'type' in img_data and img_data['type'] == 'full_page':
+                     page_info += " (Full Page)"
+
+                 results.append(f"## 📄 {page_info}\n\n{analysis}\n\n---\n")
+
+             except Exception as img_error:
+                 page_info = f"Page {img_data.get('page', 'N/A')}"
+                 if 'index' in img_data:
+                     page_info += f", Image {img_data['index']}"
+                 elif 'type' in img_data and img_data['type'] == 'full_page':
+                     page_info += " (Full Page)"
+                 results.append(f"## 📄 {page_info}\n\n❌ Error analyzing image: {str(img_error)}\n\n---\n")
+
+         if not results:
+             return "⚠️ No images found in the PDF document."
+
+         return "\n".join(results)
+
+     except Exception as e:
+         error_msg = f"❌ Error analyzing images: {str(e)}"
+         if "authentication" in str(e).lower() or "api" in str(e).lower():
+             error_msg += "\n\nPlease check your OpenRouter API key and ensure it's valid."
+         return error_msg
+
+ def process_pdf_text_summary(together_api_key, pdf_file, progress=gr.Progress()):
+     """Process PDF and generate text summary"""
+     try:
+         if not together_api_key or not together_api_key.strip():
+             return "❌ Please enter your Together API key", "", ""
+
+         if pdf_file is None:
+             return "❌ Please upload a PDF file", "", ""
+
+         progress(0.1, desc="Reading PDF file...")
+
+         # Extract text from PDF
+         text, error = extract_text_from_pdf(pdf_file)
+         if error:
+             return f"❌ {error}", "", ""
+
+         progress(0.4, desc="Text extracted successfully...")
+
+         # Show preview of extracted text
+         text_preview = text[:500] + "..." if len(text) > 500 else text
+
+         progress(0.6, desc="Generating summary with Together AI... Please wait, this may take a few minutes...")
+
+         # Generate summary
+         summary = summarize_with_together(together_api_key, text)
+
+         progress(1.0, desc="Summary generated successfully!")
+
+         return "✅ Summary generated successfully!", text_preview, summary
+
+     except Exception as e:
+         error_traceback = traceback.format_exc()
+         return f"❌ Unexpected error: {str(e)}\n\nTraceback:\n{error_traceback}", "", ""
+
+ def process_pdf_image_analysis(together_api_key, openrouter_api_key, pdf_file, analysis_method, progress=gr.Progress()):
+     """Process PDF and analyze images"""
+     if pdf_file is None:
+         return "⚠️ Please upload a PDF file."
+
+     if not openrouter_api_key or openrouter_api_key.strip() == "":
+         return "⚠️ Please enter your OpenRouter API key."
+
+     try:
+         progress(0.1, desc="Processing PDF file...")
+
+         # Create temporary file for PDF processing
+         with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:
+             # Write uploaded file content to temporary file
+             if hasattr(pdf_file, 'read'):
+                 tmp_file.write(pdf_file.read())
+             else:
+                 with open(pdf_file.name, 'rb') as f:
+                     tmp_file.write(f.read())
+             tmp_file_path = tmp_file.name
+
+         progress(0.3, desc="Extracting images...")
+
+         images_to_analyze = []
+
+         if analysis_method == "Extract embedded images only":
+             # Extract only embedded images
+             images_to_analyze = extract_images_from_pdf(tmp_file_path)
+             if not images_to_analyze:
+                 return "⚠️ No embedded images found in the PDF. Try 'Full page analysis' to analyze the entire content."
+
+         elif analysis_method == "Full page analysis":
+             # Convert each page to image for comprehensive analysis
+             images_to_analyze = convert_pdf_pages_to_images(tmp_file_path)
+
+         else:  # Both methods
+             # First try embedded images
+             embedded_images = extract_images_from_pdf(tmp_file_path)
+             # Then add full page analysis
+             page_images = convert_pdf_pages_to_images(tmp_file_path)
+             images_to_analyze = embedded_images + page_images
+
+         # Clean up temporary file
+         os.unlink(tmp_file_path)
+
+         if not images_to_analyze:
+             return "⚠️ No visual content found in the PDF document."
+
+         progress(0.7, desc="Analyzing images with AI... This may take a while depending on the number of images.")
+
+         # Analyze images with OpenRouter
+         analysis_result = analyze_images_with_together(together_api_key, openrouter_api_key, images_to_analyze)
+
+         progress(1.0, desc="Analysis complete!")
+
+         return analysis_result
+
+     except Exception as e:
+         return f"❌ Error processing PDF: {str(e)}"
+
+ def clear_all_text():
+     """Clear all text analysis fields"""
+     return "", "", None, "", "", ""
+
+ def clear_all_image():
+     """Clear all image analysis fields"""
+     return "", "", None, "Full page analysis", ""
+
+ # Custom CSS for better styling
+ css = """
+ .gradio-container {
+     max-width: 1400px !important;
+     margin: auto !important;
+ }
+ .main-header {
+     text-align: center;
+     margin-bottom: 2rem;
+ }
+ .status-success {
+     color: #28a745 !important;
+ }
+ .status-error {
+     color: #dc3545 !important;
+ }
+ .info-box {
+     background-color: #f8f9fa;
+     padding: 1rem;
+     border-radius: 0.5rem;
+     border-left: 4px solid #007bff;
+     margin: 1rem 0;
+ }
+ .feature-box {
+     background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
+     padding: 1.5rem;
+     border-radius: 1rem;
+     color: white;
+     margin: 1rem 0;
+ }
+ """
+
+ # Create Gradio interface
+ with gr.Blocks(css=css, title="Advanced PDF Analyzer with Together AI and OpenRouter") as demo:
+     # Header
+     gr.HTML("""
+     <div class="main-header">
+         <h1>🚀 Advanced PDF Analyzer with Together AI and OpenRouter</h1>
+         <p>Comprehensive PDF analysis tool - Extract text summaries with Together AI and analyze images/diagrams with OpenRouter AI</p>
+     </div>
+     """)
+
+     # Feature overview
+     gr.HTML("""
+     <div class="feature-box">
+         <h3>✨ What this tool can do:</h3>
+         <div style="display: grid; grid-template-columns: 1fr 1fr; gap: 1rem; margin-top: 1rem;">
+             <div>
+                 <h4>📝 Text Analysis:</h4>
+                 <ul>
+                     <li>Extract and summarize text content</li>
+                     <li>Generate comprehensive document summaries</li>
+                     <li>Maintain logical structure and key insights</li>
+                 </ul>
+             </div>
+             <div>
+                 <h4>🖼️ Visual Analysis:</h4>
+                 <ul>
+                     <li>Analyze embedded images and diagrams</li>
+                     <li>Process flowcharts and technical drawings</li>
+                     <li>Extract text from images (OCR)</li>
+                 </ul>
+             </div>
+         </div>
+     </div>
+     """)
+
+     # API Key section
+     gr.HTML("""
+     <div class="info-box">
+         <strong>🔑 How to get your API Keys:</strong><br>
+         <h4>Together API Key (for Text Summarization):</h4>
+         1. Visit <a href="https://api.together.ai/" target="_blank">api.together.ai</a><br>
+         2. Sign up or log in to your account<br>
+         3. Navigate to API Keys section<br>
+         4. Create a new API key and copy it<br>
+         5. Paste it in the Together API key field below<br>
+         <h4>OpenRouter API Key (for Image Analysis):</h4>
+         1. Visit <a href="https://openrouter.ai/" target="_blank">openrouter.ai</a><br>
+         2. Sign up or log in to your account<br>
+         3. Navigate to API Keys section<br>
+         4. Create a new API key and copy it<br>
+         5. Paste it in the OpenRouter API key field below
+     </div>
+     """)
+
+     # API Key inputs
+     together_api_key_input = gr.Textbox(
+         label="🔑 Together API Key (for Text Summarization)",
+         placeholder="Enter your Together API key here...",
+         type="password",
+         info="Your Together API key is used for text summarization and is not stored."
+     )
+     openrouter_api_key_input = gr.Textbox(
+         label="🔑 OpenRouter API Key (for Image Analysis)",
+         placeholder="Enter your OpenRouter API key here...",
+         type="password",
+         info="Your OpenRouter API key is used for image analysis and is not stored."
+     )
+
+     # Tabs for different functionalities
+     with gr.Tabs():
+         # Text Summary Tab
+         with gr.TabItem("📝 Text Summary", elem_id="text-tab"):
+             with gr.Row():
+                 with gr.Column(scale=1):
+                     gr.Markdown("## 📄 Text Analysis")
+
+                     pdf_file_text = gr.File(
+                         label="Upload PDF Document",
+                         file_types=[".pdf"]
+                     )
+
+                     with gr.Row():
+                         summarize_btn = gr.Button("📋 Generate Text Summary", variant="primary", size="lg")
+                         clear_text_btn = gr.Button("🗑️ Clear All", variant="secondary")
+
+                 with gr.Column(scale=2):
+                     gr.Markdown("## 📊 Text Analysis Results")
+                     status_text_output = gr.Textbox(
+                         label="Status",
+                         interactive=False,
+                         show_label=True
+                     )
+
+                     with gr.Tabs():
+                         with gr.TabItem("📝 Summary"):
+                             summary_output = gr.Textbox(
+                                 label="AI Generated Summary",
+                                 lines=15,
+                                 interactive=False,
+                                 placeholder="Your PDF summary will appear here...",
+                                 show_copy_button=True
+                             )
+
+                         with gr.TabItem("📄 Extracted Text Preview"):
+                             text_preview_output = gr.Textbox(
+                                 label="Extracted Text (First 500 characters)",
+                                 lines=10,
+                                 interactive=False,
+                                 placeholder="Preview of extracted text will appear here...",
+                                 show_copy_button=True
+                             )
+
+         # Image Analysis Tab
+         with gr.TabItem("🖼️ Image Analysis", elem_id="image-tab"):
+             with gr.Row():
+                 with gr.Column(scale=1):
+                     gr.Markdown("## 🔍 Visual Analysis")
+
+                     pdf_file_image = gr.File(
+                         label="Upload PDF Document",
+                         file_types=[".pdf"]
+                     )
+
+                     analysis_method = gr.Radio(
+                         choices=[
+                             "Extract embedded images only",
+                             "Full page analysis",
+                             "Both (embedded + full pages)"
+                         ],
+                         value="Full page analysis",
+                         label="Analysis Method",
+                         info="Choose how to analyze the PDF content"
+                     )
+
+                     with gr.Row():
+                         analyze_images_btn = gr.Button("🔍 Analyze Images", variant="primary", size="lg")
+                         clear_image_btn = gr.Button("🗑️ Clear All", variant="secondary")
+
+                 with gr.Column(scale=2):
+                     gr.Markdown("## 📊 Image Analysis Results")
+                     image_analysis_output = gr.Textbox(
+                         label="Visual Analysis Results",
+                         lines=20,
+                         max_lines=50,
+                         show_copy_button=True,
+                         placeholder="Image analysis results will appear here..."
+                     )
+
+     # Usage instructions
+     gr.HTML("""
+     <div class="info-box">
+         <strong>📋 Usage Instructions:</strong><br>
+         <h4>For Text Summary:</h4>
+         1. Enter your Together API key above<br>
+         2. Go to "Text Summary" tab<br>
+         3. Upload a PDF document<br>
+         4. Click "Generate Text Summary"<br>
+
+         <h4>For Image Analysis:</h4>
+         1. Enter your OpenRouter API key above<br>
+         2. Go to "Image Analysis" tab<br>
+         3. Upload a PDF document<br>
+         4. Choose analysis method<br>
+         5. Click "Analyze Images"<br>
+
+         <h4>Analysis Methods:</h4>
+         • <strong>Extract embedded images:</strong> Analyzes only images embedded within the PDF<br>
+         • <strong>Full page analysis:</strong> Converts each page to image for comprehensive analysis (recommended)<br>
+         • <strong>Both:</strong> Combines both methods for maximum coverage
+     </div>
+     """)
+
+     # Model information
+     gr.HTML("""
+     <div style="margin-top: 2rem; padding: 1rem; background-color: #e9ecef; border-radius: 0.5rem;">
+         <strong>🤖 Model Information:</strong><br>
+         • <strong>Text Analysis:</strong> lgai/exaone-3-5-32b-instruct via Together AI (optimized for summarization)<br>
+         • <strong>Image Analysis:</strong> mistralai/mistral-small-3.2-24b-instruct:free via OpenRouter (with vision capabilities)<br>
+         • <strong>Temperature:</strong> 0.2-0.3 (focused, factual analysis)<br>
+         • <strong>Max Tokens:</strong> 2048 (comprehensive outputs)
+     </div>
+     """)
+
+     # Event handlers for text summary
+     summarize_btn.click(
+         fn=process_pdf_text_summary,
+         inputs=[together_api_key_input, pdf_file_text],
+         outputs=[status_text_output, text_preview_output, summary_output],
+         show_progress=True
+     )
+
+     clear_text_btn.click(
+         fn=clear_all_text,
+         outputs=[together_api_key_input, openrouter_api_key_input, pdf_file_text, status_text_output, text_preview_output, summary_output]
+     )
+
+     # Event handlers for image analysis
+     analyze_images_btn.click(
+         fn=process_pdf_image_analysis,
+         inputs=[together_api_key_input, openrouter_api_key_input, pdf_file_image, analysis_method],
+         outputs=[image_analysis_output],
+         show_progress=True
+     )
+
+     clear_image_btn.click(
+         fn=clear_all_image,
+         outputs=[together_api_key_input, openrouter_api_key_input, pdf_file_image, analysis_method, image_analysis_output]
+     )
+
+ # Launch the app
+ if __name__ == "__main__":
+     print("🚀 Starting Advanced PDF Analyzer App...")
+     print("📋 Make sure you have the required packages installed:")
+     print("   pip install gradio PyPDF2 PyMuPDF pillow together openai")
+     print("\n🔑 Don't forget to get your API keys:")
+     print("   - Together API key from: https://api.together.ai/")
+     print("   - OpenRouter API key from: https://openrouter.ai/")
+
+     demo.launch(
+         share=True
+     )