samyak152002 committed on
Commit 6f96666 · verified · 1 Parent(s): 36c10b7

Update app.py

Files changed (1):
  app.py +443 -377
app.py CHANGED
@@ -1,89 +1,128 @@
 import re
 import fitz  # PyMuPDF
-from pdfminer.high_level import extract_text
-from pdfminer.layout import LAParams
 import language_tool_python
-from typing import List, Dict, Any, Tuple
-from collections import Counter
 import json
 import traceback
 import io
 import tempfile
-import os
 import gradio as gr

-# Set JAVA_HOME environment variable
 os.environ['JAVA_HOME'] = '/usr/lib/jvm/java-11-openjdk-amd64'

 # ------------------------------
-# Analysis Functions
 # ------------------------------

-# def extract_pdf_text_by_page(file) -> List[str]:
-#     """Extracts text from a PDF file, page by page, using PyMuPDF."""
-#     if isinstance(file, str):
-#         with fitz.open(file) as doc:
-#             return [page.get_text("text") for page in doc]
-#     else:
-#         with fitz.open(stream=file.read(), filetype="pdf") as doc:
-#             return [page.get_text("text") for page in doc]
-
-def extract_pdf_text(file) -> str:
-    """Extracts full text from a PDF file using PyMuPDF4LLM."""
     try:
-        print(f"Opening PDF file: {file}")
-
-        # Handle file path vs stream
-        temp_file_path = None
-        if isinstance(file, str):
-            print(f"Opening file by path: {file}")
-            file_path = file
         else:
-            print(f"Opening file from stream")
-            import tempfile
-            import os
-            temp_file = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
-            temp_file_path = temp_file.name
-            temp_file.write(file.read())
-            temp_file.close()
-            file_path = temp_file_path
-
-        # Get page count with PyMuPDF for logging purposes
-        doc = fitz.open(file_path)
-        page_count = len(doc)
-        doc.close()
-        print(f"PDF opened successfully with {page_count} pages")
-
-        # Process with pymupdf4llm
-        import pymupdf4llm
-        full_text = pymupdf4llm.to_markdown(file_path)
-
-        # Log extraction info for each page (approximating per-page counts)
-        avg_chars_per_page = len(full_text) // page_count if page_count > 0 else 0
-        for page_number in range(page_count):
-            print(f"Extracted {avg_chars_per_page} characters from page {page_number+1}")

-        # Clean up temporary file if created
-        if temp_file_path:
-            os.remove(temp_file_path)

-        print(f"Total extracted text length: {len(full_text)} characters.")
-        print(full_text)
-        return full_text

     except Exception as e:
         print(f"Error extracting text from PDF: {str(e)}")
-        import traceback
-        print(traceback.format_exc())
         return ""


 def check_text_presence(full_text: str, search_terms: List[str]) -> Dict[str, bool]:
-    """Checks for the presence of required terms in the text."""
     return {term: term.lower() in full_text.lower() for term in search_terms}

 def label_authors(full_text: str) -> str:
-    """Label authors in the text with 'Authors:' if not already labeled."""
     author_line_regex = r"^(?:.*\n)(.*?)(?:\n\n)"
     match = re.search(author_line_regex, full_text, re.MULTILINE)
     if match:
@@ -91,396 +130,403 @@ def label_authors(full_text: str) -> str:
         return full_text.replace(authors, f"Authors: {authors}")
     return full_text

-def check_metadata(full_text: str) -> Dict[str, Any]:
-    """Check for metadata elements."""
     return {
-        "author_email": bool(re.search(r'\b[\w.-]+?@\w+?\.\w+?\b', full_text)),
-        "list_of_authors": bool(re.search(r'Authors?:', full_text, re.IGNORECASE)),
-        "keywords_list": bool(re.search(r'Keywords?:', full_text, re.IGNORECASE)),
-        "word_count": len(full_text.split()) or "Missing"
     }

-def check_disclosures(full_text: str) -> Dict[str, bool]:
-    """Check for disclosure statements."""
-    # Regular search terms
     search_terms = [
         "conflict of interest statement",
         "ethics statement",
         "funding statement",
         "data access statement"
     ]
-
-    # Get results for regular terms
-    results = check_text_presence(full_text, search_terms)
-
-    # Special check for author contribution(s) statement - either singular or plural form
-    has_author_contribution = ("author contribution statement" in full_text.lower() or
-                               "author contributions statement" in full_text.lower())
-
-    # Add the author contribution result to our results dictionary
     results["author contribution statement"] = has_author_contribution
-
     return results

-def check_figures_and_tables(full_text: str) -> Dict[str, bool]:
-    """Check for figures and tables."""
     return {
-        "figures_with_citations": bool(re.search(r'Figure \d+.*?citation', full_text, re.IGNORECASE)),
-        "figures_legends": bool(re.search(r'Figure \d+.*?legend', full_text, re.IGNORECASE)),
-        "tables_legends": bool(re.search(r'Table \d+.*?legend', full_text, re.IGNORECASE))
     }

-def check_references(full_text: str) -> Dict[str, Any]:
-    """Check for references."""
     return {
-        "old_references": bool(re.search(r'\b19[0-9]{2}\b', full_text)),
-        "citations_in_abstract": bool(re.search(r'\b(citation|reference)\b', full_text[:1000], re.IGNORECASE)),
-        "reference_count": len(re.findall(r'\[.*?\]', full_text)),
-        "self_citations": bool(re.search(r'Self-citation', full_text, re.IGNORECASE))
     }

-def check_structure(full_text: str) -> Dict[str, bool]:
-    """Check document structure."""
     return {
-        "imrad_structure": all(section in full_text for section in ["Introduction", "Methods", "Results", "Discussion"]),
-        "abstract_structure": "structured abstract" in full_text.lower()
     }

-def check_language_issues(full_text: str) -> Dict[str, Any]:
-    """Check for language issues using LanguageTool and additional regex patterns."""
     try:
-        language_tool = language_tool_python.LanguageTool('en-US')
-        matches = language_tool.check(full_text)
-        issues = []

-        # Process LanguageTool matches
-        for match in matches:
-            # Ignore issues with rule_id 'EN_SPLIT_WORDS_HYPHEN'
-            if match.ruleId == "EN_SPLIT_WORDS_HYPHEN":
                 continue
-
-            issues.append({
-                "message": match.message,
-                "context": match.context.strip(),
-                "suggestions": match.replacements[:3] if match.replacements else [],
-                "category": match.category,
-                "rule_id": match.ruleId,
-                "offset": match.offset,
-                "length": match.errorLength,
-                "coordinates": [],
-                "page": 0
             })
-        print(f"Total language issues found: {len(issues)}")

-        # -----------------------------------
-        # Additions: Regex-based Issue Detection
-        # -----------------------------------
-
-        # Define regex pattern to find words immediately followed by '[' without space
         regex_pattern = r'\b(\w+)\[(\d+)\]'
-        regex_matches = list(re.finditer(regex_pattern, full_text))
-        print(f"Total regex issues found: {len(regex_matches)}")

-        # Process regex matches
-        for match in regex_matches:
             word = match.group(1)
             number = match.group(2)
-            start = match.start()
-            end = match.end()
-            issues.append({
-                "message": f"Missing space before '[' in '{word}[{number}]'. Should be '{word} [{number}]'.",
-                "context": full_text[max(match.start() - 30, 0):min(match.end() + 30, len(full_text))].strip(),
-                "suggestions": [f"{word} [{number}]", f"{word} [`{number}`]", f"{word} [number {number}]"],
-                "category": "Formatting",
-                "rule_id": "SPACE_BEFORE_BRACKET",
-                "offset": match.start(),
-                "length": match.end() - match.start(),
-                "coordinates": [],
-                "page": 0
             })
-
-        print(f"Total combined issues found: {len(issues)}")

         return {
-            "total_issues": len(issues),
-            "issues": issues
         }
     except Exception as e:
-        print(f"Error checking language issues: {e}")
-        return {"error": str(e)}
-
-def check_language(full_text: str) -> Dict[str, Any]:
-    """Check language quality."""
-    return {
-        "plain_language": bool(re.search(r'plain language summary', full_text, re.IGNORECASE)),
-        "readability_issues": False,  # Placeholder for future implementation
-        "language_issues": check_language_issues(full_text)
-    }

-def check_figure_order(full_text: str) -> Dict[str, Any]:
-    """Check if figures are referred to in sequential order."""
     figure_pattern = r'(?:Fig(?:ure)?\.?|Figure)\s*(\d+)'
-    figure_references = re.findall(figure_pattern, full_text, re.IGNORECASE)
-    figure_numbers = sorted(set(int(num) for num in figure_references))

-    is_sequential = all(a + 1 == b for a, b in zip(figure_numbers, figure_numbers[1:]))

-    if figure_numbers:
-        expected_figures = set(range(1, max(figure_numbers) + 1))
-        missing_figures = list(expected_figures - set(figure_numbers))
-    else:
-        missing_figures = None

-    duplicates = [num for num, count in Counter(figure_references).items() if count > 1]
-    duplicate_numbers = [int(num) for num in duplicates]
-    not_mentioned = list(set(figure_references) - set(duplicates))

     return {
-        "sequential_order": is_sequential,
-        "figure_count": len(figure_numbers),
-        "missing_figures": missing_figures,
-        "figure_order": figure_numbers,
-        "duplicate_references": duplicates,
-        "not_mentioned": not_mentioned
     }

-def check_reference_order(full_text: str) -> Dict[str, Any]:
-    """Check if references in the main body text are in order."""
-    reference_pattern = r'\[(\d+)\]'
-    references = re.findall(reference_pattern, full_text)
-    ref_numbers = [int(ref) for ref in references]

-    max_ref = 0
-    out_of_order = []
-    for i, ref in enumerate(ref_numbers):
-        if ref > max_ref + 1:
-            out_of_order.append((i+1, ref))
-        max_ref = max(max_ref, ref)

-    all_refs = set(range(1, max_ref + 1))
-    used_refs = set(ref_numbers)
-    missing_refs = list(all_refs - used_refs)

     return {
-        "max_reference": max_ref,
-        "out_of_order": out_of_order,
-        "missing_references": missing_refs,
-        "is_ordered": len(out_of_order) == 0 and len(missing_refs) == 0
     }

-def highlight_issues_in_pdf(file, language_matches: List[Dict[str, Any]]) -> bytes:
-    """
-    Highlights language issues in the PDF and returns the annotated PDF as bytes.
-    This function maps LanguageTool matches to specific words in the PDF
-    and highlights those words.
-    """
-    try:
-        # Open the PDF
-        doc = fitz.open(stream=file.read(), filetype="pdf") if not isinstance(file, str) else fitz.open(file)
-        # print(f"Opened PDF with {len(doc)} pages.")
-        # print(language_matches)
-        # Extract words with positions from each page
-        word_list = []  # List of tuples: (page_number, word, x0, y0, x1, y1)
-        for page_number in range(len(doc)):
-            page = doc[page_number]
-            print(page.get_text("words"))
-            words = page.get_text("words")  # List of tuples: (x0, y0, x1, y1, "word", block_no, line_no, word_no)
-            for w in words:
-                # print(w)
-                word_text = w[4]
-                # **Fix:** Insert a space before '[' to ensure "globally [2]" instead of "globally[2]"
-                # if '[' in word_text:
-                #     word_text = word_text.replace('[', ' [')
-                word_list.append((page_number, word_text, w[0], w[1], w[2], w[3]))
-        # print(f"Total words extracted: {len(word_list)}")
-
-        # Concatenate all words to form the full text
-        concatenated_text = ""
-        concatenated_text = " ".join([w[1] for w in word_list])
-
-        # print(f"Concatenated text length: {concatenated_text} characters.")
-
-        # Find "Abstract" section and set the processing start point
-        abstract_start = concatenated_text.lower().find("abstract")
-        abstract_offset = 0 if abstract_start == -1 else abstract_start
-
-        # Find "References" section and exclude from processing
-        references_start = concatenated_text.lower().rfind("references")
-        references_offset = len(concatenated_text) if references_start == -1 else references_start
-
-        # Iterate over each language issue
-        for idx, issue in enumerate(language_matches, start=1):
-            offset = issue["offset"]  # offset+line_no-1
-            length = issue["length"]
-
-            # Skip issues in the references section
-            if offset < abstract_offset or offset >= references_offset:
-                continue
-
-            error_text = concatenated_text[offset:offset+length]
-            print(f"\nIssue {idx}: '{error_text}' at offset {offset} with length {length}")
-
-            # Find the words that fall within the error span
-            current_pos = 0
-            target_words = []
-            for word in word_list:
-                word_text = word[1]
-                word_length = len(word_text) + 1  # +1 for the space
-
-                if current_pos + word_length > offset and current_pos < offset + length:
-                    target_words.append(word)
-                current_pos += word_length
-
-            if not target_words:
-                # print("No matching words found for this issue.")
-                continue
-
-            initial_x = target_words[0][2]
-            initial_y = target_words[0][3]
-            final_x = target_words[len(target_words)-1][4]
-            final_y = target_words[len(target_words)-1][5]
-            issue["coordinates"] = [initial_x, initial_y, final_x, final_y]
-            issue["page"] = target_words[0][0] + 1
-            # Add highlight annotations to the target words
-            print()
-            print("issue", issue)
-            print("error text", error_text)
-            print(target_words)
-            print()
-            for target in target_words:
-                page_num, word_text, x0, y0, x1, y1 = target
-                page = doc[page_num]
-                # Define a rectangle around the word with some padding
-                rect = fitz.Rect(x0 - 1, y0 - 1, x1 + 1, y1 + 1)
-                # Add a highlight annotation
-                highlight = page.add_highlight_annot(rect)
-                highlight.set_colors(stroke=(1, 1, 0))  # Yellow color
-                highlight.update()
-                # print(f"Highlighted '{word_text}' on page {page_num + 1} at position ({x0}, {y0}, {x1}, {y1})")
-
-        # Save annotated PDF to bytes
-        byte_stream = io.BytesIO()
-        doc.save(byte_stream)
-        annotated_pdf_bytes = byte_stream.getvalue()
-        doc.close()
-
-        # Save annotated PDF locally for verification
-        with open("annotated_temp.pdf", "wb") as f:
-            f.write(annotated_pdf_bytes)
-        # print("Annotated PDF saved as 'annotated_temp.pdf' for manual verification.")
-
-        return language_matches, annotated_pdf_bytes
-    except Exception as e:
-        print(f"Error in highlighting PDF: {e}")
-        return b""
-
-
 # ------------------------------
 # Main Analysis Function
 # ------------------------------

-# server/gradio_client.py

-def analyze_pdf(filepath: str) -> Tuple[Dict[str, Any], bytes]:
-    """Analyzes the PDF for language issues and returns results and annotated PDF."""
     try:
-        full_text = extract_pdf_text(filepath)
-        if not full_text:
-            return {"error": "Failed to extract text from PDF."}, None

-        # Create the results structure
         results = {
-            "issues": [],  # Initialize as empty array
-            "regex_checks": {
-                "metadata": check_metadata(full_text),
-                "disclosures": check_disclosures(full_text),
-                "figures_and_tables": check_figures_and_tables(full_text),
-                "references": check_references(full_text),
-                "structure": check_structure(full_text),
-                "figure_order": check_figure_order(full_text),
-                "reference_order": check_reference_order(full_text)
             }
         }
-
-        # Handle language issues
-        language_issues = check_language_issues(full_text)
-        if "error" in language_issues:
-            return {"error": language_issues["error"]}, None
-
-        issues = language_issues.get("issues", [])
-        if issues:
-            language_matches, annotated_pdf = highlight_issues_in_pdf(filepath, issues)
-            results["issues"] = language_matches  # This is already an array from check_language_issues
-            return results, annotated_pdf
-        else:
-            # Keep issues as empty array if none found
-            return results, None

     except Exception as e:
         return {"error": str(e)}, None
 # ------------------------------
 # Gradio Interface
 # ------------------------------

-def process_upload(file):
-    """
-    Process the uploaded PDF file and return analysis results and annotated PDF.
-    """
-    # print(file.name)
-    if file is None:
         return json.dumps({"error": "No file uploaded"}, indent=2), None

-    # # Create a temporary file to work with
-
-    # with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as temp_input:
-    #     temp_input.write(file)
-    #     temp_input_path = temp_input.name
-    # print(temp_input_path)
-
-    temp_input = tempfile.NamedTemporaryFile(delete=False, suffix='.pdf')
-    temp_input.write(file)
-    temp_input_path = temp_input.name
-    print(temp_input_path)
-    # Analyze the PDF
-
-    results, annotated_pdf = analyze_pdf(temp_input_path)
-
-    print(results)
-    results_json = json.dumps(results, indent=2)
-
-    # Clean up the temporary input file
-    os.unlink(temp_input_path)

-    # If we have an annotated PDF, save it temporarily
-    if annotated_pdf:
-        with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:
-            tmp_file.write(annotated_pdf)
-            return results_json, tmp_file.name

-    return results_json, None
-
-    # except Exception as e:
-    #     error_message = json.dumps({
-    #         "error": str(e),
-    #         "traceback": traceback.format_exc()
-    #     }, indent=2)
-    #     return error_message, None
-

 def create_interface():
     with gr.Blocks(title="PDF Analyzer") as interface:
         gr.Markdown("# PDF Analyzer")
-        gr.Markdown("Upload a PDF document to analyze its structure, references, language, and more.")

         with gr.Row():
             file_input = gr.File(
                 label="Upload PDF",
                 file_types=[".pdf"],
-                type="binary"
             )

         with gr.Row():
@@ -488,28 +534,48 @@ def create_interface():

         with gr.Row():
             results_output = gr.JSON(
-                label="Analysis Results",
                 show_label=True
             )

         with gr.Row():
             pdf_output = gr.File(
-                label="Annotated PDF",
-                show_label=True
             )

         analyze_btn.click(
             fn=process_upload,
             inputs=[file_input],
-            outputs=[results_output, pdf_output]
         )
-
     return interface

 if __name__ == "__main__":
     interface = create_interface()
     interface.launch(
-        share=False,  # Set to False in production
-        # server_name="0.0.0.0",
-        server_port=None
-    )

+import pymupdf4llm
+from markdown_it import MarkdownIt
+from mdit_plain.renderer import RendererPlain
+import os
 import re
+from typing import Tuple, Optional, List, Dict, Any
+
 import fitz  # PyMuPDF
+from collections import defaultdict, Counter
 import language_tool_python
+
 import json
 import traceback
 import io
 import tempfile
+# import os  # Already imported
 import gradio as gr

+# Set JAVA_HOME environment variable (from target script)
 os.environ['JAVA_HOME'] = '/usr/lib/jvm/java-11-openjdk-amd64'

+
+# --- Functions for PDF to Markdown to Plain Text ---
+def convert_markdown_to_plain_text(markdown_text: str) -> str:
+    """
+    Converts a Markdown string to plain text.
+    """
+    if not markdown_text:
+        return ""
+    try:
+        parser = MarkdownIt(renderer_cls=RendererPlain)
+        plain_text = parser.render(markdown_text)
+        return plain_text
+    except Exception as e:
+        print(f"Error converting Markdown to plain text: {e}")
+        return markdown_text
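
A minimal usage sketch of convert_markdown_to_plain_text, assuming markdown-it-py and mdit_plain are installed (both are imported above); the sample string is hypothetical:

    sample_md = "# Title\n\nSome **bold** text and a [link](https://example.com)."
    print(convert_markdown_to_plain_text(sample_md))
    # RendererPlain strips heading markers, emphasis, and link syntax,
    # leaving roughly: "Title\nSome bold text and a link."
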
+
+# --- Function for Rectangle Conversion ---
+def convert_rect_to_dict(rect: fitz.Rect) -> Optional[Dict[str, float]]:
+    """Converts a fitz.Rect object to a dictionary."""
+    if not rect or not isinstance(rect, fitz.Rect):
+        print(f"Warning: Invalid rect object received: {rect}")
+        return None
+    return {
+        "x0": rect.x0,
+        "y0": rect.y0,
+        "x1": rect.x1,
+        "y1": rect.y1,
+        "width": rect.width,
+        "height": rect.height
+    }
+
+# --- Helper function for mapping LT issues to PDF rectangles ---
+def try_map_issues_to_page_rects(
+        issues_to_map_for_context: List[Dict[str, Any]],
+        pdf_rects: List[fitz.Rect],
+        page_number_for_mapping: int  # 1-based page number
+) -> int:
+    mapped_count = 0
+    num_issues_to_try = len(issues_to_map_for_context)
+    num_available_rects = len(pdf_rects)
+    limit = min(num_issues_to_try, num_available_rects)
+
+    for i in range(limit):
+        issue_to_update = issues_to_map_for_context[i]
+        if issue_to_update['is_mapped_to_pdf']:  # Check the correct flag name
+            continue
+        pdf_rect = pdf_rects[i]
+        coord_dict = convert_rect_to_dict(pdf_rect)
+        if coord_dict:
+            issue_to_update['pdf_coordinates_list'] = [coord_dict]  # Store as list of dicts
+            issue_to_update['is_mapped_to_pdf'] = True
+            issue_to_update['mapped_page_number'] = page_number_for_mapping
+            mapped_count += 1
+        else:
+            print(f"  Warning: Could not convert rect for context '{issue_to_update['context_text'][:30]}...' on page {page_number_for_mapping}")
+    return mapped_count
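
try_map_issues_to_page_rects pairs issues with rectangles positionally: the i-th still-unmapped issue for a context string takes the i-th rectangle that page.search_for() returned. A sketch under that assumption, with hypothetical data:

    issues = [{'context_text': 'teh cat', 'is_mapped_to_pdf': False,
               'pdf_coordinates_list': [], 'mapped_page_number': -1}]
    rects = [fitz.Rect(72, 100, 130, 112)]  # e.g. one hit for "teh cat" on page 3
    print(try_map_issues_to_page_rects(issues, rects, page_number_for_mapping=3))  # 1
    print(issues[0]['mapped_page_number'])  # 3
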
+
+
 # ------------------------------
+# Analysis Functions (from target script, with modifications)
 # ------------------------------

+def extract_pdf_text(file_input: Any) -> str:
+    """Extracts full text from a PDF file using PyMuPDF4LLM (as Markdown)."""
+    temp_file_path_for_pymupdf4llm = None
+    actual_path_to_process = None
     try:
+        if isinstance(file_input, str):
+            actual_path_to_process = file_input
+        elif hasattr(file_input, 'read') and callable(file_input.read):
+            temp_file_obj = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
+            temp_file_path_for_pymupdf4llm = temp_file_obj.name
+            file_input.seek(0)
+            temp_file_obj.write(file_input.read())
+            temp_file_obj.close()
+            actual_path_to_process = temp_file_path_for_pymupdf4llm
         else:
+            raise ValueError("Input 'file_input' must be a file path (str) or a file-like object.")
+
+        doc_for_page_count = fitz.open(actual_path_to_process)
+        page_count = len(doc_for_page_count)
+        doc_for_page_count.close()
+        print(f"PDF has {page_count} pages. Extracting Markdown using pymupdf4llm.")

+        markdown_text = pymupdf4llm.to_markdown(actual_path_to_process)

+        print(f"Total extracted Markdown text length: {len(markdown_text)} characters.")
+        return markdown_text

     except Exception as e:
         print(f"Error extracting text from PDF: {str(e)}")
+        traceback.print_exc()
         return ""
+    finally:
+        if temp_file_path_for_pymupdf4llm and os.path.exists(temp_file_path_for_pymupdf4llm):
+            os.remove(temp_file_path_for_pymupdf4llm)


 def check_text_presence(full_text: str, search_terms: List[str]) -> Dict[str, bool]:
     return {term: term.lower() in full_text.lower() for term in search_terms}

 def label_authors(full_text: str) -> str:
+    # This function was in the original script but not directly used by analyze_pdf's output structure.
+    # Keeping it in case it's called elsewhere or for future use.
     author_line_regex = r"^(?:.*\n)(.*?)(?:\n\n)"
     match = re.search(author_line_regex, full_text, re.MULTILINE)
     if match:
         return full_text.replace(authors, f"Authors: {authors}")
     return full_text

+def check_metadata(plain_text: str) -> Dict[str, Any]:
     return {
+        "author_email": bool(re.search(r'\b[\w.-]+?@\w+?\.\w+?\b', plain_text)),
+        "list_of_authors": bool(re.search(r'Authors?:', plain_text, re.IGNORECASE)),
+        "keywords_list": bool(re.search(r'Keywords?:', plain_text, re.IGNORECASE)),
+        "word_count": len(plain_text.split()) or "Missing"
     }

+def check_disclosures(plain_text: str) -> Dict[str, bool]:
     search_terms = [
         "conflict of interest statement",
         "ethics statement",
         "funding statement",
         "data access statement"
     ]
+    results = check_text_presence(plain_text, search_terms)
+    has_author_contribution = ("author contribution statement" in plain_text.lower() or
+                               "author contributions statement" in plain_text.lower())
     results["author contribution statement"] = has_author_contribution
     return results

+def check_figures_and_tables(plain_text: str) -> Dict[str, bool]:
     return {
+        "figures_with_citations": bool(re.search(r'Figure \d+.*?citation', plain_text, re.IGNORECASE)),
+        "figures_legends": bool(re.search(r'Figure \d+.*?legend', plain_text, re.IGNORECASE)),
+        "tables_legends": bool(re.search(r'Table \d+.*?legend', plain_text, re.IGNORECASE))
     }

+def check_references_summary(plain_text: str) -> Dict[str, Any]:  # Renamed from check_references for clarity
+    abstract_candidate = plain_text[:2000]
     return {
+        "old_references": bool(re.search(r'\b19[0-9]{2}\b', plain_text)),
+        "citations_in_abstract": bool(re.search(r'\[\d+\]', abstract_candidate, re.IGNORECASE)) or \
+                                 bool(re.search(r'\bcit(?:ation|ed)\b', abstract_candidate, re.IGNORECASE)),
+        "reference_count": len(re.findall(r'\[\d+(?:,\s*\d+)*\]', plain_text)),
+        "self_citations": bool(re.search(r'Self-citation', plain_text, re.IGNORECASE))
     }

+def check_structure(plain_text: str) -> Dict[str, bool]:
+    text_lower = plain_text.lower()
     return {
+        "imrad_structure": all(section.lower() in text_lower for section in ["introduction", "method", "result", "discussion"]),
+        "abstract_structure": "structured abstract" in text_lower
     }

+def check_language_issues_and_regex(markdown_text_from_pdf: str) -> Dict[str, Any]:
+    """
+    Performs LanguageTool and specific regex checks on text derived from PDF's Markdown.
+    Filters issues to only include those between "abstract" and "references/bibliography".
+    Returns a list of issue dictionaries with fields for mapping.
+    """
+    if not markdown_text_from_pdf.strip():
+        return {"total_issues": 0, "issues_list": [], "text_used_for_analysis": ""}
+
+    plain_text_from_markdown = convert_markdown_to_plain_text(markdown_text_from_pdf)
+    text_for_analysis = plain_text_from_markdown.replace('\n', ' ')
+    text_for_analysis = re.sub(r'\s+', ' ', text_for_analysis).strip()
+
+    if not text_for_analysis:
+        return {"total_issues": 0, "issues_list": [], "text_used_for_analysis": ""}
+
+    # --- Determine content boundaries ---
+    text_for_analysis_lower = text_for_analysis.lower()
+
+    abstract_match = re.search(r'\babstract\b', text_for_analysis_lower)
+    # If "abstract" is found, analysis starts from its beginning. Otherwise, from text start.
+    content_start_index = abstract_match.start() if abstract_match else 0
+    if abstract_match:
+        print(f"Found 'abstract' at index {content_start_index}")
+    else:
+        print(f"Did not find 'abstract', starting language analysis from index 0")
+
+    references_match = re.search(r'\breferences\b', text_for_analysis_lower)
+    bibliography_match = re.search(r'\bbibliography\b', text_for_analysis_lower)
+
+    content_end_index = len(text_for_analysis)  # Default to end of text
+
+    if references_match and bibliography_match:
+        content_end_index = min(references_match.start(), bibliography_match.start())
+        print(f"Found 'references' at {references_match.start()} and 'bibliography' at {bibliography_match.start()}. Using {content_end_index} as end boundary.")
+    elif references_match:
+        content_end_index = references_match.start()
+        print(f"Found 'references' at {content_end_index}. Using it as end boundary.")
+    elif bibliography_match:
+        content_end_index = bibliography_match.start()
+        print(f"Found 'bibliography' at {content_end_index}. Using it as end boundary.")
+    else:
+        print(f"Did not find 'references' or 'bibliography'. Language analysis up to end of text (index {content_end_index}).")
+
+    # If "abstract" is found after "references/bibliography", the range is invalid for filtering.
+    # In such a case, or if no abstract is found, we might effectively process a very small or no region.
+    # This logic correctly makes the valid region empty if abstract_start >= content_end.
+    if content_start_index >= content_end_index:
+        print(f"Warning: Content start index ({content_start_index}) is not before content end index ({content_end_index}). No language issues will be reported from this range.")
+        # Effectively, no issues will pass the filter below.
+
+    tool = None
+    processed_issues: List[Dict[str, Any]] = []
     try:
+        tool = language_tool_python.LanguageTool('en-US')
+        raw_lt_matches = tool.check(text_for_analysis)

+        lt_issues_in_range = 0
+        for idx, match in enumerate(raw_lt_matches):
+            if match.ruleId == "EN_SPLIT_WORDS_HYPHEN": continue
+
+            # Filter by content boundaries
+            if not (content_start_index <= match.offset < content_end_index):
                 continue
+            lt_issues_in_range += 1
+
+            context_str = text_for_analysis[match.offset : match.offset + match.errorLength]
+            processed_issues.append({
+                '_internal_id': f"lt_{idx}",
+                'ruleId': match.ruleId,
+                'message': match.message,
+                'context_text': context_str,
+                'offset_in_text': match.offset,
+                'error_length': match.errorLength,
+                'replacements_suggestion': match.replacements[:3] if match.replacements else [],
+                'category_name': match.category,
+                'is_mapped_to_pdf': False,
+                'pdf_coordinates_list': [],
+                'mapped_page_number': -1
             })
+        print(f"LanguageTool found {len(raw_lt_matches)} raw issues, {lt_issues_in_range} issues within defined content range.")

         regex_pattern = r'\b(\w+)\[(\d+)\]'
+        regex_matches = list(re.finditer(regex_pattern, text_for_analysis))

+        regex_issues_in_range = 0
+        for reg_idx, match in enumerate(regex_matches):
+            # Filter by content boundaries
+            if not (content_start_index <= match.start() < content_end_index):
+                continue
+            regex_issues_in_range += 1
+
             word = match.group(1)
             number = match.group(2)
+            processed_issues.append({
+                '_internal_id': f"regex_{reg_idx}",
+                'ruleId': "SPACE_BEFORE_BRACKET",
+                'message': f"Missing space before '[' in '{word}[{number}]'. Should be '{word} [{number}]'.",
+                'context_text': text_for_analysis[match.start():match.end()],
+                'offset_in_text': match.start(),
+                'error_length': match.end() - match.start(),
+                'replacements_suggestion': [f"{word} [{number}]"],
+                'category_name': "Formatting",
+                'is_mapped_to_pdf': False,
+                'pdf_coordinates_list': [],
+                'mapped_page_number': -1
             })
+        print(f"Regex check found {len(regex_matches)} raw matches, {regex_issues_in_range} issues within defined content range.")

         return {
+            "total_issues": len(processed_issues),
+            "issues_list": processed_issues,
+            "text_used_for_analysis": text_for_analysis
         }
     except Exception as e:
+        print(f"Error in check_language_issues_and_regex: {e}")
+        traceback.print_exc()
+        return {"error": str(e), "total_issues": 0, "issues_list": [], "text_used_for_analysis": text_for_analysis}
+    finally:
+        if tool: tool.close()
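
A sketch of the boundary filter above on a hypothetical miniature document; only issues whose offset falls in [content_start_index, content_end_index) are kept:

    text = "title abstract the body text goes here references [1] smith 1999"
    m_abs = re.search(r'\babstract\b', text)
    m_ref = re.search(r'\breferences\b', text)
    start = m_abs.start() if m_abs else 0        # 6
    end = m_ref.start() if m_ref else len(text)  # 39
    for offset in (2, 20, 50):
        print(offset, start <= offset < end)     # False, True, False
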

+def check_figure_order(plain_text: str) -> Dict[str, Any]:
     figure_pattern = r'(?:Fig(?:ure)?\.?|Figure)\s*(\d+)'
+    figure_references_str = re.findall(figure_pattern, plain_text, re.IGNORECASE)

+    valid_figure_numbers_int = []
+    for num_str in figure_references_str:
+        if num_str.isdigit():
+            valid_figure_numbers_int.append(int(num_str))

+    unique_sorted_figures = sorted(list(set(valid_figure_numbers_int)))
+    is_sequential = all(unique_sorted_figures[i] + 1 == unique_sorted_figures[i+1] for i in range(len(unique_sorted_figures)-1))

+    missing_figures = []
+    if unique_sorted_figures:
+        expected_figures = set(range(1, max(unique_sorted_figures) + 1))
+        missing_figures = sorted(list(expected_figures - set(unique_sorted_figures)))
+
+    counts = Counter(valid_figure_numbers_int)
+    duplicate_refs = [num for num, count in counts.items() if count > 1]

     return {
+        "sequential_order_of_unique_figures": is_sequential,
+        "figure_count_unique": len(unique_sorted_figures),
+        "missing_figures_in_sequence_to_max": missing_figures,
+        "figure_order_as_encountered": valid_figure_numbers_int,
+        "duplicate_references_to_same_figure_number": duplicate_refs
     }
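
A quick sketch of the figure-order report on a hypothetical snippet:

    sample = "Figure 1 shows X. Fig. 3 shows Y. Figure 1 appears again."
    report = check_figure_order(sample)
    print(report["figure_order_as_encountered"])                  # [1, 3, 1]
    print(report["sequential_order_of_unique_figures"])           # False (2 is skipped)
    print(report["missing_figures_in_sequence_to_max"])           # [2]
    print(report["duplicate_references_to_same_figure_number"])   # [1]
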

+def check_reference_order(plain_text: str) -> Dict[str, Any]:
+    reference_pattern = r'\[(\d+)\]'
+    references_str = re.findall(reference_pattern, plain_text)
+    ref_numbers_int = [int(ref) for ref in references_str if ref.isdigit()]

+    max_ref_val = 0
+    out_of_order_details = []

+    if ref_numbers_int:
+        max_ref_val = max(ref_numbers_int)
+        current_max_seen_in_text = 0
+        for i, ref in enumerate(ref_numbers_int):
+            if ref < current_max_seen_in_text:
+                out_of_order_details.append({
+                    "position_in_text_occurrences": i + 1,
+                    "value": ref,
+                    "previous_max_value_seen": current_max_seen_in_text,
+                    "message": f"Reference [{ref}] appeared after a higher reference [{current_max_seen_in_text}] was already cited."
+                })
+            current_max_seen_in_text = max(current_max_seen_in_text, ref)
+
+    all_expected_refs_up_to_max = set(range(1, max_ref_val + 1)) if max_ref_val > 0 else set()
+    used_refs_set = set(ref_numbers_int)
+    missing_refs_in_sequence_to_max = sorted(list(all_expected_refs_up_to_max - used_refs_set))

+    is_ordered_in_text = all(ref_numbers_int[i] <= ref_numbers_int[i+1] for i in range(len(ref_numbers_int)-1))
+
     return {
+        "max_reference_number_cited": max_ref_val,
+        "out_of_order_citations_details": out_of_order_details,
+        "missing_references_up_to_max_cited": missing_refs_in_sequence_to_max,
+        "is_citation_order_non_decreasing_in_text": is_ordered_in_text
     }
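
Likewise, a sketch of the reference-order report on hypothetical text:

    sample = "As shown in [1] and [3], earlier work [2] took a different approach."
    report = check_reference_order(sample)
    print(report["max_reference_number_cited"])                 # 3
    print(report["missing_references_up_to_max_cited"])         # []
    print(report["is_citation_order_non_decreasing_in_text"])   # False ([2] after [3])
    print(len(report["out_of_order_citations_details"]))        # 1
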
 # ------------------------------
 # Main Analysis Function
 # ------------------------------

+def analyze_pdf(filepath_or_stream: Any) -> Tuple[Dict[str, Any], None]:
+    doc_for_mapping = None
+    temp_fitz_file_path = None

     try:
+        markdown_text = extract_pdf_text(filepath_or_stream)
+        if not markdown_text:
+            return {"error": "Failed to extract text (Markdown) from PDF."}, None
+
+        plain_text_for_general_checks = convert_markdown_to_plain_text(markdown_text)
+        cleaned_plain_text_for_regex = re.sub(r'\s+', ' ', plain_text_for_general_checks.replace('\n', ' ')).strip()
+
+        # This will now use the modified function with boundary filtering
+        language_and_regex_issue_report = check_language_issues_and_regex(markdown_text)

+        if "error" in language_and_regex_issue_report:
+            return {"error": f"Language/Regex check error: {language_and_regex_issue_report['error']}"}, None
+
+        detailed_issues_for_mapping = language_and_regex_issue_report.get("issues_list", [])
+
+        if detailed_issues_for_mapping:
+            # The rest of the mapping logic remains the same, operating on the filtered issues.
+            if isinstance(filepath_or_stream, str):
+                pdf_path_for_fitz = filepath_or_stream
+            elif hasattr(filepath_or_stream, 'read') and callable(filepath_or_stream.read):
+                filepath_or_stream.seek(0)
+                temp_fitz_file = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
+                temp_fitz_file_path = temp_fitz_file.name
+                temp_fitz_file.write(filepath_or_stream.read())
+                temp_fitz_file.close()
+                pdf_path_for_fitz = temp_fitz_file_path
+            else:
+                # This case should ideally be caught by extract_pdf_text, but good to have a fallback
+                return {"error": "Invalid PDF input for coordinate mapping."}, None
+
+            try:
+                doc_for_mapping = fitz.open(pdf_path_for_fitz)
+                if doc_for_mapping.page_count > 0:
+                    print(f"\n--- Mapping {len(detailed_issues_for_mapping)} Issues (filtered) to PDF Coordinates ---")
+                    # Only attempt to map issues if there are any after filtering
+                    if detailed_issues_for_mapping:
+                        for page_idx in range(doc_for_mapping.page_count):
+                            page = doc_for_mapping[page_idx]
+                            current_page_num_1_based = page_idx + 1
+
+                            unmapped_issues_on_this_page_by_context = defaultdict(list)
+                            for issue_dict in detailed_issues_for_mapping:
+                                if not issue_dict['is_mapped_to_pdf']:
+                                    unmapped_issues_on_this_page_by_context[issue_dict['context_text']].append(issue_dict)
+
+                            if not unmapped_issues_on_this_page_by_context:
+                                if all(iss['is_mapped_to_pdf'] for iss in detailed_issues_for_mapping): break
+                                continue
+
+                            for ctx_str, issues_for_ctx in unmapped_issues_on_this_page_by_context.items():
+                                if not ctx_str.strip(): continue
+                                try:
+                                    # Use TEXT_PRESERVE_LIGATURES and TEXT_PRESERVE_WHITESPACE for better matching
+                                    # with text derived from pymupdf4llm which tries to preserve structure.
+                                    pdf_rects = page.search_for(ctx_str, flags=fitz.TEXT_PRESERVE_LIGATURES | fitz.TEXT_PRESERVE_WHITESPACE)
+                                    if pdf_rects:
+                                        try_map_issues_to_page_rects(issues_for_ctx, pdf_rects, current_page_num_1_based)
+                                except Exception as search_exc:
+                                    print(f"Warning: Error searching for context '{ctx_str[:30]}' on page {current_page_num_1_based}: {search_exc}")
+                        total_mapped = sum(1 for iss in detailed_issues_for_mapping if iss['is_mapped_to_pdf'])
+                        print(f"Finished coordinate mapping. Mapped issues: {total_mapped}/{len(detailed_issues_for_mapping)}.")
+                    else:
+                        print("No language/regex issues found within the defined content boundaries to map.")
+            except Exception as e_map:
+                print(f"Error during PDF coordinate mapping: {e_map}")
+                traceback.print_exc()
+            finally:
+                if doc_for_mapping: doc_for_mapping.close()
+                if temp_fitz_file_path and os.path.exists(temp_fitz_file_path):
+                    os.unlink(temp_fitz_file_path)
+
+        final_formatted_issues_list = []
+        for issue_data in detailed_issues_for_mapping:  # This list is already filtered
+            page_num_for_json = 0
+            coords_for_json = []
+            if issue_data['is_mapped_to_pdf'] and issue_data['pdf_coordinates_list']:
+                # Assuming pdf_coordinates_list stores a list of dicts, take the first one
+                coord_dict = issue_data['pdf_coordinates_list'][0]
+                coords_for_json = [coord_dict['x0'], coord_dict['y0'], coord_dict['x1'], coord_dict['y1']]
+                page_num_for_json = issue_data['mapped_page_number']
+
+            final_formatted_issues_list.append({
+                "message": issue_data['message'],
+                "context": issue_data['context_text'],
+                "suggestions": issue_data['replacements_suggestion'],
+                "category": issue_data['category_name'],
+                "rule_id": issue_data['ruleId'],
+                "offset": issue_data['offset_in_text'],
+                "length": issue_data['error_length'],
+                "coordinates": coords_for_json,
+                "page": page_num_for_json
+            })
+
         results = {
+            "issues": final_formatted_issues_list,  # This will now contain only filtered issues
+            "document_checks": {
+                "metadata": check_metadata(cleaned_plain_text_for_regex),
+                "disclosures": check_disclosures(cleaned_plain_text_for_regex),
+                "figures_and_tables": check_figures_and_tables(cleaned_plain_text_for_regex),
+                "references_summary": check_references_summary(cleaned_plain_text_for_regex),
+                "structure": check_structure(cleaned_plain_text_for_regex),
+                "figure_order_analysis": check_figure_order(cleaned_plain_text_for_regex),
+                "reference_order_analysis": check_reference_order(cleaned_plain_text_for_regex),
+                "plain_language_summary_present": bool(re.search(r'plain language summary', cleaned_plain_text_for_regex, re.IGNORECASE)),
+                "readability_issues_detected": False,  # Placeholder, not implemented
             }
         }
+
+        return results, None

     except Exception as e:
+        print(f"Overall analysis error in analyze_pdf: {e}")
+        traceback.print_exc()
+        # Ensure cleanup even if an early error occurs
+        if doc_for_mapping: doc_for_mapping.close()
+        if temp_fitz_file_path and os.path.exists(temp_fitz_file_path):
+            os.unlink(temp_fitz_file_path)
         return {"error": str(e)}, None
+
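
An end-to-end sketch of analyze_pdf on a hypothetical local path ("paper.pdf"); the second tuple element is always None now that PDF annotation is removed:

    results, annotated = analyze_pdf("paper.pdf")  # hypothetical path
    assert annotated is None                       # annotation output removed in this commit
    print(json.dumps(results.get("document_checks", {}).get("structure"), indent=2))
    for issue in results.get("issues", [])[:3]:
        print(issue["rule_id"], issue["page"], issue["coordinates"])
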
 # ------------------------------
 # Gradio Interface
 # ------------------------------

+def process_upload(file_data_binary: bytes) -> Tuple[str, Optional[str]]:
+    if file_data_binary is None:
         return json.dumps({"error": "No file uploaded"}, indent=2), None

+    temp_input_path = None
+    try:
+        # Create a temporary file with .pdf extension from the binary data
+        with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as temp_input_file:
+            temp_input_file.write(file_data_binary)
+            temp_input_path = temp_input_file.name
+        print(f"Temporary PDF for analysis: {temp_input_path}")
+
+        results_dict, _ = analyze_pdf(temp_input_path)  # Pass the path to the temp file
+
+        results_json = json.dumps(results_dict, indent=2, ensure_ascii=False)
+        return results_json, None  # No annotated PDF path to return for now

+    except Exception as e:
+        print(f"Error in process_upload: {e}")
+        error_message = json.dumps({"error": str(e), "traceback": traceback.format_exc()}, indent=2)
+        return error_message, None
+    finally:
+        if temp_input_path and os.path.exists(temp_input_path):
+            os.unlink(temp_input_path)
+            print(f"Cleaned up temporary file: {temp_input_path}")
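
For completeness, a sketch of calling process_upload outside Gradio, assuming a hypothetical local file; Gradio's type="binary" upload passes the same raw bytes:

    with open("paper.pdf", "rb") as fh:  # hypothetical local file
        results_json, annotated_path = process_upload(fh.read())
    print(annotated_path)  # None - the annotated-PDF output is currently disabled
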


 def create_interface():
     with gr.Blocks(title="PDF Analyzer") as interface:
         gr.Markdown("# PDF Analyzer")
+        gr.Markdown("Upload a PDF document to analyze its structure, references, language, and more. Language issues will include PDF coordinates if found, and are filtered to appear between 'Abstract' and 'References/Bibliography'.")

         with gr.Row():
             file_input = gr.File(
                 label="Upload PDF",
                 file_types=[".pdf"],
+                type="binary"  # Changed to binary to handle uploads directly
             )

         with gr.Row():

         with gr.Row():
             results_output = gr.JSON(
+                label="Analysis Results (Coordinates for issues in 'issues' list)",
                 show_label=True
             )

         with gr.Row():
+            # Keeping the placeholder for PDF output, but it's not functional for annotation
             pdf_output = gr.File(
+                label="Annotated PDF (Functionality Removed - View Coordinates in JSON)",
+                show_label=True,
+                # value=None  # Ensure it's empty initially
             )

         analyze_btn.click(
             fn=process_upload,
             inputs=[file_input],
+            outputs=[results_output, pdf_output]  # pdf_output will receive None
         )
     return interface

 if __name__ == "__main__":
+    print("\n--- Launching Gradio Interface ---")
+    # Ensure JAVA_HOME is set if not globally configured
+    if 'JAVA_HOME' not in os.environ:
+        # Attempt to set a common default if necessary, or ensure the user sets it.
+        # For this script, it's set at the top.
+        print("JAVA_HOME is set to:", os.environ.get('JAVA_HOME'))
+    else:
+        print("JAVA_HOME is set to:", os.environ.get('JAVA_HOME'))
+
+    # Check if LanguageTool can be initialized (optional check)
+    try:
+        lt_test = language_tool_python.LanguageTool('en-US')
+        lt_test.close()
+        print("LanguageTool initialized successfully.")
+    except Exception as lt_e:
+        print(f"Warning: Could not initialize LanguageTool. Language checks might fail: {lt_e}")
+        print("Please ensure Java is installed and JAVA_HOME is correctly set.")
+        print("For example, on Ubuntu with OpenJDK 11: export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64")
+
+
     interface = create_interface()
     interface.launch(
+        share=False,  # Set to True for public link if ngrok is installed
+        server_port=None  # Gradio will pick an available port
+    )