import io
import json
import os
import re
import tempfile
import traceback
from collections import Counter
from typing import List, Dict, Any, Tuple

import fitz  # PyMuPDF
import gradio as gr
import language_tool_python
import pymupdf4llm

# language_tool_python runs a local LanguageTool server, which requires a Java runtime.
os.environ['JAVA_HOME'] = '/usr/lib/jvm/java-11-openjdk-amd64'


def extract_pdf_text(file) -> str:
    """Extract the full text of a PDF (file path or file-like object) using pymupdf4llm."""
    try:
        print(f"Opening PDF file: {file}")

        temp_file_path = None
        if isinstance(file, str):
            print(f"Opening file by path: {file}")
            file_path = file
        else:
            # A file-like object was passed; pymupdf4llm needs a path, so spool it to a temp file.
            print("Opening file from stream")
            temp_file = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
            temp_file_path = temp_file.name
            temp_file.write(file.read())
            temp_file.close()
            file_path = temp_file_path

        doc = fitz.open(file_path)
        page_count = len(doc)
        doc.close()
        print(f"PDF opened successfully with {page_count} pages")

        full_text = pymupdf4llm.to_markdown(file_path)

        avg_chars_per_page = len(full_text) // page_count if page_count > 0 else 0
        print(f"Extracted an average of {avg_chars_per_page} characters per page across {page_count} pages")

        if temp_file_path:
            os.remove(temp_file_path)

        print(f"Total extracted text length: {len(full_text)} characters.")
        print(full_text)
        return full_text

    except Exception as e:
        print(f"Error extracting text from PDF: {str(e)}")
        print(traceback.format_exc())
        return ""


def check_text_presence(full_text: str, search_terms: List[str]) -> Dict[str, bool]:
    """Checks for the presence of required terms in the text."""
    return {term: term.lower() in full_text.lower() for term in search_terms}


def label_authors(full_text: str) -> str:
    """Label authors in the text with 'Authors:' if not already labeled."""
    author_line_regex = r"^(?:.*\n)(.*?)(?:\n\n)"
    match = re.search(author_line_regex, full_text, re.MULTILINE)
    if match:
        authors = match.group(1).strip()
        return full_text.replace(authors, f"Authors: {authors}")
    return full_text


def check_metadata(full_text: str) -> Dict[str, Any]:
    """Check for metadata elements."""
    return {
        "author_email": bool(re.search(r'\b[\w.-]+?@\w+?\.\w+?\b', full_text)),
        "list_of_authors": bool(re.search(r'Authors?:', full_text, re.IGNORECASE)),
        "keywords_list": bool(re.search(r'Keywords?:', full_text, re.IGNORECASE)),
        "word_count": len(full_text.split()) or "Missing"
    }

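# A quick illustration of what check_metadata reports; the input string below is
# invented purely for this example:
#   check_metadata("Authors: A. Smith\nContact: a.smith@example.org\nKeywords: testing")
#   -> {"author_email": True, "list_of_authors": True,
#       "keywords_list": True, "word_count": 7}
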
def check_disclosures(full_text: str) -> Dict[str, bool]:
    """Check for disclosure statements."""
    search_terms = [
        "conflict of interest statement",
        "ethics statement",
        "funding statement",
        "data access statement"
    ]
    results = check_text_presence(full_text, search_terms)

    has_author_contribution = ("author contribution statement" in full_text.lower() or
                               "author contributions statement" in full_text.lower())
    results["author contribution statement"] = has_author_contribution

    return results


def check_figures_and_tables(full_text: str) -> Dict[str, bool]:
    """Check for figures and tables."""
    return {
        "figures_with_citations": bool(re.search(r'Figure \d+.*?citation', full_text, re.IGNORECASE)),
        "figures_legends": bool(re.search(r'Figure \d+.*?legend', full_text, re.IGNORECASE)),
        "tables_legends": bool(re.search(r'Table \d+.*?legend', full_text, re.IGNORECASE))
    }


def check_references(full_text: str) -> Dict[str, Any]:
    """Check for references."""
    return {
        "old_references": bool(re.search(r'\b19[0-9]{2}\b', full_text)),
        "citations_in_abstract": bool(re.search(r'\b(citation|reference)\b', full_text[:1000], re.IGNORECASE)),
        "reference_count": len(re.findall(r'\[.*?\]', full_text)),
        "self_citations": bool(re.search(r'Self-citation', full_text, re.IGNORECASE))
    }


def check_structure(full_text: str) -> Dict[str, bool]:
    """Check document structure."""
    return {
        "imrad_structure": all(section in full_text for section in ["Introduction", "Methods", "Results", "Discussion"]),
        "abstract_structure": "structured abstract" in full_text.lower()
    }


def check_language_issues(full_text: str) -> Dict[str, Any]:
    """Check for language issues using LanguageTool and additional regex patterns."""
    try:
        language_tool = language_tool_python.LanguageTool('en-US')
        matches = language_tool.check(full_text)
        issues = []

        for match in matches:
            # Skip the hyphenation rule, which is noisy on text extracted from PDFs.
            if match.ruleId == "EN_SPLIT_WORDS_HYPHEN":
                continue

            issues.append({
                "message": match.message,
                "context": match.context.strip(),
                "suggestions": match.replacements[:3] if match.replacements else [],
                "category": match.category,
                "rule_id": match.ruleId,
                "offset": match.offset,
                "length": match.errorLength,
                "coordinates": [],
                "page": 0
            })
        print(f"Total language issues found: {len(issues)}")

        # Flag bracketed citations glued to the preceding word, e.g. 'word[12]'.
        regex_pattern = r'\b(\w+)\[(\d+)\]'
        regex_matches = list(re.finditer(regex_pattern, full_text))
        print(f"Total regex issues found: {len(regex_matches)}")

        for match in regex_matches:
            word = match.group(1)
            number = match.group(2)
            issues.append({
                "message": f"Missing space before '[' in '{word}[{number}]'. Should be '{word} [{number}]'.",
                "context": full_text[max(match.start() - 30, 0):min(match.end() + 30, len(full_text))].strip(),
                "suggestions": [f"{word} [{number}]"],
                "category": "Formatting",
                "rule_id": "SPACE_BEFORE_BRACKET",
                "offset": match.start(),
                "length": match.end() - match.start(),
                "coordinates": [],
                "page": 0
            })

        print(f"Total combined issues found: {len(issues)}")

        return {
            "total_issues": len(issues),
            "issues": issues
        }
    except Exception as e:
        print(f"Error checking language issues: {e}")
        return {"error": str(e)}

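# The SPACE_BEFORE_BRACKET rule in check_language_issues flags bracketed citations glued
# to the preceding word. A hypothetical example: in the text "as reported in previous work[12]",
# the pattern r'\b(\w+)\[(\d+)\]' matches "work[12]" and the suggested fix is "work [12]".
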
def check_language(full_text: str) -> Dict[str, Any]:
    """Check language quality."""
    return {
        "plain_language": bool(re.search(r'plain language summary', full_text, re.IGNORECASE)),
        "readability_issues": False,
        "language_issues": check_language_issues(full_text)
    }


def check_figure_order(full_text: str) -> Dict[str, Any]:
    """Check if figures are referred to in sequential order."""
    figure_pattern = r'(?:Fig(?:ure)?\.?|Figure)\s*(\d+)'
    figure_references = re.findall(figure_pattern, full_text, re.IGNORECASE)
    figure_numbers = sorted(set(int(num) for num in figure_references))

    # "Sequential" here means the distinct figure numbers form an unbroken run (no gaps).
    is_sequential = all(a + 1 == b for a, b in zip(figure_numbers, figure_numbers[1:]))

    if figure_numbers:
        expected_figures = set(range(1, max(figure_numbers) + 1))
        missing_figures = list(expected_figures - set(figure_numbers))
    else:
        missing_figures = None

    duplicates = [num for num, count in Counter(figure_references).items() if count > 1]
    not_mentioned = list(set(figure_references) - set(duplicates))

    return {
        "sequential_order": is_sequential,
        "figure_count": len(figure_numbers),
        "missing_figures": missing_figures,
        "figure_order": figure_numbers,
        "duplicate_references": duplicates,
        "not_mentioned": not_mentioned
    }

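# A small worked example for check_figure_order; the sentence is hypothetical:
#   check_figure_order("Figure 1 shows the setup. See Figure 3 and Figure 1.")
#   -> {"sequential_order": False, "figure_count": 2, "missing_figures": [2],
#       "figure_order": [1, 3], "duplicate_references": ["1"], "not_mentioned": ["3"]}
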
def check_reference_order(full_text: str) -> Dict[str, Any]:
    """Check if references in the main body text are cited in order."""
    reference_pattern = r'\[(\d+)\]'
    references = re.findall(reference_pattern, full_text)
    ref_numbers = [int(ref) for ref in references]

    # A citation is out of order if it jumps past the next expected reference number.
    max_ref = 0
    out_of_order = []
    for i, ref in enumerate(ref_numbers):
        if ref > max_ref + 1:
            out_of_order.append((i + 1, ref))
        max_ref = max(max_ref, ref)

    all_refs = set(range(1, max_ref + 1))
    used_refs = set(ref_numbers)
    missing_refs = list(all_refs - used_refs)

    return {
        "max_reference": max_ref,
        "out_of_order": out_of_order,
        "missing_references": missing_refs,
        "is_ordered": len(out_of_order) == 0 and len(missing_refs) == 0
    }

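# A small worked example for check_reference_order; the sentence is hypothetical:
#   check_reference_order("as shown in [1] and [3]")
#   -> {"max_reference": 3, "out_of_order": [(2, 3)],
#       "missing_references": [2], "is_ordered": False}
# (the second citation jumps to [3] before [2] has appeared)
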
def highlight_issues_in_pdf(file, language_matches: List[Dict[str, Any]]) -> Tuple[List[Dict[str, Any]], bytes]:
    """
    Highlights language issues in the PDF and returns the updated issue list together
    with the annotated PDF as bytes. LanguageTool offsets are mapped to specific words
    in the PDF, and those words are highlighted.
    """
    try:
        doc = fitz.open(stream=file.read(), filetype="pdf") if not isinstance(file, str) else fitz.open(file)

        # Collect every word with its page number and bounding box.
        word_list = []
        for page_number in range(len(doc)):
            page = doc[page_number]
            words = page.get_text("words")
            for w in words:
                word_text = w[4]
                word_list.append((page_number, word_text, w[0], w[1], w[2], w[3]))

        # Rebuild a single text string so issue offsets can be mapped back onto words.
        concatenated_text = " ".join([w[1] for w in word_list])

        # Only highlight issues between the abstract and the references section.
        abstract_start = concatenated_text.lower().find("abstract")
        abstract_offset = 0 if abstract_start == -1 else abstract_start

        references_start = concatenated_text.lower().rfind("references")
        references_offset = len(concatenated_text) if references_start == -1 else references_start

        for idx, issue in enumerate(language_matches, start=1):
            offset = issue["offset"]
            length = issue["length"]

            if offset < abstract_offset or offset >= references_offset:
                continue

            error_text = concatenated_text[offset:offset + length]
            print(f"\nIssue {idx}: '{error_text}' at offset {offset} with length {length}")

            # Find the words that overlap the issue's character span.
            current_pos = 0
            target_words = []
            for word in word_list:
                word_text = word[1]
                word_length = len(word_text) + 1  # +1 for the joining space

                if current_pos + word_length > offset and current_pos < offset + length:
                    target_words.append(word)
                current_pos += word_length

            if not target_words:
                continue

            initial_x = target_words[0][2]
            initial_y = target_words[0][3]
            final_x = target_words[-1][4]
            final_y = target_words[-1][5]
            issue["coordinates"] = [initial_x, initial_y, final_x, final_y]
            issue["page"] = target_words[0][0] + 1

            print()
            print("issue", issue)
            print("error text", error_text)
            print(target_words)
            print()

            # Highlight each matched word.
            for target in target_words:
                page_num, word_text, x0, y0, x1, y1 = target
                page = doc[page_num]
                rect = fitz.Rect(x0 - 1, y0 - 1, x1 + 1, y1 + 1)
                highlight = page.add_highlight_annot(rect)
                highlight.set_colors(stroke=(1, 1, 0))
                highlight.update()

        byte_stream = io.BytesIO()
        doc.save(byte_stream)
        annotated_pdf_bytes = byte_stream.getvalue()
        doc.close()

        # Keep a local copy of the annotated PDF for debugging.
        with open("annotated_temp.pdf", "wb") as f:
            f.write(annotated_pdf_bytes)

        return language_matches, annotated_pdf_bytes
    except Exception as e:
        print(f"Error in highlighting PDF: {e}")
        # Keep the return shape consistent with the success path.
        return language_matches, b""


def analyze_pdf(filepath: str) -> Tuple[Dict[str, Any], bytes]:
    """Analyzes the PDF for language issues and returns results and annotated PDF."""
    try:
        full_text = extract_pdf_text(filepath)
        if not full_text:
            return {"error": "Failed to extract text from PDF."}, None

        results = {
            "issues": [],
            "regex_checks": {
                "metadata": check_metadata(full_text),
                "disclosures": check_disclosures(full_text),
                "figures_and_tables": check_figures_and_tables(full_text),
                "references": check_references(full_text),
                "structure": check_structure(full_text),
                "figure_order": check_figure_order(full_text),
                "reference_order": check_reference_order(full_text)
            }
        }

        language_issues = check_language_issues(full_text)
        if "error" in language_issues:
            return {"error": language_issues["error"]}, None

        issues = language_issues.get("issues", [])
        if issues:
            language_matches, annotated_pdf = highlight_issues_in_pdf(filepath, issues)
            results["issues"] = language_matches
            return results, annotated_pdf
        else:
            return results, None

    except Exception as e:
        return {"error": str(e)}, None


def process_upload(file):
    """
    Process the uploaded PDF file and return analysis results and annotated PDF.
    """
    if file is None:
        return json.dumps({"error": "No file uploaded"}, indent=2), None

    # Gradio passes the upload as raw bytes (type="binary"); write it to a temp file for analysis.
    temp_input = tempfile.NamedTemporaryFile(delete=False, suffix='.pdf')
    temp_input.write(file)
    temp_input.close()
    temp_input_path = temp_input.name
    print(temp_input_path)

    results, annotated_pdf = analyze_pdf(temp_input_path)

    print(results)
    results_json = json.dumps(results, indent=2)

    os.unlink(temp_input_path)

    if annotated_pdf:
        with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:
            tmp_file.write(annotated_pdf)
        return results_json, tmp_file.name

    return results_json, None


def create_interface():
    with gr.Blocks(title="PDF Analyzer") as interface:
        gr.Markdown("# PDF Analyzer")
        gr.Markdown("Upload a PDF document to analyze its structure, references, language, and more.")

        with gr.Row():
            file_input = gr.File(
                label="Upload PDF",
                file_types=[".pdf"],
                type="binary"
            )

        with gr.Row():
            analyze_btn = gr.Button("Analyze PDF")

        with gr.Row():
            results_output = gr.JSON(
                label="Analysis Results",
                show_label=True
            )

        with gr.Row():
            pdf_output = gr.File(
                label="Annotated PDF",
                show_label=True
            )

        analyze_btn.click(
            fn=process_upload,
            inputs=[file_input],
            outputs=[results_output, pdf_output]
        )

    return interface


if __name__ == "__main__":
    interface = create_interface()
    interface.launch(
        share=False,
        server_port=None
    )