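"""PDF Analyzer.

Runs a set of heuristic checks over a PDF (metadata, disclosure statements,
figure/reference ordering, LanguageTool grammar issues), highlights the
detected language issues in an annotated copy of the PDF, and serves the
results through a small Gradio interface.
"""
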
import re
import fitz  # PyMuPDF
import language_tool_python
from typing import List, Dict, Any, Tuple
from collections import Counter
import json
import traceback
import io
import tempfile
import os
import gradio as gr

# language_tool_python runs LanguageTool on a JVM; point JAVA_HOME at the
# local JDK install (adjust this path for your environment)
os.environ['JAVA_HOME'] = '/usr/lib/jvm/java-11-openjdk-amd64'

# ------------------------------
# Analysis Functions
# ------------------------------

def extract_pdf_text_by_page(file) -> List[str]:
    """Extracts text from a PDF file, page by page, using PyMuPDF."""
    if isinstance(file, str):
        with fitz.open(file) as doc:
            return [page.get_text("text") for page in doc]
    else:
        with fitz.open(stream=file.read(), filetype="pdf") as doc:
            return [page.get_text("text") for page in doc]

def extract_pdf_text(file) -> str:
    """Extracts full text from a PDF file using PyMuPDF."""
    try:
        doc = fitz.open(file) if isinstance(file, str) else fitz.open(stream=file.read(), filetype="pdf")
        full_text = ""

        with doc:
            for page_num, page in enumerate(doc, start=1):
                # Get text blocks with their coordinates
                blocks = page.get_text("blocks")
                processed_text = ""

                for block in blocks:
                    text = block[4]  # The text content is at index 4

                    # Re-join words split by a line-break hyphen ("exam-\nple" -> "example"),
                    # leaving ordinary in-word hyphens (e.g., "state-of-the-art") intact
                    text = re.sub(r'(\w+)-\s*\n\s*(\w+)', r'\1\2', text)
                    processed_text += text + "\n"

                full_text += processed_text
                print(f"Extracted text from page {page_num}: {len(processed_text)} characters.")

        print(f"Total extracted text length: {len(full_text)} characters.")
        return full_text

    except Exception as e:
        print(f"Error extracting text from PDF: {e}")
        return ""

def check_text_presence(full_text: str, search_terms: List[str]) -> Dict[str, bool]:
    """Checks for the presence of required terms in the text."""
    return {term: term.lower() in full_text.lower() for term in search_terms}

def label_authors(full_text: str) -> str:
    """Label authors in the text with 'Authors:' if not already labeled."""
    author_line_regex = r"^(?:.*\n)(.*?)(?:\n\n)"
    match = re.search(author_line_regex, full_text, re.MULTILINE)
    if match:
        authors = match.group(1).strip()
        return full_text.replace(authors, f"Authors: {authors}")
    return full_text

def check_metadata(full_text: str) -> Dict[str, Any]:
    """Check for metadata elements."""
    return {
        "author_email": bool(re.search(r'\b[\w.-]+?@\w+?\.\w+?\b', full_text)),
        "list_of_authors": bool(re.search(r'Authors?:', full_text, re.IGNORECASE)),
        "keywords_list": bool(re.search(r'Keywords?:', full_text, re.IGNORECASE)),
        "word_count": len(full_text.split()) or "Missing"
    }

def check_disclosures(full_text: str) -> Dict[str, bool]:
    """Check for disclosure statements."""
    search_terms = [
        "author contributions statement",
        "conflict of interest statement",
        "ethics statement",
        "funding statement",
        "data access statement"
    ]
    return check_text_presence(full_text, search_terms)

def check_figures_and_tables(full_text: str) -> Dict[str, bool]:
    """Check for figures and tables."""
    return {
        "figures_with_citations": bool(re.search(r'Figure \d+.*?citation', full_text, re.IGNORECASE)),
        "figures_legends": bool(re.search(r'Figure \d+.*?legend', full_text, re.IGNORECASE)),
        "tables_legends": bool(re.search(r'Table \d+.*?legend', full_text, re.IGNORECASE))
    }

def check_references(full_text: str) -> Dict[str, Any]:
    """Check for references."""
    return {
        "old_references": bool(re.search(r'\b19[0-9]{2}\b', full_text)),
        "citations_in_abstract": bool(re.search(r'\b(citation|reference)\b', full_text[:1000], re.IGNORECASE)),
        "reference_count": len(re.findall(r'\[.*?\]', full_text)),
        "self_citations": bool(re.search(r'Self-citation', full_text, re.IGNORECASE))
    }

def check_structure(full_text: str) -> Dict[str, bool]:
    """Check document structure."""
    return {
        "imrad_structure": all(section in full_text for section in ["Introduction", "Methods", "Results", "Discussion"]),
        "abstract_structure": "structured abstract" in full_text.lower()
    }

def check_language_issues(full_text: str) -> Dict[str, Any]:
    """Check for language issues using LanguageTool."""
    try:
        language_tool = language_tool_python.LanguageTool('en-US')
        matches = language_tool.check(full_text)
        language_tool.close()  # shut down the background LanguageTool server
        issues = []
        for match in matches:
            issues.append({
                "message": match.message,
                "context": match.context.strip(),
                "suggestions": match.replacements[:3] if match.replacements else [],
                "category": match.category,
                "rule_id": match.ruleId,
                "offset": match.offset,
                "length": match.errorLength,
                "coordinates":[],
                "page":0
            })
        print(f"Total language issues found: {len(issues)}")
        return {
            "total_issues": len(issues),
            "issues": issues
        }
    except Exception as e:
        print(f"Error checking language issues: {e}")
        return {"error": str(e)}

def check_language(full_text: str) -> Dict[str, Any]:
    """Check language quality."""
    return {
        "plain_language": bool(re.search(r'plain language summary', full_text, re.IGNORECASE)),
        "readability_issues": False,  # Placeholder for future implementation
        "language_issues": check_language_issues(full_text)
    }

def check_figure_order(full_text: str) -> Dict[str, Any]:
    """Check if figures are referred to in sequential order."""
    figure_pattern = r'(?:Fig(?:ure)?\.?|Figure)\s*(\d+)'
    figure_references = re.findall(figure_pattern, full_text, re.IGNORECASE)
    figure_numbers = sorted(set(int(num) for num in figure_references))
    
    is_sequential = all(a + 1 == b for a, b in zip(figure_numbers, figure_numbers[1:]))
    
    if figure_numbers:
        expected_figures = set(range(1, max(figure_numbers) + 1))
        missing_figures = list(expected_figures - set(figure_numbers))
    else:
        missing_figures = None

    reference_counts = Counter(int(num) for num in figure_references)
    duplicates = [num for num, count in reference_counts.items() if count > 1]
    referenced_once = [num for num, count in reference_counts.items() if count == 1]

    return {
        "sequential_order": is_sequential,
        "figure_count": len(figure_numbers),
        "missing_figures": missing_figures,
        "figure_order": figure_numbers,
        "duplicate_references": duplicates,
        "referenced_once": referenced_once
    }
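
# Example (illustrative):
#   check_figure_order("See Figure 1 and Fig. 3; Figure 1 shows ...")
#   -> sequential_order: False, missing_figures: [2], duplicate_references: [1]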

def check_reference_order(full_text: str) -> Dict[str, Any]:
    """Check if references in the main body text are in order."""
    reference_pattern = r'\[(\d+)\]'
    references = re.findall(reference_pattern, full_text)
    ref_numbers = [int(ref) for ref in references]
    
    max_ref = 0
    out_of_order = []
    for i, ref in enumerate(ref_numbers):
        if ref > max_ref + 1:
            out_of_order.append((i+1, ref))
        max_ref = max(max_ref, ref)
    
    all_refs = set(range(1, max_ref + 1))
    used_refs = set(ref_numbers)
    missing_refs = list(all_refs - used_refs)
    
    return {
        "max_reference": max_ref,
        "out_of_order": out_of_order,
        "missing_references": missing_refs,
        "is_ordered": len(out_of_order) == 0 and len(missing_refs) == 0
    }
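
# Example (illustrative):
#   check_reference_order("As shown in [1] and [3] ...")
#   -> out_of_order: [(2, 3)], missing_references: [2], is_ordered: False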

def highlight_issues_in_pdf(file, language_matches: List[Dict[str, Any]]) -> bytes:
    """
    Highlights language issues in the PDF and returns the annotated PDF as bytes.
    This function maps LanguageTool matches to specific words in the PDF
    and highlights those words.
    """
    try:
        # Open the PDF
        doc = fitz.open(file) if isinstance(file, str) else fitz.open(stream=file.read(), filetype="pdf")
        print(f"Opened PDF with {len(doc)} pages.")
        # Extract words with positions from each page
        word_list = []  # List of tuples: (page_number, word, x0, y0, x1, y1)
        for page_number in range(len(doc)):
            page = doc[page_number]
            words = page.get_text("words")  # List of tuples: (x0, y0, x1, y1, "word", block_no, line_no, word_no)
            for w in words:
                word_text = w[4]
                # Insert a space before '[' so citations stay separated ("globally [2]", not "globally[2]")
                if '[' in word_text:
                    word_text = word_text.replace('[', ' [')
                word_list.append((page_number, word_text, w[0], w[1], w[2], w[3]))
        print(f"Total words extracted: {len(word_list)}")

        # Concatenate all words to form the full text
        concatenated_text = " ".join([w[1] for w in word_list])
        print(f"Concatenated text length: {len(concatenated_text)} characters.")

        # Iterate over each language issue
        for idx, issue in enumerate(language_matches, start=1):
            offset = issue["offset"]
            length = issue["length"]
            error_text = concatenated_text[offset:offset+length]
            print(f"\nIssue {idx}: '{error_text}' at offset {offset} with length {length}")

            # Find the words that fall within the error span
            current_pos = 0
            target_words = []
            for word in word_list:
                word_text = word[1]
                word_length = len(word_text) + 1  # +1 for the space

                if current_pos + word_length > offset and current_pos < offset + length:
                    target_words.append(word)
                current_pos += word_length

            if not target_words:
                print("No matching words found for this issue.")
                continue
            
            initial_x = target_words[0][2]
            initial_y = target_words[0][3]
            final_x = target_words[-1][4]
            final_y = target_words[-1][5]
            issue["coordinates"] = [initial_x, initial_y, final_x, final_y]
            issue["page"] = target_words[0][0] + 1
            # Add highlight annotations to the target words
            for target in target_words:
                page_num, word_text, x0, y0, x1, y1 = target
                page = doc[page_num]
                # Define a rectangle around the word with some padding
                rect = fitz.Rect(x0 - 1, y0 - 1, x1 + 1, y1 + 1)
                # Add a highlight annotation
                highlight = page.add_highlight_annot(rect)
                highlight.set_colors(stroke=(1, 1, 0))  # Yellow color
                highlight.update()
                print(f"Highlighted '{word_text}' on page {page_num + 1} at position ({x0}, {y0}, {x1}, {y1})")
                

        # Save annotated PDF to bytes
        byte_stream = io.BytesIO()
        doc.save(byte_stream)
        annotated_pdf_bytes = byte_stream.getvalue()
        doc.close()

        # Save annotated PDF locally for verification
        with open("annotated_temp.pdf", "wb") as f:
            f.write(annotated_pdf_bytes)
        print("Annotated PDF saved as 'annotated_temp.pdf' for manual verification.")

        return language_matches, annotated_pdf_bytes
    except Exception as e:
        print(f"Error in highlighting PDF: {e}")
        return language_matches, b""

# ------------------------------
# Main Analysis Function
# ------------------------------

def analyze_pdf(filepath: str) -> Tuple[Dict[str, Any], bytes]:
    """Analyzes the PDF for language issues and returns results and annotated PDF."""
    try:
        full_text = extract_pdf_text(filepath)
        if not full_text:
            return {"error": "Failed to extract text from PDF."}, None

        language_issues = check_language_issues(full_text)

        # Handle potential errors from check_language_issues
        if "error" in language_issues:
            return {"error": language_issues["error"]}, None

        issues = language_issues.get("issues", [])
        
        if issues:
            updated_issues, annotated_pdf = highlight_issues_in_pdf(filepath, issues)
            return {"issues": updated_issues}, annotated_pdf
        else:
            # Return a meaningful message and no annotated PDF if no issues are found
            return {"message": "No language issues found in the uploaded PDF."}, None
    except Exception as e:
        # Return the error message and no annotated PDF
        return {"error": str(e)}, None

# ------------------------------
# Gradio Interface
# ------------------------------

def process_upload(file):
    """
    Process the uploaded PDF bytes and return analysis results plus the path
    to an annotated PDF, if one was produced.
    """
    if file is None:
        return json.dumps({"error": "No file uploaded"}, indent=2), None

    # Write the uploaded bytes to a temporary file so analyze_pdf can open it by path
    temp_input = tempfile.NamedTemporaryFile(delete=False, suffix='.pdf')
    temp_input.write(file)
    temp_input.close()
    temp_input_path = temp_input.name

    try:
        # Analyze the PDF
        results, annotated_pdf = analyze_pdf(temp_input_path)
        results_json = json.dumps(results, indent=2)

        # If we have an annotated PDF, save it to a temporary file for download
        if annotated_pdf:
            with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:
                tmp_file.write(annotated_pdf)
                return results_json, tmp_file.name

        return results_json, None
    except Exception as e:
        error_message = json.dumps({
            "error": str(e),
            "traceback": traceback.format_exc()
        }, indent=2)
        return error_message, None
    finally:
        # Clean up the temporary input file
        os.unlink(temp_input_path)

    
def create_interface():
    with gr.Blocks(title="PDF Analyzer") as interface:
        gr.Markdown("# PDF Analyzer")
        gr.Markdown("Upload a PDF document to analyze its structure, references, language, and more.")
        
        with gr.Row():
            file_input = gr.File(
                label="Upload PDF",
                file_types=[".pdf"],
                type="binary"
            )
        
        with gr.Row():
            analyze_btn = gr.Button("Analyze PDF")
        
        with gr.Row():
            results_output = gr.JSON(
                label="Analysis Results",
                show_label=True
            )
        
        with gr.Row():
            pdf_output = gr.File(
                label="Annotated PDF",
                show_label=True
            )
        
        analyze_btn.click(
            fn=process_upload,
            inputs=[file_input],
            outputs=[results_output, pdf_output]
        )
    
    return interface

if __name__ == "__main__":
    interface = create_interface()
    interface.launch(
        share=True,  # Set to False in production
        # server_name="0.0.0.0",
        server_port=None
    )