# import re
# import fitz  # PyMuPDF
# from pdfminer.high_level import extract_text
# from pdfminer.layout import LAParams
# import language_tool_python
# from typing import List, Dict, Any, Tuple
# from collections import Counter
# import json
# import traceback
# import io
# import tempfile
# import os
# import gradio as gr

# # Set JAVA_HOME environment variable
# os.environ['JAVA_HOME'] = '/usr/lib/jvm/java-11-openjdk-amd64'

# # ------------------------------
# # Analysis Functions
# # ------------------------------

# # def extract_pdf_text_by_page(file) -> List[str]:
# #     """Extracts text from a PDF file, page by page, using PyMuPDF."""
# #     if isinstance(file, str):
# #         with fitz.open(file) as doc:
# #             return [page.get_text("text") for page in doc]
# #     else:
# #         with fitz.open(stream=file.read(), filetype="pdf") as doc:
# #             return [page.get_text("text") for page in doc]

# def extract_pdf_text(file) -> str:
#     """Extracts full text from a PDF file using PyMuPDF."""
#     try:
#         doc = fitz.open(stream=file.read(), filetype="pdf") if not isinstance(file, str) else fitz.open(file)
#         full_text = ""
        
#         for page_number in range(len(doc)):
#             page = doc[page_number]
#             # "word" is not a valid get_text option; request plain text explicitly
#             full_text += page.get_text("text")

#         print(full_text)
#         doc.close()
#         print(f"Total extracted text length: {len(full_text)} characters.")
#         return full_text
        
#     except Exception as e:
#         print(f"Error extracting text from PDF: {e}")
#         return ""

# def check_text_presence(full_text: str, search_terms: List[str]) -> Dict[str, bool]:
#     """Checks for the presence of required terms in the text."""
#     return {term: term.lower() in full_text.lower() for term in search_terms}

# def label_authors(full_text: str) -> str:
#     """Label authors in the text with 'Authors:' if not already labeled."""
#     author_line_regex = r"^(?:.*\n)(.*?)(?:\n\n)"
#     match = re.search(author_line_regex, full_text, re.MULTILINE)
#     if match:
#         authors = match.group(1).strip()
#         return full_text.replace(authors, f"Authors: {authors}")
#     return full_text

# def check_metadata(full_text: str) -> Dict[str, Any]:
#     """Check for metadata elements."""
#     return {
#         "author_email": bool(re.search(r'\b[\w.-]+?@\w+?\.\w+?\b', full_text)),
#         "list_of_authors": bool(re.search(r'Authors?:', full_text, re.IGNORECASE)),
#         "keywords_list": bool(re.search(r'Keywords?:', full_text, re.IGNORECASE)),
#         "word_count": len(full_text.split()) or "Missing"
#     }

# def check_disclosures(full_text: str) -> Dict[str, bool]:
#     """Check for disclosure statements."""
#     search_terms = [
#         "author contributions statement",
#         "conflict of interest statement",
#         "ethics statement",
#         "funding statement",
#         "data access statement"
#     ]
#     return check_text_presence(full_text, search_terms)

# def check_figures_and_tables(full_text: str) -> Dict[str, bool]:
#     """Check for figures and tables."""
#     return {
#         "figures_with_citations": bool(re.search(r'Figure \d+.*?citation', full_text, re.IGNORECASE)),
#         "figures_legends": bool(re.search(r'Figure \d+.*?legend', full_text, re.IGNORECASE)),
#         "tables_legends": bool(re.search(r'Table \d+.*?legend', full_text, re.IGNORECASE))
#     }

# def check_references(full_text: str) -> Dict[str, Any]:
#     """Check for references."""
#     return {
#         "old_references": bool(re.search(r'\b19[0-9]{2}\b', full_text)),
#         "citations_in_abstract": bool(re.search(r'\b(citation|reference)\b', full_text[:1000], re.IGNORECASE)),
#         "reference_count": len(re.findall(r'\[.*?\]', full_text)),
#         "self_citations": bool(re.search(r'Self-citation', full_text, re.IGNORECASE))
#     }

# def check_structure(full_text: str) -> Dict[str, bool]:
#     """Check document structure."""
#     return {
#         "imrad_structure": all(section in full_text for section in ["Introduction", "Methods", "Results", "Discussion"]),
#         "abstract_structure": "structured abstract" in full_text.lower()
#     }

# def check_language_issues(full_text: str) -> Dict[str, Any]:
#     """Check for language issues using LanguageTool and additional regex patterns."""
#     try:
#         language_tool = language_tool_python.LanguageTool('en-US')
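#         # Sketch (an assumption, not the original setup): to use locally
#         # downloaded n-gram data (see the Flask downloader at the bottom of
#         # this file) for better confusion-pair detection, language_tool_python
#         # can pass server options via a config dict; 'languageModel' should
#         # point at the directory containing the extracted 'en' folder:
#         #     language_tool = language_tool_python.LanguageTool(
#         #         'en-US', config={'languageModel': './ngram_data'})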
#         matches = language_tool.check(full_text)
#         issues = []
        
#         # Process LanguageTool matches
#         for match in matches:
#             # Ignore issues with rule_id 'EN_SPLIT_WORDS_HYPHEN'
#             if match.ruleId == "EN_SPLIT_WORDS_HYPHEN":
#                 continue
                
#             issues.append({
#                 "message": match.message,
#                 "context": match.context.strip(),
#                 "suggestions": match.replacements[:3] if match.replacements else [],
#                 "category": match.category,
#                 "rule_id": match.ruleId,
#                 "offset": match.offset,
#                 "length": match.errorLength,
#                 "coordinates": [],
#                 "page": 0
#             })
#         print(f"Total language issues found: {len(issues)}")
        
#         # -----------------------------------
#         # Additions: Regex-based Issue Detection
#         # -----------------------------------
        
#         # Define regex pattern to find words immediately followed by '[' without space
#         regex_pattern = r'\b(\w+)\[(\d+)\]'
#         regex_matches = list(re.finditer(regex_pattern, full_text))
#         print(f"Total regex issues found: {len(regex_matches)}")
        
#         # Process regex matches
#         for match in regex_matches:
#             word = match.group(1)
#             number = match.group(2)
#             start = match.start()
#             end = match.end()
#             issues.append({
#                 "message": f"Missing space before '[' in '{word}[{number}]'. Should be '{word} [{number}]'.",
#                 "context": full_text[max(match.start() - 30, 0):min(match.end() + 30, len(full_text))].strip(),
#                 "suggestions": [f"{word} [{number}]", f"{word} [`{number}`]", f"{word} [number {number}]"],
#                 "category": "Formatting",
#                 "rule_id": "SPACE_BEFORE_BRACKET",
#                 "offset": match.start(),
#                 "length": match.end() - match.start(),
#                 "coordinates": [],
#                 "page": 0
#             })
        
#         print(f"Total combined issues found: {len(issues)}")
        
#         return {
#             "total_issues": len(issues),
#             "issues": issues
#         }
#     except Exception as e:
#         print(f"Error checking language issues: {e}")
#         return {"error": str(e)}

# def check_language(full_text: str) -> Dict[str, Any]:
#     """Check language quality."""
#     return {
#         "plain_language": bool(re.search(r'plain language summary', full_text, re.IGNORECASE)),
#         "readability_issues": False,  # Placeholder for future implementation
#         "language_issues": check_language_issues(full_text)
#     }

# def check_figure_order(full_text: str) -> Dict[str, Any]:
#     """Check if figures are referred to in sequential order."""
#     figure_pattern = r'Fig(?:ure)?\.?\s*(\d+)'  # matches "Fig. 3", "Figure 3", etc.
#     figure_references = re.findall(figure_pattern, full_text, re.IGNORECASE)
#     figure_numbers = sorted(set(int(num) for num in figure_references))
    
#     is_sequential = all(a + 1 == b for a, b in zip(figure_numbers, figure_numbers[1:]))
    
#     if figure_numbers:
#         expected_figures = set(range(1, max(figure_numbers) + 1))
#         missing_figures = list(expected_figures - set(figure_numbers))
#     else:
#         missing_figures = None

#     duplicates = [num for num, count in Counter(figure_references).items() if count > 1]
#     duplicate_numbers = [int(num) for num in duplicates]
#     not_mentioned = list(set(figure_references) - set(duplicates))
    
#     return {
#         "sequential_order": is_sequential,
#         "figure_count": len(figure_numbers),
#         "missing_figures": missing_figures,
#         "figure_order": figure_numbers,
#         "duplicate_references": duplicates,
#         "not_mentioned": not_mentioned
#     }

# def check_reference_order(full_text: str) -> Dict[str, Any]:
#     """Check if references in the main body text are in order."""
#     reference_pattern = r'\[(\d+)\]'
#     references = re.findall(reference_pattern, full_text)
#     ref_numbers = [int(ref) for ref in references]
    
#     max_ref = 0
#     out_of_order = []
#     for i, ref in enumerate(ref_numbers):
#         if ref > max_ref + 1:
#             out_of_order.append((i+1, ref))
#         max_ref = max(max_ref, ref)
    
#     all_refs = set(range(1, max_ref + 1))
#     used_refs = set(ref_numbers)
#     missing_refs = list(all_refs - used_refs)
    
#     return {
#         "max_reference": max_ref,
#         "out_of_order": out_of_order,
#         "missing_references": missing_refs,
#         "is_ordered": len(out_of_order) == 0 and len(missing_refs) == 0
#     }

# def highlight_issues_in_pdf(file, language_matches: List[Dict[str, Any]]) -> bytes:
#     """
#     Highlights language issues in the PDF and returns the updated matches
#     (with coordinates and page numbers filled in) together with the
#     annotated PDF as bytes. The function maps LanguageTool matches to
#     specific words in the PDF and highlights those words.
#     """
#     try:
#         # Open the PDF
#         doc = fitz.open(stream=file.read(), filetype="pdf") if not isinstance(file, str) else fitz.open(file)
#         # print(f"Opened PDF with {len(doc)} pages.")
#         # print(language_matches)
#         # Extract words with positions from each page
#         word_list = []  # List of tuples: (page_number, word, x0, y0, x1, y1)
#         for page_number in range(len(doc)):
#             page = doc[page_number]
#             print(page.get_text("words"))
#             words = page.get_text("words")  # List of tuples: (x0, y0, x1, y1, "word", block_no, line_no, word_no)
#             for w in words:
# #                 print(w)
#                 word_text = w[4]
#                 # **Fix:** Insert a space before '[' to ensure "globally [2]" instead of "globally[2]"
#                 # if '[' in word_text:
#                 #     word_text = word_text.replace('[', ' [')
#                 word_list.append((page_number, word_text, w[0], w[1], w[2], w[3]))
#         # print(f"Total words extracted: {len(word_list)}")

#         # Concatenate all words to form the full text
#         concatenated_text=""
#         concatenated_text = " ".join([w[1] for w in word_list])
        
#         # print(f"Concatenated text length: {concatenated_text} characters.")

#         # Find "Abstract" section and set the processing start point
#         abstract_start = concatenated_text.lower().find("abstract")
#         abstract_offset = 0 if abstract_start == -1 else abstract_start

#         # Find "References" section and exclude from processing
#         references_start = concatenated_text.lower().find("references")
#         references_offset = len(concatenated_text) if references_start == -1 else references_start

#         # Iterate over each language issue
#         for idx, issue in enumerate(language_matches, start=1):
#             offset = issue["offset"]  # offset+line_no-1
#             length = issue["length"]

#             # Skip issues in the references section
#             if offset < abstract_offset or offset >= references_offset:
#                 continue
            
            
#             error_text = concatenated_text[offset:offset+length]
#             print(f"\nIssue {idx}: '{error_text}' at offset {offset} with length {length}")

#             # Find the words that fall within the error span
#             current_pos = 0
#             target_words = []
#             for word in word_list:
#                 word_text = word[1]
#                 word_length = len(word_text) + 1  # +1 for the space

#                 if current_pos + word_length > offset and current_pos < offset + length:
#                     target_words.append(word)
#                 current_pos += word_length

#             if not target_words:
#                 # print("No matching words found for this issue.")
#                 continue

#             initial_x = target_words[0][2]
#             initial_y = target_words[0][3]
#             final_x = target_words[-1][4]
#             final_y = target_words[-1][5]
#             issue["coordinates"] = [initial_x, initial_y, final_x, final_y]
#             issue["page"] = target_words[0][0] + 1
#             # Add highlight annotations to the target words
#             print()
#             print("issue", issue)
#             print("error text", error_text)
#             print(target_words)
#             print()
#             for target in target_words:
#                 page_num, word_text, x0, y0, x1, y1 = target
#                 page = doc[page_num]
#                 # Define a rectangle around the word with some padding
#                 rect = fitz.Rect(x0 - 1, y0 - 1, x1 + 1, y1 + 1)
#                 # Add a highlight annotation
#                 highlight = page.add_highlight_annot(rect)
#                 highlight.set_colors(stroke=(1, 1, 0))  # Yellow color
#                 highlight.update()
#                 # print(f"Highlighted '{word_text}' on page {page_num + 1} at position ({x0}, {y0}, {x1}, {y1})")
            

#         # Save annotated PDF to bytes
#         byte_stream = io.BytesIO()
#         doc.save(byte_stream)
#         annotated_pdf_bytes = byte_stream.getvalue()
#         doc.close()

#         # Save annotated PDF locally for verification
#         with open("annotated_temp.pdf", "wb") as f:
#             f.write(annotated_pdf_bytes)
#         # print("Annotated PDF saved as 'annotated_temp.pdf' for manual verification.")

#         return language_matches, annotated_pdf_bytes
#     except Exception as e:
#         print(f"Error in highlighting PDF: {e}")
#         # Return the matches unchanged plus empty bytes so callers can still
#         # unpack the two-tuple on failure.
#         return language_matches, b""




# # ------------------------------
# # Main Analysis Function
# # ------------------------------

# # server/gradio_client.py

# def analyze_pdf(filepath: str) -> Tuple[Dict[str, Any], bytes]:
#     """Analyzes the PDF for language issues and returns results and annotated PDF."""
#     try:
#         full_text = extract_pdf_text(filepath)
#         if not full_text:
#             return {"error": "Failed to extract text from PDF."}, None
        
#         # Create the results structure
#         results = {
#             "issues": [],  # Initialize as empty array
#             "regex_checks": {
#                 "metadata": check_metadata(full_text),
#                 "disclosures": check_disclosures(full_text),
#                 "figures_and_tables": check_figures_and_tables(full_text),
#                 "references": check_references(full_text),
#                 "structure": check_structure(full_text),
#                 "figure_order": check_figure_order(full_text),
#                 "reference_order": check_reference_order(full_text)
#             }
#         }

#         # Handle language issues
#         language_issues = check_language_issues(full_text)
#         if "error" in language_issues:
#             return {"error": language_issues["error"]}, None

#         issues = language_issues.get("issues", [])
#         if issues:
#             language_matches, annotated_pdf = highlight_issues_in_pdf(filepath, issues)
#             results["issues"] = language_matches  # This is already an array from check_language_issues
#             return results, annotated_pdf
#         else:
#             # Keep issues as empty array if none found
#             return results, None

#     except Exception as e:
#         return {"error": str(e)}, None
# # ------------------------------
# # Gradio Interface
# # ------------------------------

# def process_upload(file):
#     """
#     Process the uploaded PDF file and return analysis results and annotated PDF.
#     """
#     # print(file.name)
#     if file is None:
#         return json.dumps({"error": "No file uploaded"}, indent=2), None

#     # # Create a temporary file to work with
    
#     # with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as temp_input:
#     #     temp_input.write(file)
#     #     temp_input_path = temp_input.name
#     #     print(temp_input_path)
    
#     temp_input = tempfile.NamedTemporaryFile(delete=False, suffix='.pdf')
#     temp_input.write(file)
#     temp_input_path = temp_input.name
#     temp_input.close()  # close before re-opening/unlinking (required on Windows)
#     print(temp_input_path)
#     # Analyze the PDF
    
#     results, annotated_pdf = analyze_pdf(temp_input_path)
    
#     print(results)
#     results_json = json.dumps(results, indent=2)

#     # Clean up the temporary input file
#     os.unlink(temp_input_path)

#     # If we have an annotated PDF, save it temporarily
#     if annotated_pdf:
#         with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:
#             tmp_file.write(annotated_pdf)
#             return results_json, tmp_file.name

#     return results_json, None
        
#     # except Exception as e:
#     #     error_message = json.dumps({
#     #         "error": str(e),
#     #         "traceback": traceback.format_exc()
#     #     }, indent=2)
#     #     return error_message, None
    

# def create_interface():
#     with gr.Blocks(title="PDF Analyzer") as interface:
#         gr.Markdown("# PDF Analyzer")
#         gr.Markdown("Upload a PDF document to analyze its structure, references, language, and more.")
        
#         with gr.Row():
#             file_input = gr.File(
#                 label="Upload PDF",
#                 file_types=[".pdf"],
#                 type="binary"
#             )
        
#         with gr.Row():
#             analyze_btn = gr.Button("Analyze PDF")
        
#         with gr.Row():
#             results_output = gr.JSON(
#                 label="Analysis Results",
#                 show_label=True
#             )
        
#         with gr.Row():
#             pdf_output = gr.File(
#                 label="Annotated PDF",
#                 show_label=True
#             )
        
#         analyze_btn.click(
#             fn=process_upload,
#             inputs=[file_input],
#             outputs=[results_output, pdf_output]
#         )
    
#     return interface

# if __name__ == "__main__":
#     interface = create_interface()
#     interface.launch(
#         share=False,  # keep False in production
#         # server_name="0.0.0.0",
#         server_port=None
#     )


import os
import requests
from flask import Flask, jsonify

app = Flask(__name__)

# Directory and file configuration
NGRAM_DATA_DIR = "./ngram_data"
NGRAM_FILE_NAME = "ngrams-en-20150817.zip"
NGRAM_FILE_PATH = os.path.join(NGRAM_DATA_DIR, NGRAM_FILE_NAME)
NGRAM_DOWNLOAD_URL = "https://languagetool.org/download/ngram-data/ngrams-en-20150817.zip"

# Ensure the directory exists
def ensure_directory_exists():
    os.makedirs(NGRAM_DATA_DIR, exist_ok=True)

# Download the n-gram data if not already downloaded
def download_ngram_data():
    if os.path.exists(NGRAM_FILE_PATH):
        print(f"File already exists at {NGRAM_FILE_PATH}, skipping download.")
        return

    print(f"Downloading n-gram data from {NGRAM_DOWNLOAD_URL}...")
    response = requests.get(NGRAM_DOWNLOAD_URL, stream=True, timeout=30)

    if response.status_code == 200:
        # Stream into a .part file first so an interrupted download does not
        # leave a partial zip that later runs mistake for a complete one.
        partial_path = NGRAM_FILE_PATH + ".part"
        with open(partial_path, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
        os.replace(partial_path, NGRAM_FILE_PATH)
        print(f"Downloaded and saved to {NGRAM_FILE_PATH}.")
    else:
        raise RuntimeError(f"Failed to download n-gram data. HTTP Status Code: {response.status_code}")

@app.route('/')
def home():
    return jsonify({"message": "Welcome to the LanguageTool n-gram downloader!"})

@app.route('/download-ngram', methods=['GET'])
def download_ngram():
    try:
        ensure_directory_exists()
        download_ngram_data()
        return jsonify({"message": "N-gram data is downloaded and saved.", "path": NGRAM_FILE_PATH})
    except Exception as e:
        return jsonify({"error": str(e)}), 500

if __name__ == "__main__":
    ensure_directory_exists()
    download_ngram_data()
    app.run(debug=True)