# NOTE(review): the following lines are web-page residue from scraping the
# hosted Spaces page (header, file size, commit hashes, line-number gutter).
# They are not Python; kept here as a comment so the file parses.
# Spaces: Running / Running / File size: 4,416 Bytes / ccaf8ca 2bd35a0 ccaf8ca
import logging
import time
from pathlib import Path
import contextlib

# configure root logging before the heavier imports below emit anything
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
)

import gradio as gr
import nltk
import torch

# star import — provides convert_PDF_to_Text and ocr_predictor used below
# (presumably among other names; verify against pdf2text's public API)
from pdf2text import *

# directory containing this script; used for the temp dir and the sample PDF
_here = Path(__file__).parent

nltk.download("stopwords")  # TODO=find where this requirement originates from
def load_uploaded_file(file_obj, temp_dir: Path = None):
    """
    load_uploaded_file - save an uploaded file to a local temp directory.

    Args:
        file_obj (gr.File or list): Gradio file object, possibly wrapped in a
            single-element list
        temp_dir (Path, optional): directory to copy the upload into.
            Defaults to a "temp" directory next to this script.

    Returns:
        str or None: resolved path of the saved copy, or None on failure
    """
    # Gradio sometimes hands the upload back as a single-element list
    if isinstance(file_obj, list):
        file_obj = file_obj[0]
    file_path = Path(file_obj.name)
    if temp_dir is None:
        # BUG FIX: the original created this default dir but never assigned it
        # to temp_dir, so temp_path fell back to file_path and the "copy"
        # degenerated into rewriting the uploaded file onto itself.
        temp_dir = Path(__file__).parent / "temp"
    temp_dir = Path(temp_dir)
    temp_dir.mkdir(parents=True, exist_ok=True)
    try:
        # read_bytes() closes the handle (original left open(...).read() unclosed)
        pdf_bytes_obj = file_path.read_bytes()
        temp_path = temp_dir / file_path.name
        # keep a private copy so later processing does not depend on the
        # lifetime of Gradio's own upload tempfile
        with open(temp_path, "wb") as f:
            f.write(pdf_bytes_obj)
        logging.info(f"Saved uploaded file to {temp_path}")
        return str(temp_path.resolve())
    except Exception as e:
        logging.error(f"Trying to load file with path {file_path}, error: {e}")
        print(f"Trying to load file with path {file_path}, error: {e}")
        return None
def convert_PDF(pdf_obj, language: str = "en"):
    """
    convert_PDF - run OCR over a PDF file and return its text.

    Args:
        pdf_obj: path to the PDF file to convert
        language (str, optional): OCR language hint. Defaults to "en".
            Currently unused — see the TODO below.

    Returns:
        tuple[str, str]: the extracted text, plus an HTML snippet
        reporting the runtime and page count
    """
    global ocr_model
    start = time.perf_counter()
    results = convert_PDF_to_Text(
        pdf_obj,
        ocr_model=ocr_model,
        max_pages=20,
    )
    text = results["converted_text"]
    pages = results["num_pages"]
    # if alt_lang: # TODO: fix this (language/alt_lang handling not wired up)
    rt = round((time.perf_counter() - start) / 60, 2)
    print(f"Runtime: {rt} minutes")
    stats_html = f"<p>Runtime: {rt} minutes on CPU for {pages} pages</p>"
    return text, stats_html
if __name__ == "__main__":
    logging.info("Starting app")
    # GPU availability is only logged here; nothing below forces a device
    use_GPU = torch.cuda.is_available()
    logging.info(f"Using GPU status: {use_GPU}")
    logging.info("Loading OCR model")
    # silence the progress output ocr_predictor writes to stdout while the
    # pretrained weights are downloaded/loaded
    with contextlib.redirect_stdout(None):
        ocr_model = ocr_predictor(
            "db_resnet50",
            "crnn_mobilenet_v3_large",
            pretrained=True,
            assume_straight_pages=True,
        )
    # default input: the bundled sample PDF next to this script
    pdf_obj = _here / "example_file.pdf"
    pdf_obj = str(pdf_obj.resolve())
    # ensure the temp dir used by load_uploaded_file exists
    _temp_dir = _here / "temp"
    _temp_dir.mkdir(exist_ok=True)
    logging.info("starting demo")
    demo = gr.Blocks()
    with demo:
        gr.Markdown("# PDF to Text")
        gr.Markdown("**Upload a PDF file to convert to text**")
        gr.Markdown("If no file is uploaded, a sample PDF will be used")
        with gr.Column():
            gr.Markdown("## Load Inputs")
            gr.Markdown("Upload your own file:")
            # this textbox doubles as the state holding the server-side path of
            # the file to convert (sample PDF by default, upload after "Load")
            pdf_obj = gr.Textbox(
                lines=1,
                label="VM file path",
                placeholder="When the file is uploaded, the path will appear here",
                value=pdf_obj,
            )
            with gr.Row():
                uploaded_file = gr.File(
                    label="Upload a PDF file",
                    file_count="single",
                    type="file",
                )
                load_file_button = gr.Button("Load Uploaded File")
        gr.Markdown("---")
        with gr.Column():
            gr.Markdown("## Convert PDF to Text")
            convert_button = gr.Button("Convert PDF!")
            out_placeholder = gr.HTML("<p><em>Output will appear below:</em></p>")
            gr.Markdown("### Output")
            OCR_text = gr.Textbox(
                label="OCR Result", placeholder="The OCR text will appear here"
            )
        # wiring: "Load" saves the upload to disk and shows its path in the
        # textbox; "Convert" runs OCR on whatever path the textbox holds
        load_file_button.click(
            fn=load_uploaded_file, inputs=uploaded_file, outputs=[pdf_obj]
        )
        convert_button.click(
            fn=convert_PDF, inputs=[pdf_obj], outputs=[OCR_text, out_placeholder]
        )
    # NOTE(review): enable_queue= is the legacy (gradio<4) launch flag — confirm
    # the installed gradio version still accepts it
    demo.launch(enable_queue=True)
# (trailing scrape artifact removed — stray "|" from the page's layout)