|
|
|
!pip install gradio |
|
!pip install transformers |
|
!pip install torch |
|
|
|
!pip install PyPDF2 |
|
!pip install pdfminer.six |
|
!pip install pdfplumber |
|
!pip install pdf2image |
|
!pip install Pillow |
|
!pip install pytesseract |
|
|
|
!apt-get install poppler-utils |
|
!apt install tesseract-ocr |
|
!apt install libtesseract-dev |
|
|
|
|
|
|
|
from transformers import pipeline

import gradio as gr
import numpy as np  # used by speech() below; was missing in the original
import torch

import PyPDF2
import pdfplumber

from pdfminer.high_level import extract_pages, extract_text
from pdfminer.layout import LTTextContainer, LTChar, LTRect, LTFigure

from PIL import Image
from pdf2image import convert_from_path
|
|
|
def text_extraction(element):
    """Extract the text of a pdfminer text element plus its formatting.

    Returns a tuple ``(text, formats)`` where ``formats`` is a
    de-duplicated list of the font names and font sizes of every
    character in the element (order is not guaranteed: it comes
    from a set).
    """
    extracted_text = element.get_text()

    # Record the font name and size of each individual character.
    seen_formats = []
    for line in element:
        if not isinstance(line, LTTextContainer):
            continue
        for ch in line:
            if isinstance(ch, LTChar):
                seen_formats.append(ch.fontname)
                seen_formats.append(ch.size)

    # De-duplicate via a set, matching the original contract.
    unique_formats = list(set(seen_formats))

    return (extracted_text, unique_formats)
|
|
|
|
|
def crop_image(element, pageObj):
    """Crop a PyPDF2 page to the bounding box of *element* (an LTFigure)
    and save the single cropped page as 'cropped_image.pdf'.

    Note: this mutates pageObj's mediabox in place and overwrites
    'cropped_image.pdf' in the working directory on every call.
    """
    # pdfminer coordinates: (x0, y0) is the lower-left corner of the
    # element's bounding box and (x1, y1) the upper-right corner.
    left, bottom, right, top = element.x0, element.y0, element.x1, element.y1

    # Shrink the mediabox to the figure's bounding box. (The original
    # labelled y0 as "top" and y1 as "bottom" and then assigned the
    # corners swapped, producing an inverted rectangle that readers had
    # to normalize; assign the corners the right way round instead.)
    pageObj.mediabox.lower_left = (left, bottom)
    pageObj.mediabox.upper_right = (right, top)

    # Write the cropped page out to a temporary one-page PDF.
    cropped_pdf_writer = PyPDF2.PdfWriter()
    cropped_pdf_writer.add_page(pageObj)
    with open('cropped_image.pdf', 'wb') as cropped_pdf_file:
        cropped_pdf_writer.write(cropped_pdf_file)
|
|
|
|
|
def convert_to_images(input_file, output_file="PDF_image.png"):
    """Render the first page of the PDF at *input_file* to a PNG.

    Parameters
    ----------
    input_file : str
        Path of the PDF to rasterise.
    output_file : str
        Where to save the PNG. Defaults to "PDF_image.png", the
        filename that image_to_text() reads back, so existing callers
        are unaffected.
    """
    # convert_from_path rasterises every page; only the first is kept,
    # which is fine here because the input is a one-page cropped PDF.
    pages = convert_from_path(input_file)
    first_page = pages[0]
    first_page.save(output_file, "PNG")
|
|
|
|
|
def image_to_text(image_path):
    """Run Tesseract OCR on the image at *image_path* and return the text."""
    # pytesseract is pip-installed at the top of the notebook but was
    # never imported anywhere, so the call below raised NameError at
    # runtime. Import it locally so the name resolves.
    import pytesseract

    img = Image.open(image_path)
    text = pytesseract.image_to_string(img)
    return text
|
|
|
|
|
|
|
def extract_table(pdf_path, page_num, table_num):
    """Extract one table from one page of a PDF with pdfplumber.

    Parameters
    ----------
    pdf_path : str
        Path of the PDF file.
    page_num : int
        Zero-based page index.
    table_num : int
        Zero-based index into that page's extracted tables.

    Returns the table as a list of rows (lists of cell strings / None).
    Raises IndexError if the page or table index is out of range.
    """
    # Context manager so the file handle is released (the original
    # opened a pdfplumber document per call and never closed it).
    with pdfplumber.open(pdf_path) as pdf:
        table_page = pdf.pages[page_num]
        table = table_page.extract_tables()[table_num]
    return table
|
|
|
|
|
def table_converter(table):
    """Render a table (list of rows of cells) as a pipe-delimited string.

    None cells become the literal string 'None'; embedded newlines in a
    cell are replaced with spaces. Rows are joined with '\n' and each
    row is wrapped in leading/trailing '|'.
    """
    rendered_rows = []
    for row in table:
        cells = []
        for cell in row:
            if cell is None:
                cells.append('None')
            elif '\n' in cell:
                cells.append(cell.replace('\n', ' '))
            else:
                cells.append(cell)
        rendered_rows.append('|' + '|'.join(cells) + '|')
    # join() naturally omits the trailing newline the original stripped
    # with table_string[:-1].
    return '\n'.join(rendered_rows)
|
|
|
def read_pdf(pdf_path):
    """Walk every page of the PDF at *pdf_path* and extract its content.

    Text elements are read with pdfminer, figures are cropped,
    rasterised and OCR'd, and tables are extracted with pdfplumber.

    Returns a dict mapping 'Page_<n>' to a list of five lists:
    [page_text, line_format, text_from_images, text_from_tables,
    page_content].
    """
    # The original opened the hard-coded path '/content/Article_11'
    # here instead of the pdf_path argument — fixed to honour the
    # parameter.
    pdfFileObj = open(pdf_path, 'rb')
    pdfReaded = PyPDF2.PdfReader(pdfFileObj)

    # Open the pdfplumber document ONCE for the whole file (the original
    # re-opened it inside the page loop, leaking one handle per page and
    # never closing any of them).
    pdf = pdfplumber.open(pdf_path)

    text_per_page = {}
    try:
        for pagenum, page in enumerate(extract_pages(pdf_path)):
            print("Elaborating Page_" + str(pagenum))

            pageObj = pdfReaded.pages[pagenum]
            page_text = []
            line_format = []
            text_from_images = []
            text_from_tables = []
            page_content = []

            # Table bookkeeping for this page.
            table_num = 0
            first_element = True
            table_extraction_flag = False
            # Vertical extent of the table currently being skipped; None
            # until the first table on the page has been located (the
            # original left these unbound, crashing if an LTRect appeared
            # on a page with no detected tables).
            lower_side = None
            upper_side = None

            page_tables = pdf.pages[pagenum]
            tables = page_tables.find_tables()

            # Sort the page's elements top-to-bottom (largest y1 first).
            page_elements = [(element.y1, element) for element in page._objs]
            page_elements.sort(key=lambda a: a[0], reverse=True)

            for i, component in enumerate(page_elements):
                element = component[1]

                if isinstance(element, LTTextContainer):
                    # Plain text is collected only while we are outside a
                    # table region (tables are rendered separately below).
                    if not table_extraction_flag:
                        (line_text, format_per_line) = text_extraction(element)
                        page_text.append(line_text)
                        line_format.append(format_per_line)
                        page_content.append(line_text)

                if isinstance(element, LTFigure):
                    # Crop the figure out of the page, rasterise it, OCR it.
                    crop_image(element, pageObj)
                    convert_to_images('cropped_image.pdf')
                    image_text = image_to_text('PDF_image.png')
                    text_from_images.append(image_text)
                    page_content.append(image_text)
                    # Placeholders keep the parallel lists aligned.
                    page_text.append('image')
                    line_format.append('image')

                if isinstance(element, LTRect):
                    # The first rect of a new table triggers its extraction.
                    if first_element and (table_num + 1) <= len(tables):
                        # pdfplumber bboxes are top-based; convert the
                        # table's bottom edge to pdfminer's bottom-based
                        # coordinate system.
                        lower_side = page.bbox[3] - tables[table_num].bbox[3]
                        upper_side = element.y1

                        table = extract_table(pdf_path, pagenum, table_num)
                        table_string = table_converter(table)
                        text_from_tables.append(table_string)
                        page_content.append(table_string)

                        table_extraction_flag = True
                        first_element = False
                        page_text.append('table')
                        line_format.append('table')

                    if (lower_side is not None
                            and element.y0 >= lower_side
                            and element.y1 <= upper_side):
                        # Still inside the current table's vertical span.
                        pass
                    elif (i + 1 >= len(page_elements)
                          or not isinstance(page_elements[i + 1][1], LTRect)):
                        # Left the table region: resume text collection.
                        # (The original indexed page_elements[i+1] without
                        # a bounds check and raised IndexError when a rect
                        # was the last element on a page.)
                        table_extraction_flag = False
                        first_element = True
                        table_num += 1

            dctkey = 'Page_' + str(pagenum)
            text_per_page[dctkey] = [page_text, line_format, text_from_images,
                                     text_from_tables, page_content]
    finally:
        # Close both documents even if a page blows up mid-extraction.
        pdfFileObj.close()
        pdf.close()

    return text_per_page
|
|
|
# Path of the PDF to process (Colab upload location; file has no extension).
pdf_path = '/content/Article_11'

# Extract everything — text, OCR'd images, tables — keyed by 'Page_<n>'.
text_per_page = read_pdf(pdf_path)

# The bare expressions below are notebook-cell display outputs; they have
# no effect when this file is run as a plain script.
text_per_page.keys()

page_0 = text_per_page['Page_0']

page_0

# Flatten Page_0's five nested lists, keeping only string entries, then
# normalise whitespace (newlines -> spaces, trimmed) in each one in place.
page_0_clean = [item for sublist in page_0 for item in sublist if isinstance(item, str)]

for i in range(len(page_0_clean)):

    page_0_clean[i] = page_0_clean[i].replace('\n', ' ').strip()

page_0_clean
|
|
|
|
|
def process_pdf(pdf):
    """Placeholder for a Gradio PDF-processing callback.

    The original definition had no body at all, which is a syntax error —
    the file could not even be parsed. Raise explicitly until the
    implementation is written.
    """
    # TODO(review): implement (presumably by calling read_pdf on the
    # uploaded file) — intent not recoverable from the source.
    raise NotImplementedError("process_pdf is not implemented yet")
|
def speech(audio):
    """Gradio audio callback: normalise the waveform and transcribe it.

    Parameters
    ----------
    audio : tuple
        ``(sample_rate, samples)`` pair as produced by ``gr.Audio``
        (samples is a numpy integer array — TODO confirm against the
        Gradio version in use).

    Returns the transcribed text (str).
    """
    sr, y = audio
    y = y.astype(np.float32)

    # Normalise to [-1, 1]; guard against all-zero (silent) input, which
    # would otherwise divide by zero and produce NaNs.
    peak = np.max(np.abs(y))
    if peak > 0:
        y /= peak

    # NOTE(review): `transcriber` is not defined anywhere in this file —
    # it must be an ASR pipeline created elsewhere (e.g.
    # pipeline("automatic-speech-recognition")). Confirm before running.
    return transcriber({"sampling_rate": sr, "raw": y})["text"]
|
|
|
|
|
# Build and launch the Gradio UI. The original passed `transcribe`, a
# name never defined in this file; the audio callback defined above is
# `speech`, so wire that in instead.
demo = gr.Interface(
    speech,
    gr.Audio(sources=["microphone"]),
    "text",
)

demo.launch()