"""DUDE dataset loader""" |
|
|
|
import os
import json
import time
import logging
from io import BytesIO

import numpy as np
from tqdm import tqdm
from joblib import Parallel, delayed

import PyPDF2
from PIL import Image as PIL_Image
from datasets import load_dataset

logger = logging.getLogger(__name__)
|
|
|
|
|
MAX_PAGES = 50  # cap on the number of page images extracted per document
MAX_PDF_SIZE = 100000000  # ~100 MB guard on PDF size (not used in this script)
MIN_WIDTH, MIN_HEIGHT = 150, 150  # skip embedded images smaller than this in both dimensions
|
|
|
|
|
def load_json(json_path):
    with open(json_path, "r") as f:
        return json.load(f)
|
|
|
|
|
def save_json(json_path, data): |
|
with open(json_path, "w") as f: |
|
json.dump(data, f) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def pdf_to_images(document_filepath):
    """Extract the images embedded in a PDF (via PyPDF2), save them as JPEGs under the
    mirrored images/ directory, and return a metadata dict (document, num_pages,
    page_image_names)."""

    def images_to_pagenames(images, document_filepath, page_image_dir):
        # Derive page-image filenames from the PDF path, e.g.
        # .../PDF/val/<docId>.pdf -> .../images/val/<docId>_<page_idx>.jpg
        page_image_names = []
        for page_idx, page_image in enumerate(images):
            page_image_name = document_filepath.replace("PDF", "images").replace(
                ".pdf", f"_{page_idx}.jpg"
            )
            page_image_names.append(page_image_name)
            if not os.path.exists(page_image_name):
                page_image.save(page_image_name)
        return page_image_names
|
|
|
example = {} |
|
example["document"] = document_filepath |
|
    example["num_pages"] = 0
|
example["page_image_names"] = [] |
|
images = [] |
|
|
|
page_image_dir = "/".join(example["document"].split("/")[:-1]).replace("PDF", "images") |
|
    if not os.path.exists(page_image_dir):
        # exist_ok avoids a race when several parallel workers create the same directory
        os.makedirs(page_image_dir, exist_ok=True)
|
|
|
|
|
|
|
|
|
try: |
|
reader = PyPDF2.PdfReader(example["document"]) |
|
except Exception as e: |
|
        logger.warning(f"Failed to read PDF {example['document']}: {e}")
|
return example |
|
|
|
reached_page_limit = False |
|
page_iterator = reader.pages |
|
|
|
for p, page in enumerate(page_iterator): |
|
if reached_page_limit: |
|
break |
|
for image in page.images: |
|
|
|
|
|
|
|
if len(images) == MAX_PAGES: |
|
reached_page_limit = True |
|
break |
|
|
|
im = PIL_Image.open(BytesIO(image.data)) |
|
if im.width < MIN_WIDTH and im.height < MIN_HEIGHT: |
|
continue |
|
images.append(im) |
|
    example["num_pages"] = len(images)
|
if len(images) == 0: |
|
return example |
|
|
|
example["page_image_names"] = images_to_pagenames(images, example["document"], page_image_dir) |
|
|
|
return example |
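
# Rough sketch of what pdf_to_images() returns (hypothetical docId and paths):
# {"document": ".../PDF/val/<docId>.pdf",
#  "num_pages": 3,
#  "page_image_names": [".../images/val/<docId>_0.jpg", ...]}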
|
|
|
|
|
def pdf_to_images_block(document_paths_blocks): |
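    """Convert a block of PDF file paths to page images.

    Returns a dict keyed by docId; meant to be run in parallel over blocks of
    documents (see the joblib.Parallel call in __main__).
    """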
|
new_doc_metadata = {} |
|
for document_filepath in document_paths_blocks: |
|
docId = document_filepath.split("/")[-1].replace(".pdf", "") |
|
new_doc_metadata[docId] = pdf_to_images(document_filepath) |
|
return new_doc_metadata |
|
|
|
|
|
""" |
|
def get_document_metadata(docs_metadata, doc_id, document_filepath): |
|
|
|
if doc_id in docs_metadata and docs_metadata[doc_id]["num_pages"] != -1: |
|
num_pages = docs_metadata[doc_id]["num_pages"] |
|
page_image_names = docs_metadata[doc_id]["page_image_names"] |
|
|
|
else: |
|
try: |
|
images = pdf2image.convert_from_path(document_filepath) |
|
except: |
|
print(doc_id) |
|
return -1, -1 |
|
num_pages = len(images) |
|
page_image_dir = ("/").join( |
|
document_filepath.replace(documents_dir, page_images_dir).split("/")[:-1] |
|
) |
|
if not os.path.exists(page_image_dir): |
|
os.makedirs(page_image_dir) |
|
|
|
page_image_names = [] |
|
for page_idx, page_image in enumerate(images): |
|
page_image_name = document_filepath.replace(documents_dir, page_images_dir).replace( |
|
".pdf", f"_{page_idx}.jpg" |
|
) |
|
page_image_names.append(page_image_name.replace(page_images_dir, "")) |
|
|
|
if not os.path.exists(page_image_name): |
|
page_image.save(page_image_name) |
|
|
|
return num_pages, page_image_names |
|
|
|
|
|
def get_document_metadata_block(docs_metadata, documents_path_dict, documents_blocks): |
|
new_doc_metadata = {} |
|
for doc_id in documents_blocks: |
|
document_filepath = documents_path_dict[doc_id] |
|
num_pages, page_image_names = get_document_metadata( |
|
docs_metadata, doc_id, document_filepath |
|
) |
|
new_doc_metadata[doc_id] = {"num_pages": num_pages, "page_image_names": page_image_names} |
|
|
|
return new_doc_metadata |
|
""" |
|
|
|
|
|
def parse_textract_bbox(box): |
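    """Return a Textract bounding box as [left, width, top, height]; Textract values are already normalized to [0, 1]."""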
|
|
|
return np.array([box["Left"], box["Width"], box["Top"], box["Height"]]) |
|
|
|
|
|
def parse_azure_box(box, page_width, page_height): |
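    """Convert an 8-value quadrilateral [x0, y0, ..., x3, y3] (Azure-style OCR output)
    into a normalized [left, width, top, height] box, matching the Textract format above."""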
|
|
|
|
|
left = min(box[0], box[6]) |
|
right = max(box[2], box[4]) |
|
top = min(box[1], box[3]) |
|
bottom = max(box[5], box[7]) |
|
width = right - left |
|
height = bottom - top |
|
|
|
|
|
left = left / page_width |
|
top = top / page_height |
|
width = width / page_width |
|
height = height / page_height |
|
|
|
return [left, width, top, height] |
|
|
|
|
|
def get_ocr_information(ocr_path, num_pages): |
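    """Parse a Textract-style OCR file and group WORD tokens and boxes per page.

    Returns two lists of length num_pages: lowercased tokens and their normalized
    [left, width, top, height] bounding boxes.
    """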
|
ocr_info = load_json(ocr_path) |
|
|
|
|
|
page_ocr_tokens = [[] for page_ix in range(num_pages)] |
|
page_ocr_boxes = [[] for page_ix in range(num_pages)] |
|
for ocr_block in ocr_info: |
|
for ocr_extraction in ocr_block["Blocks"]: |
|
if ocr_extraction["BlockType"] == "WORD": |
|
text = ocr_extraction["Text"].lower() |
|
bounding_box = parse_textract_bbox(ocr_extraction["Geometry"]["BoundingBox"]) |
|
page = ocr_extraction["Page"] - 1 |
|
|
|
page_ocr_tokens[page].append(text) |
|
page_ocr_boxes[page].append(bounding_box) |
|
|
|
    for page in range(num_pages):
        # Store boxes as plain nested lists so the OCR info can be written out with save_json.
        page_ocr_boxes[page] = np.array(page_ocr_boxes[page]).tolist()

    return page_ocr_tokens, page_ocr_boxes
|
|
|
|
|
def create_header(split, version, has_answer): |
|
header = { |
|
"creation_time": time.time(), |
|
"version": version, |
|
"dataset_type": split, |
|
"has_answer": has_answer, |
|
} |
|
|
|
return header |
|
|
|
|
|
def get_document_info(documents_metadata, doc_id): |
|
doc_metadata = documents_metadata[doc_id] |
|
num_pages = doc_metadata["num_pages"] |
|
page_image_names = doc_metadata["page_image_names"] |
|
return num_pages, page_image_names |
|
|
|
|
|
def format_answers(answers_list): |
|
answers_list = list(set([answer.lower() for answer in answers_list])) |
|
return answers_list |
|
|
|
|
|
def create_imdb_record_from_json( |
|
record, documents_metadata, documents_ocr_information, split, include_answers |
|
): |
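    """Build one imdb-style record that joins a QA annotation with the document's
    page image names and OCR tokens/boxes."""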
|
|
|
doc_id = record["docId"] |
|
|
|
num_pages, page_image_names = get_document_info(documents_metadata, doc_id) |
|
document_ocr_info = documents_ocr_information[doc_id] |
|
|
|
if include_answers: |
|
answers = format_answers(record["answers"]) |
|
else: |
|
answers = None |
|
|
|
imdb_record = { |
|
"question_id": record["questionId"], |
|
"question": record["question"], |
|
"docId": doc_id, |
|
"image_name": page_image_names, |
|
"num_pages": num_pages, |
|
"ocr_tokens": document_ocr_info["ocr_tokens"], |
|
"ocr_normalized_boxes": document_ocr_info["ocr_boxes"], |
|
"set_name": split, |
|
"answers": answers, |
|
"answer_page": None, |
|
"extra": { |
|
|
|
|
|
|
|
"answer_type": record["answer_type"], |
|
}, |
|
} |
|
|
|
return imdb_record |
|
|
|
|
|
def create_imdb_from_json( |
|
data, documents_metadata, documents_ocr_information, split, version, include_answers |
|
): |
|
imdb_header = create_header(split, version, include_answers) |
|
|
|
imdb_records = [] |
|
for record in tqdm(data): |
|
imdb_records.append( |
|
create_imdb_record_from_json( |
|
record, documents_metadata, documents_ocr_information, split, include_answers |
|
) |
|
) |
|
|
|
imdb = [imdb_header] + imdb_records |
|
|
|
return imdb |
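
# The resulting list (header dict followed by record dicts) is typically stored with
# np.save(...) and read back with np.load(..., allow_pickle=True), since the entries
# are Python dicts (see the commented-out example at the bottom of this file).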
|
|
|
|
|
if __name__ == "__main__": |
|
dataset = load_dataset( |
|
"../DUDE_loader/DUDE_loader.py", |
|
"DUDE", |
|
data_dir="/home/jordy/Downloads/DUDE_train-val-test_binaries", |
|
) |
|
    splits = dataset.keys()
|
|
|
split = "val" |
|
|
|
OCR_paths = [] |
|
document_paths = [] |
|
    for i, x in enumerate(dataset[split]):
        if x["data_split"] != split:
            continue
|
if x["document"] not in document_paths: |
|
document_paths.append(x["document"]) |
|
OCR_paths.append(x["OCR"]) |
|
|
|
    # Limit to a small subset of documents for a quick test run.
    document_paths = document_paths[:30]
    OCR_paths = OCR_paths[:30]
|
|
|
|
|
|
|
|
|
documents_metadata_filename = "documents_metadata.json" |
|
if os.path.exists(documents_metadata_filename): |
|
documents_metadata = load_json(documents_metadata_filename) |
|
else: |
|
documents_metadata = {} |
|
|
|
|
|
num_jobs = 6 |
|
block_size = int(len(document_paths) / num_jobs) + 1 |
|
print(f"{block_size} * {num_jobs} = {block_size*num_jobs} ({len(document_paths)})") |
|
documents_blocks = [ |
|
document_paths[block_size * i : block_size * i + block_size] for i in range(num_jobs) |
|
] |
|
print( |
|
"chunksize", |
|
len(set([docId for doc_block in documents_blocks for docId in doc_block])), |
|
) |
|
    parallel_results = Parallel(n_jobs=num_jobs)(
        delayed(pdf_to_images_block)(documents_blocks[i]) for i in range(num_jobs)
    )
|
|
|
for block_result in parallel_results: |
|
for docId, metadata in tqdm(block_result.items()): |
|
if docId not in documents_metadata: |
|
documents_metadata[docId] = metadata |
|
|
|
save_json(documents_metadata_filename, documents_metadata) |
|
|
|
|
|
documents_ocr_filename = "documents_ocr.json" |
|
|
|
    if os.path.exists(documents_ocr_filename):
        documents_ocr_info = load_json(documents_ocr_filename)
    else:
        documents_ocr_info = {}
        no_ocr = []
        error_ocr = []

        for i, document_filepath in enumerate(document_paths):
            docId = document_filepath.split("/")[-1].replace(".pdf", "")
            try:
                ocr_tokens, ocr_boxes = get_ocr_information(
                    OCR_paths[i], documents_metadata[docId]["num_pages"]
                )
                documents_ocr_info[docId] = {"ocr_tokens": ocr_tokens, "ocr_boxes": ocr_boxes}
            except FileNotFoundError:
                no_ocr.append(docId)
            except KeyError:
                error_ocr.append(docId)

        if no_ocr or error_ocr:
            logger.warning(
                f"Missing OCR files for {len(no_ocr)} documents; "
                f"OCR/metadata parsing errors for {len(error_ocr)} documents"
            )

        save_json(documents_ocr_filename, documents_ocr_info)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
""" |
|
Steps that need to happen after loading the HF DUDE dataset:

1. Convert each PDF to page images (saved under the mirrored images/ directory)
   and collect per-document metadata (num_pages, page_image_names).
2. Process the OCR files to obtain documents_ocr_info (tokens and normalized
   boxes per page).
|
""" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
""" |
|
train_imdb = create_imdb_from_json( |
|
train_data, |
|
documents_metadata=documents_metadata, |
|
documents_ocr_information=doc_ocr_info, |
|
split="train", |
|
version="0.1", |
|
include_answers=True, |
|
) |
|
val_imdb = create_imdb_from_json( |
|
val_data, |
|
documents_metadata=documents_metadata, |
|
documents_ocr_information=doc_ocr_info, |
|
    split="val",
|
version="0.1", |
|
include_answers=True, |
|
) |
|
np.save("Imdb/train_imdb.npy", train_imdb) |
|
np.save("Imdb/val_imdb.npy", val_imdb) |
|
|
|
|
""" |
|
|