"""DUDE dataset loader""" |
|
|
|
import os
import time
import json
from io import BytesIO

import numpy as np
from tqdm import tqdm
from joblib import Parallel, delayed

import pdf2image
import PyPDF2

from PIL import Image as PIL_Image
from datasets import load_dataset, logging
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
PIL_Image.MAX_IMAGE_PIXELS = None  # disable Pillow's decompression-bomb limit; some document scans are very large
|
|
|
MAX_PAGES = 50  # hard cap on the number of pages kept per document
MAX_PDF_SIZE = 100000000  # ~100 MB upper bound on PDF size
MIN_WIDTH, MIN_HEIGHT = 150, 150  # embedded images smaller than this are skipped
|
|
|
|
|
def load_json(json_path):
    with open(json_path, "r") as f:
        return json.load(f)
|
|
|
|
|
def save_json(json_path, data): |
|
with open(json_path, "w") as f: |
|
json.dump(data, f) |
|
|
|
|
|
def get_images_pdf2image(document_filepath, chunksize=10):
    """Rasterize up to MAX_PAGES pages of a PDF with pdf2image, converting `chunksize` pages at a time."""
    info = pdf2image.pdfinfo_from_path(document_filepath, userpw=None, poppler_path=None)
    maxPages = min(MAX_PAGES, info["Pages"])

    images = []
|
for page in range(1, maxPages + 1, chunksize): |
|
try: |
|
images.extend( |
|
pdf2image.convert_from_path( |
|
document_filepath, |
|
first_page=page, |
|
last_page=min(page + chunksize - 1, maxPages), |
|
) |
|
) |
|
except Exception as e: |
|
logger.warning(f"page: {page} get_images {e}") |
|
return images |
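# Minimal usage sketch (the path is hypothetical):
#   pages = get_images_pdf2image("/data/DUDE/PDF/abc123.pdf")
#   print(len(pages), pages[0].size)  # page count and (width, height) of the first rendered page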
|
|
|
|
|
def pdf_to_images(document_filepath, converter="PyPDF2"):
    """Convert a PDF into per-page JPEGs stored under the sibling `images/` directory.

    With converter="PyPDF2" the images embedded in each page are extracted;
    with converter="pdf2image" the pages are rasterized via poppler.
    """

    def images_to_pagenames(images, document_filepath, page_image_dir):
        page_image_names = []
|
for page_idx, page_image in enumerate(images): |
|
page_image_name = document_filepath.replace("PDF", "images").replace(".pdf", f"_{page_idx}.jpg") |
|
page_image_names.append( |
|
page_image_name.replace(page_image_dir, page_image_dir.split("/")[-1]) |
|
) |
|
if not os.path.exists(page_image_name): |
|
page_image.convert("RGB").save(page_image_name) |
|
return page_image_names |
|
|
|
example = {} |
|
example["num_pages"] = 0 |
|
example["page_image_names"] = [] |
|
images = [] |
|
|
|
page_image_dir = "/".join(document_filepath.split("/")[:-1]).replace("PDF", "images") |
|
if not os.path.exists(page_image_dir): |
|
os.makedirs(page_image_dir) |
|
|
reached_page_limit = False |
|
|
|
if converter == "PyPDF2": |
|
try: |
|
reader = PyPDF2.PdfReader(document_filepath) |
|
except Exception as e: |
|
logger.warning(f"read_pdf {e}") |
|
return example |
|
|
|
for p, page in enumerate(reader.pages): |
|
if reached_page_limit: |
|
break |
|
try: |
|
for image in page.images: |
|
if len(images) == MAX_PAGES: |
|
reached_page_limit = True |
|
break |
|
im = PIL_Image.open(BytesIO(image.data)) |
|
if im.width < MIN_WIDTH and im.height < MIN_HEIGHT: |
|
continue |
|
images.append(im) |
|
except Exception as e: |
|
logger.warning(f"get_images {e}") |
|
|
|
elif converter == "pdf2image": |
|
images = get_images_pdf2image(document_filepath) |
|
|
|
example["num_pages"] = len(images) |
|
if len(images) == 0: |
|
return example |
|
|
|
example["page_image_names"] = images_to_pagenames(images, document_filepath, page_image_dir) |
|
|
|
return example |
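# pdf_to_images returns a small metadata dict, e.g. (illustrative values, assuming the PDFs
# live directly under a PDF/ directory so its images/ sibling is the last path component):
#   {"num_pages": 2, "page_image_names": ["images/abc123_0.jpg", "images/abc123_1.jpg"]}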
|
|
|
|
|
def pdf_to_images_block(document_paths_blocks, converter): |
|
new_doc_metadata = {} |
|
for document_filepath in document_paths_blocks: |
|
docId = document_filepath.split("/")[-1].replace(".pdf", "") |
|
new_doc_metadata[docId] = pdf_to_images(document_filepath, converter=converter) |
|
return new_doc_metadata |
|
|
|
|
|
def parse_textract_bbox(box):
    """Return an Amazon Textract bounding box as [left, width, top, height]; Textract values are already normalized to [0, 1]."""
    return np.array([box["Left"], box["Width"], box["Top"], box["Height"]])
|
|
|
|
|
def parse_azure_box(box, page_width, page_height):
    """Convert an 8-value Azure OCR bounding polygon (four x, y corner points) into
    [left, width, top, height], normalized by the page dimensions."""
    left = min(box[0], box[6])
    right = max(box[2], box[4])
    top = min(box[1], box[3])
    bottom = max(box[5], box[7])
    width = right - left
    height = bottom - top

    # Normalize to [0, 1] so the output matches the Textract convention.
    left = left / page_width
    top = top / page_height
    width = width / page_width
    height = height / page_height

    return [left, width, top, height]
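# Worked example (hypothetical values): a word spanning x 100-300 and y 50-80 on a 1000x800 page,
#   parse_azure_box([100, 50, 300, 50, 300, 80, 100, 80], 1000, 800) -> [0.1, 0.2, 0.0625, 0.0375],
# i.e. the same [left, width, top, height] order produced by parse_textract_bbox.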
|
|
|
|
|
def get_ocr_information(ocr_path, num_pages):
    """Load the Amazon Textract output for one document and return per-page word tokens and normalized boxes."""
    ocr_info = load_json(ocr_path)

    ocr_pages = ocr_info[0]["DocumentMetadata"]["Pages"]
    if num_pages != ocr_pages and num_pages != MAX_PAGES:
        raise AssertionError("Pages from images and OCR do not match; fall back to pdf2image")
|
|
|
page_ocr_tokens = [[] for page_ix in range(num_pages)] |
|
page_ocr_boxes = [[] for page_ix in range(num_pages)] |
|
for ocr_block in ocr_info: |
|
for ocr_extraction in ocr_block["Blocks"]: |
|
if ocr_extraction["BlockType"] == "WORD": |
|
text = ocr_extraction["Text"].lower() |
|
bounding_box = parse_textract_bbox(ocr_extraction["Geometry"]["BoundingBox"]).tolist() |
|
page = ocr_extraction["Page"] - 1 |
|
|
|
if page >= num_pages: |
|
break |
|
|
|
page_ocr_tokens[page].append(text) |
|
page_ocr_boxes[page].append(bounding_box) |
|
|
|
""" |
|
for page in range(num_pages): |
|
page_ocr_boxes[page] = np.array(page_ocr_boxes[page]) |
|
""" |
|
return page_ocr_tokens, page_ocr_boxes |
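# The OCR JSON is assumed to follow the Amazon Textract response layout, roughly:
#   [{"DocumentMetadata": {"Pages": 2},
#     "Blocks": [{"BlockType": "WORD", "Text": "invoice", "Page": 1,
#                 "Geometry": {"BoundingBox": {"Left": 0.1, "Top": 0.05, "Width": 0.08, "Height": 0.02}}},
#                ...]},
#    ...]
# i.e. a list of response chunks, each carrying WORD blocks with page-normalized boxes.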
|
|
|
|
|
def create_header(split, version, has_answer): |
|
header = { |
|
"creation_time": time.time(), |
|
"version": version, |
|
"dataset_type": split, |
|
"has_answer": has_answer, |
|
} |
|
|
|
return header |
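# Example header (illustrative values):
#   {"creation_time": 1700000000.0, "version": "0.1", "dataset_type": "train", "has_answer": True}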
|
|
|
|
|
def get_document_info(documents_metadata, docId): |
|
doc_metadata = documents_metadata[docId] |
|
num_pages = doc_metadata["num_pages"] |
|
page_image_names = doc_metadata["page_image_names"] |
|
return num_pages, page_image_names |
|
|
|
|
|
def format_answers(answers_list):
    """Deduplicate the list of ground-truth answers."""
    return list(set(answers_list))
|
|
|
|
|
def create_imdb_record_from_json(
    record, documents_metadata, documents_ocr_info, split, include_answers, include_variants=False
):
    """Build a single ImDB record from a DUDE question record plus the cached page-image and OCR metadata."""
    docId = record["docId"].split("_")[0]
    try:
        num_pages, page_image_names = get_document_info(documents_metadata, docId)
        document_ocr_info = documents_ocr_info[docId]
    except Exception as e:
        # Page or OCR metadata is missing for this document; skip the record.
        print("Missing: ", e, docId)
        return {}
|
|
|
    if include_answers:
        answers = format_answers(record["answers"])
    else:
        answers = None

    # Variants are only merged in when answers are present and the answer is not a list type.
    if include_answers and include_variants and record["answers_variants"] and "list" not in record["answer_type"]:
        answers += record["answers_variants"]
|
|
|
    page_image_dir = "/".join(record["document"].split("/")[:-2]).replace("PDF", "images")
    if not page_image_names or any(
        not os.path.exists(os.path.join(page_image_dir, p)) for p in page_image_names
    ):
        print("Missing images: ", docId)
        return {}
|
imdb_record = { |
|
"question_id": record["questionId"], |
|
"question": record["question"], |
|
"docId": docId, |
|
"image_name": page_image_names, |
|
"num_pages": num_pages, |
|
"ocr_tokens": document_ocr_info["ocr_tokens"], |
|
"ocr_normalized_boxes": document_ocr_info["ocr_boxes"], |
|
"set_name": split, |
|
"answers": answers, |
|
"answer_page": None, |
|
"extra": { |
|
|
|
|
|
|
|
"answer_type": record["answer_type"], |
|
}, |
|
} |
|
|
|
return imdb_record |
|
|
|
|
|
def create_imdb_from_json(
    data,
    documents_metadata,
    documents_ocr_info,
    split,
    version,
    include_answers=True,
    include_variants=False,
):
    """Convert all records of one split into ImDB format: a header dict followed by one record per question."""
    imdb_header = create_header(split, version, include_answers)
|
|
|
imdb_records = [] |
|
for record in tqdm(data): |
|
imdb_record = create_imdb_record_from_json( |
|
record, documents_metadata, documents_ocr_info, split, include_answers, include_variants |
|
) |
|
if imdb_record: |
|
imdb_records.append(imdb_record) |
|
|
|
imdb = [imdb_header] + imdb_records |
|
|
|
return imdb |
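# The resulting "imdb" is a plain Python list (header dict first, then one dict per question).
# np.save below stores it as an object array, which is why it is re-loaded with allow_pickle=True.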
|
|
|
|
|
def parse_arguments(): |
|
import argparse |
|
|
|
parser = argparse.ArgumentParser(description="Instantiate HuggingFace dataloader and convert to ImDB format") |
|
|
|
parser.add_argument( |
|
"--redo-imdb-build", |
|
action="store_true", |
|
default=False, |
|
help="Whether to rebuild the imdb record and overwrite", |
|
) |
|
|
|
|
|
    parser.add_argument(
        "--no-include-variants",
        action="store_false",
        dest="include_variants",
        default=True,
        help="Do not include answer variants as additional ground truths",
    )
|
|
|
|
|
parser.add_argument( |
|
"--DUDE_config", |
|
type=str, |
|
default="Amazon_original", |
|
help="HF Config to load to control OCR version", |
|
) |
|
parser.add_argument( |
|
"--data_dir", |
|
type=str, |
|
default="/home/jordy/Downloads/DUDE_train-val-test_binaries", |
|
help="Load PDFs and store images", |
|
) |
|
|
|
return parser.parse_args() |
|
|
|
|
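# Example invocation (script name and data directory are illustrative):
#   python DUDE_imdb_loader.py --DUDE_config Amazon_original --data_dir /path/to/DUDE_train-val-test_binaries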
|
if __name__ == "__main__": |
|
""" |
|
Parse and redo |
|
""" |
|
args = parse_arguments() |
|
|
|
dataset = load_dataset("jordyvl/DUDE_loader", args.DUDE_config, data_dir=args.data_dir) |
|
|
|
splits = dataset.keys() |
|
|
|
for split in splits: |
|
split_indices = [] |
|
OCR_paths = [] |
|
document_paths = [] |
|
for i, x in enumerate(dataset[split]): |
|
if x["data_split"] != split: |
|
continue |
|
if x["document"] not in document_paths: |
|
document_paths.append(x["document"]) |
|
OCR_paths.append(x["OCR"]) |
|
split_indices.append(i) |
|
|
documents_metadata_filename = f"{split}-documents_metadata.json" |
|
if os.path.exists(documents_metadata_filename): |
|
print(f"Loading from disk: {documents_metadata_filename}") |
|
documents_metadata = load_json(documents_metadata_filename) |
|
else: |
|
documents_metadata = {} |
|
num_jobs = 6 |
|
block_size = int(len(document_paths) / num_jobs) + 1 |
|
print(f"{block_size} * {num_jobs} = {block_size*num_jobs} ({len(document_paths)})") |
|
document_blocks = [document_paths[block_size * i : block_size * i + block_size] for i in range(num_jobs)] |
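            # block_size * num_jobs >= len(document_paths), so the last block may be shorter (or empty);
            # the print below sanity-checks that chunking neither dropped nor duplicated documents.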
|
            print(
                "unique documents across blocks:",
                len(set(docId for doc_block in document_blocks for docId in doc_block)),
            )
|
parallel_results = Parallel(n_jobs=num_jobs)( |
|
delayed(pdf_to_images_block)(document_blocks[i], "pdf2image") for i in range(num_jobs) |
|
) |
|
|
|
for block_result in parallel_results: |
|
for docId, metadata in tqdm(block_result.items()): |
|
if docId not in documents_metadata: |
|
documents_metadata[docId] = metadata |
|
|
|
save_json(documents_metadata_filename, documents_metadata) |
|
|
|
|
|
documents_ocr_filename = f"{split}-documents_ocr.json" |
|
|
|
if os.path.exists(documents_ocr_filename): |
|
print(f"Loading from disk: {documents_ocr_filename}") |
|
documents_ocr_info = load_json(documents_ocr_filename) |
|
else: |
|
documents_ocr_info = {} |
|
no_ocr = [] |
|
error_ocr = [] |
|
|
|
for i, document_filepath in enumerate(document_paths): |
|
docId = document_filepath.split("/")[-1].replace(".pdf", "") |
|
try: |
|
ocr_tokens, ocr_boxes = get_ocr_information(OCR_paths[i], documents_metadata[docId]["num_pages"]) |
|
documents_ocr_info[docId] = {"ocr_tokens": ocr_tokens, "ocr_boxes": ocr_boxes} |
|
except AssertionError as e: |
|
print(f"image2pages issue: {e}") |
|
error_ocr.append(docId) |
|
except IndexError as e: |
|
print(f"pages issue: {e}") |
|
error_ocr.append(docId) |
|
except FileNotFoundError as e: |
|
print(f"FileNotFoundError issue: {e}") |
|
no_ocr.append(docId) |
|
except KeyError as e: |
|
print(f"Keyerror issue: {e}") |
|
error_ocr.append(docId) |
|
|
|
save_json(documents_ocr_filename, documents_ocr_info) |
|
|
|
imdb_filename = f"imdb_{split}.npy" |
|
if os.path.exists(imdb_filename) and not args.redo_imdb_build: |
|
print(f"Loading from disk: {imdb_filename}") |
|
imdb = np.load(imdb_filename, allow_pickle=True) |
|
|
|
else: |
|
imdb = create_imdb_from_json( |
|
dataset[split], |
|
documents_metadata=documents_metadata, |
|
documents_ocr_info=documents_ocr_info, |
|
split=split, |
|
version="0.1", |
|
                include_answers=(split != "test"),
                include_variants=args.include_variants,
|
) |
|
np.save(imdb_filename, imdb) |
|
|