# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DUDE dataset loader"""

import os
import json
from typing import List

import pdf2image

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """
@inproceedings{dude2023icdar,
    title={ICDAR 2023 Challenge on Document UnderstanDing of Everything (DUDE)},
    author={Van Landeghem, Jordy et al.},
    booktitle={Proceedings of the ICDAR},
    year={2023}
}
"""

_DESCRIPTION = """\
DUDE requires models to understand and reason about document layouts in multi-page images/PDFs in order to answer questions about them.
Specifically, models need to incorporate the additional modality of layout present in the images/PDFs and reason
over it to answer DUDE questions.
"""  # DUDE Contains X questions and Y and ...

_HOMEPAGE = "https://rrc.cvc.uab.es/?ch=23"

_LICENSE = "CC BY 4.0"

_SPLITS = ["train", "val"]

_URLS = {
    # Sample data, useful for quick testing:
    # "binaries": "https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_binaries.tar.gz",
    # "annotations": "https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_dataset-sample_gt.json",
    "binaries": "https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_train-val-test_binaries.tar.gz",
    "annotations": "https://zenodo.org/record/7600505/files/DUDE_gt_release-candidate_trainval_exabs.json?download=1",
}

#'0017b64bd017f06db47e56a6a113e22e'
SKIP_DOC_IDS = ["ef03364aa27a0987c9870472e312aceb", "5c5a5880e6a73b4be2315d506ab0b15b"]


def parse_bbox(bbox):
    """Normalize a raw bounding-box annotation into a list of page-level boxes."""
    if bbox in [[], [[]]]:
        return None

    answers_page_bounding_boxes = []

    # Some annotations wrap the boxes in an extra level of nesting; unwrap it.
    if isinstance(bbox[0], list):
        bbox = bbox[0]

    keys = ["left", "top", "width", "height", "page"]

    for page_bb in bbox:
        if len(page_bb) == 0:
            continue
        # Keep only the expected keys, dropping extras such as "label",
        # "error", or "multipage_box".
        page_bb = {key: page_bb[key] for key in keys}
        answers_page_bounding_boxes.append(page_bb)
    return answers_page_bounding_boxes
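
# Illustrative parse_bbox example (the coordinate values are made up for
# documentation purposes, not taken from the dataset):
#
#   parse_bbox([[{"left": 10, "top": 20, "width": 100, "height": 15,
#                 "page": 1, "label": "extra"}]])
#   -> [{"left": 10, "top": 20, "width": 100, "height": 15, "page": 1}]
#
# Empty annotations such as [] or [[]] return None.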


def batched_conversion(pdf_file):
    """Convert a PDF to a list of page images, 10 pages at a time to bound memory use."""
    info = pdf2image.pdfinfo_from_path(pdf_file, userpw=None, poppler_path=None)
    max_pages = info["Pages"]

    logger.info(f"{pdf_file} has {max_pages} pages")

    images = []

    for page in range(1, max_pages + 1, 10):
        images.extend(
            pdf2image.convert_from_path(
                pdf_file, dpi=200, first_page=page, last_page=min(page + 10 - 1, max_pages)
            )
        )
    return images
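
# Example usage of batched_conversion (a sketch; "example.pdf" is a placeholder
# path, and pdf2image requires poppler to be installed on the system):
#
#   images = batched_conversion("example.pdf")
#   images[0].save("page_1.png")  # pdf2image returns PIL.Image objects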


def open_pdf_binary(pdf_file):
    with open(pdf_file, "rb") as f:
        return f.read()


class DUDEConfig(datasets.BuilderConfig):
    """BuilderConfig for DUDE."""

    def __init__(
        self,
        binary_mode: bool = False,
        **kwargs,
    ):
        """BuilderConfig for DUDE.

        Args:
          binary_mode: `boolean`, load binary PDFs/OCR rather than passing along paths on the local file system.
          **kwargs: keyword arguments forwarded to super.
        """
        super(DUDEConfig, self).__init__(description=_DESCRIPTION, **kwargs)
        self.binary_mode = binary_mode


class DUDE(datasets.GeneratorBasedBuilder):
    """DUDE dataset."""

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        DUDEConfig(name="DUDE", version=VERSION, binary_mode=False),
        DUDEConfig(name="DUDE-binary", version=VERSION, binary_mode=True),
    ]

    DEFAULT_CONFIG_NAME = "DUDE"  # NOTE: the default config does not seem to be picked up automatically

    def _info(self):
        features = datasets.Features(
            {
                "docId": datasets.Value("string"),
                "questionId": datasets.Value("string"),
                "question": datasets.Value("string"),
                "answers": datasets.Sequence(datasets.Value("string")),
                "answers_page_bounding_boxes": datasets.Sequence(
                    {
                        "left": datasets.Value("int32"),
                        "top": datasets.Value("int32"),
                        "width": datasets.Value("int32"),
                        "height": datasets.Value("int32"),
                        "page": datasets.Value("int32"),
                    }
                ),
                "answers_variants": datasets.Sequence(datasets.Value("string")),
                "answer_type": datasets.Value("string"),
                "data_split": datasets.Value("string"),
                "document": datasets.Value("binary") if self.config.binary_mode else datasets.Value("string"),
                "OCR": datasets.Value("binary") if self.config.binary_mode else datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        # Download the annotations JSON first; open() cannot read from a URL directly.
        annotations_path = dl_manager.download(_URLS["annotations"])
        with open(annotations_path, "r") as f:
            annotations = json.load(f)

        if self.config.data_dir:  # the archive was already unpacked to a custom directory
            binary_extraction_path = self.config.data_dir
        else:
            binaries_path = dl_manager.download(_URLS["binaries"])
            binary_extraction_path = dl_manager.extract(binaries_path)
            # The tarball was packed with absolute paths, so its content ends up
            # nested under home/jordy/Downloads/<archive name> after extraction.
            archive_name = os.path.basename(_URLS["binaries"]).replace(".tar.gz", "")
            binary_extraction_path = os.path.join(
                binary_extraction_path, "home/jordy/Downloads", archive_name
            )

        splits = []
        for split in _SPLITS:  # split archive
            splits.append(
                datasets.SplitGenerator(
                    name=split,
                    gen_kwargs={
                        "binary_extraction_path": binary_extraction_path,
                        "annotations": annotations,
                        "split": split,
                    },
                )
            )
        return splits

    def _generate_examples(self, binary_extraction_path, annotations, split):
        def retrieve_doc(docid):
            return os.path.join(binary_extraction_path, "PDF", split, docid + ".pdf")

        def retrieve_OCR(docid, ocr_engine="Amazon", fmt="original"):
            return os.path.join(
                binary_extraction_path, "OCR", ocr_engine, docid + f"_{fmt}.json"
            )

        annotations = [x for x in annotations if x["data_split"] == split]

        for i, a in enumerate(annotations):
            a["data_split"] = split
            if a["docId"] in SKIP_DOC_IDS:
                continue
            a["answers_page_bounding_boxes"] = parse_bbox(a["answers_page_bounding_boxes"])
            docpath = retrieve_doc(a["docId"])
            ocrpath = retrieve_OCR(a["docId"])
            if self.config.binary_mode:
                # Load the raw PDF and OCR bytes into the example.
                with open(docpath, "rb") as f, open(ocrpath, "rb") as g:
                    a["document"] = f.read()
                    a["OCR"] = g.read()
            else:
                # Pass along local file-system paths instead of binary content.
                a["document"] = docpath
                a["OCR"] = ocrpath
            yield i, a
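
# Example usage (a sketch; assumes this script is published as the
# "jordyvl/DUDE_loader" dataset on the Hugging Face Hub, as the _URLS above
# suggest):
#
#   from datasets import load_dataset
#
#   # Path mode: "document" and "OCR" are local file-system paths.
#   ds = load_dataset("jordyvl/DUDE_loader", "DUDE", split="train")
#   print(ds[0]["question"], ds[0]["answers"])
#
#   # Binary mode: "document" and "OCR" hold raw PDF / OCR-JSON bytes.
#   ds_bin = load_dataset("jordyvl/DUDE_loader", "DUDE-binary", split="val")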