darentang committed on
Commit
39c9410
·
1 Parent(s): a1948c3

created generated.py file

Browse files
Files changed (1) hide show
  1. generated.py +126 -0
generated.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ import json
3
+ import os
4
+ from pathlib import Path
5
+ import datasets
6
+ from PIL import Image
7
+ # import torch
8
+ # from detectron2.data.transforms import ResizeTransform, TransformList
9
# Module-level logger, following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)

# BibTeX entry for the ICDAR 2019 SROIE competition paper (used as the
# dataset citation in DatasetInfo).
_CITATION = """\
@article{2019,
title={ICDAR2019 Competition on Scanned Receipt OCR and Information Extraction},
url={http://dx.doi.org/10.1109/ICDAR.2019.00244},
DOI={10.1109/icdar.2019.00244},
journal={2019 International Conference on Document Analysis and Recognition (ICDAR)},
publisher={IEEE},
author={Huang, Zheng and Chen, Kai and He, Jianhua and Bai, Xiang and Karatzas, Dimosthenis and Lu, Shijian and Jawahar, C. V.},
year={2019},
month={Sep}
}
"""

# Short dataset description — currently just a pointer to the paper.
_DESCRIPTION = """\
https://arxiv.org/abs/2103.10213
"""
25
+
26
+
27
def load_image(image_path):
    """Open the image at *image_path* and return it with its dimensions.

    Returns:
        A tuple ``(image, (width, height))`` where ``image`` is the opened
        PIL image object.
    """
    image = Image.open(image_path)
    return image, image.size
31
+
32
+
33
def normalize_bbox(bbox, size):
    """Scale a pixel-space box to the 0-1000 coordinate grid.

    Args:
        bbox: ``[left, top, right, bottom]`` in pixel coordinates.
        size: ``(width, height)`` of the source image.

    Returns:
        The box rescaled so both axes span 0-1000, truncated to ints.
    """
    width, height = size
    left, top, right, bottom = bbox
    return [
        int(1000 * left / width),
        int(1000 * top / height),
        int(1000 * right / width),
        int(1000 * bottom / height),
    ]
40
+
41
+
42
+ def _get_drive_url(url):
43
+ base_url = 'https://drive.google.com/uc?id='
44
+ split_url = url.split('/')
45
+ return base_url + split_url[5]
46
+
47
+
48
# Direct-download URL(s) for the SROIE archive, converted from Drive share
# links by _get_drive_url. download_and_extract receives this list and
# returns a parallel list of local paths.
_URLS = [
    _get_drive_url(
        "https://drive.google.com/file/d/1FFNNKBzBXgGc8h8Du_hxkJblgQJO3Foe/view?usp=sharing"),
]
52
+
53
+
54
class SroieConfig(datasets.BuilderConfig):
    """BuilderConfig for the SROIE dataset."""

    def __init__(self, **kwargs):
        """Create a SROIE builder config.

        Args:
            **kwargs: keyword arguments forwarded to
                ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
63
+
64
+
65
class Sroie(datasets.GeneratorBasedBuilder):
    """Dataset builder for the SROIE scanned-receipt information-extraction dataset."""

    BUILDER_CONFIGS = [
        SroieConfig(name="sroie", version=datasets.Version(
            "1.0.0"), description="SROIE dataset"),
    ]

    def _info(self):
        """Return dataset metadata: the feature schema, citation and homepage."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    # boxes are normalized to the 0-1000 grid (see normalize_bbox)
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=['O', 'B-ABN', 'B-BILLER', 'B-BILLER_ADDRESS', 'B-BILLER_POST_CODE', 'B-DUE_DATE',
                                   'B-GST', 'B-INVOICE_DATE', 'B-INVOICE_NUMBER', 'B-SUBTOTAL', 'B-TOTAL', 'I-BILLER_ADDRESS']
                        )
                    ),
                    # "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
                    "image_path": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
            homepage="https://arxiv.org/abs/2103.10213",
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive, then return train/test SplitGenerators."""
        downloaded_file = dl_manager.download_and_extract(_URLS)
        # The single archive extracts into an "sroie" directory that contains
        # "train" and "test" subfolders.
        dest = Path(downloaded_file[0]) / "sroie"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={
                    "filepath": dest / "train"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": dest / "test"}
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from the split directory *filepath*.

        Expects two sibling folders under *filepath*: ``images/`` with the
        receipt scans, and ``tagged/`` with one JSON annotation file per image
        (same stem, ``.json`` extension) holding ``words``, ``bbox`` and
        ``labels`` lists — TODO confirm exact annotation schema against the
        released archive.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "tagged")
        img_dir = os.path.join(filepath, "images")
        # Sort so example order (and hence the integer keys) is deterministic.
        for guid, fname in enumerate(sorted(os.listdir(img_dir))):
            name = os.path.splitext(fname)[0]
            ann_path = os.path.join(ann_dir, name + ".json")
            with open(ann_path, "r", encoding="utf8") as f:
                data = json.load(f)
            image_path = os.path.join(img_dir, fname)

            # The image is opened only to read its size for bbox normalization;
            # the pixel data itself is not stored in the example.
            _, size = load_image(image_path)

            boxes = [normalize_bbox(box, size) for box in data["bbox"]]

            yield guid, {"id": str(guid), "words": data["words"], "bboxes": boxes, "ner_tags": data["labels"], "image_path": image_path}