katanaml commited on
Commit
3a282d3
·
1 Parent(s): 1d60bc0

Upload cord.py

Browse files
Files changed (1) hide show
  1. cord.py +161 -0
cord.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+
4
+ import datasets
5
+
6
+ logger = datasets.logging.get_logger(__name__)
7
+
8
+ _CITATION = """\
9
+ @article{park2019cord,
10
+ title={CORD: A Consolidated Receipt Dataset for Post-OCR Parsing},
11
+ author={Park, Seunghyun and Shin, Seung and Lee, Bado and Lee, Junyeop and Surh, Jaeheung and Seo, Minjoon and Lee, Hwalsuk}
12
+ booktitle={Document Intelligence Workshop at Neural Information Processing Systems}
13
+ year={2019}
14
+ }
15
+ """
16
+
17
+ _DESCRIPTION = """\
18
+ https://huggingface.co/datasets/katanaml/cord
19
+ """
20
+
21
+
22
def normalize_bbox(bbox, width, height):
    """Rescale an absolute [x0, y0, x1, y1] pixel box to a 0-1000 grid.

    Args:
        bbox: four coordinates [left, top, right, bottom] in image pixels.
        width: image width in pixels (divides the x coordinates).
        height: image height in pixels (divides the y coordinates).

    Returns:
        A list of four ints, each coordinate scaled into [0, 1000].
    """
    # x coordinates are divided by width, y coordinates by height.
    divisors = (width, height, width, height)
    return [int(1000 * (coord / dim)) for coord, dim in zip(bbox, divisors)]
29
+
30
+
31
class CordConfig(datasets.BuilderConfig):
    """Configuration object for the CORD dataset builder."""

    def __init__(self, **kwargs):
        """Create a CORD config.

        Args:
            **kwargs: forwarded unchanged to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
40
+
41
+
42
class Cord(datasets.GeneratorBasedBuilder):
    """CORD receipt dataset: OCR tokens, 0-1000-normalized boxes and NER tags."""

    BUILDER_CONFIGS = [
        CordConfig(name="cord", version=datasets.Version("1.0.0"), description="CORD dataset"),
    ]

    def _info(self):
        """Declare the example schema, homepage and citation for this dataset."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=['O',
                                   'menu.cnt',
                                   'menu.discountprice',
                                   'menu.nm',
                                   'menu.num',
                                   'menu.price',
                                   'menu.sub_cnt',
                                   'menu.sub_nm',
                                   'menu.sub_price',
                                   'menu.unitprice',
                                   'sub_total.discount_price',
                                   'sub_total.etc',
                                   'sub_total.service_price',
                                   'sub_total.subtotal_price',
                                   'sub_total.tax_price',
                                   'total.cashprice',
                                   'total.changeprice',
                                   'total.creditcardprice',
                                   'total.emoneyprice',
                                   'total.menuqty_cnt',
                                   'total.menutype_cnt',
                                   'total.total_etc',
                                   'total.total_price']
                        )
                    ),
                    "image_path": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/katanaml/cord",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the archive once and return train/test/validation generators."""
        downloaded_file = dl_manager.download_and_extract(
            "https://huggingface.co/datasets/katanaml/cord/resolve/main/dataset.zip")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/CORD/train/"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/CORD/test/"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": f"{downloaded_file}/CORD/dev/"}
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(guid, example)`` pairs for one split directory.

        Args:
            filepath: split root containing ``json/`` (annotations) and
                ``image/`` (matching ``.png`` receipt scans).

        Yields:
            ``(guid, dict)`` with keys ``id``, ``tokens``, ``bboxes``,
            ``ner_tags`` and ``image_path``; one example per annotation file.
        """
        guid = -1

        # Rare/unsupported categories are collapsed to the 'O' (outside) tag.
        replacing_labels = ['menu.etc', 'menu.itemsubtotal', 'menu.sub_etc', 'menu.sub_unitprice', 'menu.vatyn',
                            'void_menu.nm', 'void_menu.price', 'sub_total.othersvc_price']

        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "json")
        img_dir = os.path.join(filepath, "image")

        for file in sorted(os.listdir(ann_dir)):
            guid += 1
            tokens = []
            bboxes = []
            ner_tags = []

            file_path = os.path.join(ann_dir, file)
            with open(file_path, "r", encoding="utf8") as f:
                data = json.load(f)

            # BUGFIX: the original `image_path.replace("json", "png")` replaced
            # EVERY "json" substring in the full path, so any directory or file
            # name containing "json" elsewhere produced a broken image path.
            # Swap only the file extension instead.
            image_path = os.path.join(img_dir, os.path.splitext(file)[0] + ".png")

            width, height = data["meta"]["image_size"]["width"], data["meta"]["image_size"]["height"]

            for item in data["valid_line"]:
                for word in item['words']:
                    txt = word['text']

                    # Skip empty words before spending work on the bounding box
                    # (the original normalized the box first, then skipped).
                    if len(txt) < 1:
                        continue

                    # Quad points 1 and 3 are taken as opposite corners of the
                    # axis-aligned box — assumes the CORD clockwise-from-top-left
                    # quad ordering; TODO confirm against the annotation spec.
                    x1 = word['quad']['x1']
                    y1 = word['quad']['y1']
                    x3 = word['quad']['x3']
                    y3 = word['quad']['y3']

                    box = normalize_bbox([x1, y1, x3, y3], width=width, height=height)

                    tokens.append(txt)
                    bboxes.append(box)

                    # Word inherits its line's category, demoted to 'O' if rare.
                    if item['category'] in replacing_labels:
                        ner_tags.append('O')
                    else:
                        ner_tags.append(item['category'])

            yield guid, {"id": str(guid), "tokens": tokens, "bboxes": bboxes, "ner_tags": ner_tags,
                         "image_path": image_path}