Languages:
Thai
holylovenia committed (verified)
Commit 8c0b1cb · 1 Parent(s): 0e2c0f7

Upload burapha_th.py with huggingface_hub

Files changed (1):
  1. burapha_th.py +167 -0
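For context, this is roughly how such a loading script is pushed with the huggingface_hub client; a minimal sketch, assuming a dataset repository id like SEACrowd/burapha_th (the actual repository id is not shown on this page):

# Hypothetical upload sketch; repo_id is an assumption, not taken from this commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="burapha_th.py",   # local file to push
    path_in_repo="burapha_th.py",      # destination path inside the repo
    repo_id="SEACrowd/burapha_th",     # assumed dataset repository id
    repo_type="dataset",
    commit_message="Upload burapha_th.py with huggingface_hub",
)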
burapha_th.py ADDED
@@ -0,0 +1,167 @@
+import os
+from pathlib import Path
+from typing import Dict, List, Tuple
+
+import datasets
+
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Licenses, Tasks
+
+_CITATION = """\
+@Article{app12084083,
+AUTHOR = {Onuean, Athita and Buatoom, Uraiwan and Charoenporn, Thatsanee and Kim, Taehong and Jung, Hanmin},
+TITLE = {Burapha-TH: A Multi-Purpose Character, Digit, and Syllable Handwriting Dataset},
+JOURNAL = {Applied Sciences},
+VOLUME = {12},
+YEAR = {2022},
+NUMBER = {8},
+ARTICLE-NUMBER = {4083},
+URL = {https://www.mdpi.com/2076-3417/12/8/4083},
+ISSN = {2076-3417},
+DOI = {10.3390/app12084083}
+}
+"""
+_DATASETNAME = "burapha_th"
+
+_DESCRIPTION = """\
+The dataset has 68 character classes, 10 digit classes, and 320 syllable classes.
+For constructing the dataset, 1072 Thai native speakers wrote on collection datasheets
+that were then digitized using a 300 dpi scanner.
+De-skewing, detection box and segmentation algorithms were applied to the raw scans
+for image extraction. The dataset, unlike all other known Thai handwriting datasets, retains
+existing noise, the white background, and all artifacts generated by scanning.
+"""
+
+_HOMEPAGE = "https://services.informatics.buu.ac.th/datasets/Burapha-TH/"
+
+_LICENSE = Licenses.UNKNOWN.value
+
+_LOCAL = False
+_LANGUAGES = ["tha"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+
+_URLS = {
+    "character": {"test": "https://services.informatics.buu.ac.th/datasets/Burapha-TH/character/20210306-test.zip", "train": "https://services.informatics.buu.ac.th/datasets/Burapha-TH/character/20210306-train.zip"},
+    "digit": {"test": "https://services.informatics.buu.ac.th/datasets/Burapha-TH/digit/20210307-test.zip", "train": "https://services.informatics.buu.ac.th/datasets/Burapha-TH/digit/20210307-train.zip"},
+    "syllable": {"test": "https://services.informatics.buu.ac.th/datasets/Burapha-TH/syllable/20210309-test-ori.zip", "train": "https://services.informatics.buu.ac.th/datasets/Burapha-TH/syllable/20210309-train-ori.zip"},
+}
+
+_SUPPORTED_TASKS = [Tasks.IMAGE_CAPTIONING]
+_SOURCE_VERSION = "1.0.0"
+
+_SEACROWD_VERSION = "2024.06.20"
+
+_SUBSETS = ["character", "digit", "syllable"]
+
+
+def config_constructor(subset: str, schema: str, version: str) -> SEACrowdConfig:
+    return SEACrowdConfig(
+        name=f"{_DATASETNAME}_{subset}_{schema}",
+        version=version,
+        description=f"{_DATASETNAME} {subset} {schema} schema",
+        schema=f"{schema}",
+        subset_id=f"{_DATASETNAME}_{subset}",
+    )
+
+
+class BuraphaThDataset(datasets.GeneratorBasedBuilder):
+    """
+    The dataset has 68 character classes, 10 digit classes, and 320 syllable classes.
+    For constructing the dataset, 1072 Thai native speakers wrote on collection datasheets
+    that were then digitized using a 300 dpi scanner.
+    De-skewing, detection box and segmentation algorithms were applied to the raw scans for
+    image extraction. The dataset, unlike all other known Thai handwriting datasets, retains
+    existing noise, the white background, and all artifacts generated by scanning.
+    """
+
+    BUILDER_CONFIGS = [config_constructor(subset, "source", _SOURCE_VERSION) for subset in _SUBSETS]
+    BUILDER_CONFIGS.extend([config_constructor(subset, "seacrowd_imtext", _SEACROWD_VERSION) for subset in _SUBSETS])
+
+    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_digit_source"
+
+    label_chr_dig = [str(i).zfill(2) for i in range(78)]  # 68 character classes + 10 digit classes
+    label_syl = [str(i).zfill(3) for i in range(320)]
+
+    def _info(self) -> datasets.DatasetInfo:
+        task = self.config.subset_id.split("_")[2]
+        label_names = self.label_chr_dig if task in ["character", "digit"] else self.label_syl
+        if self.config.schema == "source":
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "image_paths": datasets.Value("string"),
+                    "label": datasets.Sequence(datasets.ClassLabel(names=label_names)),
+                }
+            )
+        elif self.config.schema == "seacrowd_imtext":
+            features = schemas.image_text_features(label_names=label_names)
+        else:
+            raise NotImplementedError()
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        """Returns SplitGenerators."""
+
+        task = self.config.subset_id.split("_")[2]
+
+        _local_path = dl_manager.download_and_extract(_URLS[task])
+        train_path, test_path = _local_path["train"], _local_path["test"]
+        if task in ["character", "digit"]:
+            train_path = os.path.join(train_path, "train")
+            test_path = os.path.join(test_path, "test")
+        # for "syllable" type task
+        else:
+            train_path = os.path.join(train_path, "train-ori")
+            test_path = os.path.join(test_path, "test-ori")
+
+        def collect_label_to_images(split_path):
+            # Each class directory is named "<label>-..."; map the label prefix to
+            # the image paths it contains. Built per split so that the test split
+            # yields its own images instead of reusing the training data.
+            data_pair = {}
+            for dir_name in os.listdir(split_path):
+                label = dir_name.split("-")[0]
+                file_names = []
+                for file_name in os.listdir(os.path.join(split_path, dir_name)):
+                    file_names.append(os.path.join(split_path, dir_name, file_name))
+                data_pair[label] = file_names
+            return data_pair
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepath": collect_label_to_images(train_path),
+                    "split": "train",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "filepath": collect_label_to_images(test_path),
+                    "split": "test",
+                },
+            ),
+        ]
+
+    def _generate_examples(self, filepath: Dict[str, List[str]], split: str) -> Tuple[int, Dict]:
+        """Yields examples as (key, example) tuples."""
+        task = self.config.subset_id.split("_")[2]
+        counter = 0
+
+        for key, imgs in filepath.items():
+            label_index = self.label_chr_dig.index(key) if task in ["character", "digit"] else self.label_syl.index(key)
+            for img in imgs:
+                if self.config.schema == "source":
+                    yield counter, {"id": str(counter), "image_paths": img, "label": [label_index]}
+                elif self.config.schema == "seacrowd_imtext":
+                    yield counter, {
+                        "id": str(counter),
+                        "image_paths": [img],
+                        "texts": None,
+                        "metadata": {
+                            "context": None,
+                            "labels": [label_index],
+                        },
+                    }
+                counter += 1
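
As a usage reference, a minimal sketch of loading one of the configurations defined above with the datasets library; the local script path and the trust_remote_code flag are assumptions that depend on your datasets version and file layout:

# Hypothetical usage sketch: load the digit subset in the source schema
# from a local copy of this script (path and flag are assumptions).
import datasets

dset = datasets.load_dataset(
    "burapha_th.py",                 # local path to this loading script
    name="burapha_th_digit_source",  # f"{_DATASETNAME}_{subset}_{schema}"
    trust_remote_code=True,          # needed for script-based datasets on recent versions
)
print(dset["train"][0])              # {"id": ..., "image_paths": ..., "label": [...]}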