benhachem commited on
Commit
3492dac
·
verified ·
1 Parent(s): 015aedb

Delete loading script

Browse files
Files changed (1) hide show
  1. KHATT.py +0 -103
KHATT.py DELETED
@@ -1,103 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2022 the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import datasets
17
-
18
-
19
- _CITATION = """\
20
- @article{Pattern Recognition,
21
- Author = {bri A. Mahmoud, Irfan Ahmad, Wasfi G. Al-Khatib, Mohammad Alshayeb, Mohammad Tanvir Parvez, Volker Märgner, Gernot A. Fink},
22
- Title = { {KHATT: An Open Arabic Offline Handwritten Text Database} },
23
- Year = {2013},
24
- doi = {10.1016/j.patcog.2013.08.009},
25
- }
26
- """
27
-
28
- _HOMEPAGE = "https://khatt.ideas2serve.net/KHATTAgreement.php"
29
-
30
- _DESCRIPTION = """\
31
- KHATT (KFUPM Handwritten Arabic TexT) database is a database of unconstrained handwritten Arabic Text written by 1000 different writers. This research database’s development was undertaken by a research group from KFUPM, Dhahran, S audi Arabia headed by Professor Sabri Mahmoud in collaboration with Professor Fink from TU-Dortmund, Germany and Dr. Märgner from TU-Braunschweig, Germany.
32
- """
33
-
34
- _DATA_URL = {
35
- "train": [
36
- "https://huggingface.co/datasets/benhachem/KHATT/resolve/main/data/train.zip"
37
- ],
38
- "validation": [
39
- "https://huggingface.co/datasets/benhachem/KHATT/resolve/main/data/validation.zip"
40
- ],
41
-
42
- }
43
-
44
-
45
class KHATT(datasets.GeneratorBasedBuilder):
    """Builder for the KHATT handwritten-Arabic-text dataset.

    Each example pairs a handwritten line image (``.tif``) with its
    ground-truth transcription (``.txt``) stored alongside it in the
    same split archive.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return DatasetInfo: one ``image`` plus one ``text`` string per example."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "text": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the split archives and return train/validation SplitGenerators."""
        archives = dl_manager.download(_DATA_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "archives": [dl_manager.iter_archive(archive) for archive in archives["train"]],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "archives": [dl_manager.iter_archive(archive) for archive in archives["validation"]],
                    "split": "validation",
                },
            ),
        ]

    def _generate_examples(self, archives, split):
        """Yield ``(idx, {"image": ..., "text": ...})`` pairs.

        Assumes each archive yields a ``.tif`` image member immediately
        followed by its ``.txt`` transcription (``iter_archive`` yields
        members in archive order) — TODO confirm against the archives.

        Fixes vs. the original:
        * image bytes are read immediately — file objects produced by
          ``iter_archive`` are only valid until the iterator advances,
          so the original deferred ``img_file.read()`` was unreliable;
        * a ``.txt`` with no preceding ``.tif`` is skipped instead of
          raising ``UnboundLocalError``;
        * the yielded image ``path`` is now the ``.tif`` path (the
          original stored the ``.txt`` path).
        """
        idx = 0
        for archive in archives:
            # Pending image waiting for its transcription.
            img_path, img_bytes = None, None
            for path, file in archive:
                if path.endswith(".tif"):
                    if split != "test":
                        # Read now: `file` is invalidated once the archive
                        # iterator advances to the next member.
                        img_path, img_bytes = path, file.read()
                    else:
                        # Test split has no transcriptions (never reached
                        # with the current train/validation splits).
                        text = ""
                elif path.endswith(".txt"):
                    if img_bytes is None:
                        # Transcription without a preceding image: skip
                        # rather than crash on an unbound image variable.
                        continue
                    text = file.read().decode("utf-8")
                    yield idx, {
                        "image": {"path": img_path, "bytes": img_bytes},
                        "text": text,
                    }
                    idx += 1
                    img_path, img_bytes = None, None