Modalities: Text
Formats: csv
Libraries: Datasets, pandas

kargaranamir committed
Commit dc04555
1 Parent(s): a1b1cac

Files changed (11):
  1. GlotSparse.py +152 -0
  2. balochi.csv +0 -3
  3. brahui.csv +0 -3
  4. fanti.csv +0 -0
  5. gilaki.csv +0 -0
  6. gurani.csv +0 -3
  7. kirmanjki.csv +0 -3
  8. south-azerbaijani.csv +0 -3
  9. southern-kurdish.csv +0 -3
  10. southern-uzbek.csv +0 -3
  11. twi.csv +0 -0
GlotSparse.py ADDED
@@ -0,0 +1,152 @@
+ # coding=utf-8
+ # Copyright 2023 The GlotSparse Authors.
+ # Lint as: python3
+ """
+ GlotSparse
+ """
+
+ """ This dataset loading script is built based on the Hugging Face tutorial and on the OSCAR-2301 and CulturaX dataset scripts. """
+
+ import os
+ import collections
+
+ import pandas as pd
+ import pyarrow as pa  # tables yielded by _generate_tables
+
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _DESCRIPTION = """\
+ GlotSparse \
+ """
+
+ _URL = "https://huggingface.co/datasets/kargaranamir/GlotSparse"
+
+ _LICENSE = """
+ We do not own any of the text from which this data has been extracted.
+ We license the actual packaging, the metadata and the annotations of this data under the CC BY 4.0 license.
+
+ If you are a website/dataset owner and do not want your data to be included in this corpus, please send us an email at [email protected] .
+ """
+
+ _CITATION = r"""\
+ @misc{GlotSparse,
+   author = {Kargaran, Amir Hossein},
+   title = {GlotSparse Corpus},
+   year = {2023},
+   publisher = {Github},
+   journal = {Github Repository},
+   howpublished = {{\url{https://github.com/kargaranamir/GlotSparse}}},
+ }
+ """
+
+ _BASE_DATA_PATH_FORMAT_STR = "{language}/{language}.csv"
+ _BASE_CHECKSUM_FILE_NAME = "checksum.sha256"
+
+
+ def _languages():
+     """Create the sorted dictionary of language codes and language names.
+     Returns:
+         The sorted dictionary as an instance of `collections.OrderedDict`.
+     """
+     langs = {
+         "Balochi_Arab": "bal_Arab",
+         "Twi_Latn": "twi_Latn",
+         "Fanti_Latn": "fat_Latn",
+         "South-Azerbaijani_Arab": "azb_Arab",
+         "Southern-Kurdish_Arab": "sdh_Arab",
+         "Gurani_Arab": "hac_Arab",
+         "Kirmanjki_Latn": "kiu_Latn",
+         "Southern-Uzbek_Arab": "uzs_Arab",
+         "Gilaki_Arab": "glk_Arab",
+     }
+
+     # Invert the mapping so that language codes ("bal_Arab") key the
+     # display names ("Balochi_Arab").
+     langs = {v: k for k, v in langs.items()}
+     return collections.OrderedDict(sorted(langs.items()))
+
+
+ class GlotConfig(datasets.BuilderConfig):
+     """BuilderConfig for the GlotSparse corpus."""
+
+     def __init__(self, language: str, **kwargs):
+         """BuilderConfig for GlotSparse.
+         Args:
+             language (str): A 3-letter language code and its writing script, joined by an underscore. For example: "glk_Arab", "fat_Latn".
+             **kwargs: Keyword arguments forwarded to super.
+         """
+         # Validate the language.
+         if language not in _languages():
+             raise ValueError("Invalid language: %s " % language)
+
+         name = f"{language}"
+         description = (
+             f"Original {_languages()[language]} GlotSparse dataset from 2023"
+         )
+         super(GlotConfig, self).__init__(
+             name=name, description=description, **kwargs
+         )
+
+         # Additional attributes
+         self.language = language
+         self.base_data_path = _BASE_DATA_PATH_FORMAT_STR.format(language=language)
+
+
+ class Glot(datasets.ArrowBasedBuilder):
+     """GlotSparse"""
+
+     BUILDER_CONFIGS = [
+         GlotConfig(  # pylint: disable=g-complex-comprehension
+             language=language,
+             version=datasets.Version("1.0.0"),
+         )
+         for language in _languages()
+     ]
+     BUILDER_CONFIG_CLASS = GlotConfig
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "Source": datasets.Value("string"),
+                     "Content": datasets.Value("string"),
+                     "Length": datasets.Value("int64"),
+                     "Script": datasets.Value("string"),
+                     "ISO639-3": datasets.Value("string"),
+                     "Language": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_URL,
+             citation=_CITATION,
+             license=_LICENSE,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Relative paths are resolved against the dataset repository root.
+         data_urls = [self.config.base_data_path]
+         doc_files = dl_manager.download(
+             [url for url in data_urls if url.endswith(".csv")]
+         )
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={"doc_files": doc_files}
+             ),
+         ]
+
+     def _generate_tables(self, doc_files):
+         """Yield one Arrow table per downloaded CSV file."""
+         for doc_i, doc_path in enumerate(doc_files):
+             df = pd.read_csv(doc_path)
+
+             # Keep only the declared feature columns, in schema order;
+             # ArrowBasedBuilder expects pyarrow tables, not per-row dicts.
+             df = df[["Source", "Content", "Length", "Script", "ISO639-3", "Language"]]
+             yield doc_i, pa.Table.from_pandas(df, preserve_index=False)
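
A minimal usage sketch (not part of this commit): once the script above is published in the kargaranamir/GlotSparse dataset repo, a split can be loaded by its config name, one of the "code_Script" keys from _languages(), e.g. "bal_Arab". Recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets.

    from datasets import load_dataset

    # "bal_Arab" = Balochi in Arabic script; any code from _languages() works.
    ds = load_dataset("kargaranamir/GlotSparse", "bal_Arab", split="train")
    print(ds[0]["Content"])
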
balochi.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:4a59eeb9cfce3e145d0782211e0810e0c2ac6cd9991be80cec8723e367268c0d
- size 14310496
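
The deleted files are Git LFS pointers: the "oid sha256:..." line records the checksum of the real CSV payload, and "size" is its length in bytes. A hedged sketch (local file name assumed) for checking a downloaded copy against the recorded oid:

    import hashlib

    def sha256_of(path, chunk_size=1 << 20):
        # Stream the file so large CSVs do not have to fit in memory.
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                h.update(chunk)
        return h.hexdigest()

    # oid from the balochi.csv pointer above.
    expected = "4a59eeb9cfce3e145d0782211e0810e0c2ac6cd9991be80cec8723e367268c0d"
    assert sha256_of("balochi.csv") == expected
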
brahui.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:919b4f319bfffa0f832be117ef0ffaf7008359ae5841087e9ff75b23047f2d7a
- size 12757409
fanti.csv DELETED
The diff for this file is too large to render. See raw diff
 
gilaki.csv DELETED
The diff for this file is too large to render. See raw diff
 
gurani.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3e3605aa4cf521b497d170a2191f203c1a1015fde1a4eb2863f3d0802467fd70
- size 17749590
kirmanjki.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e3d40054b4d71f80d9ae9ed00987cd07a8d9b11329b7a219562ea679f34fef0f
- size 45857180
south-azerbaijani.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:44d6c2e53090ee68035a5a98b68ac146b2f2f7e4d6e16cd848d7031adfe709d7
- size 40986731
southern-kurdish.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e2a31cc9368360d073b2a1745fc0efa92c2f2fe610ece9547a1b86bfb8b91534
- size 45976710
southern-uzbek.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d8b0cf458a8b11190bbd254d93106f24be92d85ae353f6ae2f170a83d1852dcf
- size 48041901
twi.csv DELETED
The diff for this file is too large to render. See raw diff
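
If only one language's raw rows are needed, the CSV can also be read directly with pandas, bypassing the loading script. This is a sketch under the assumption that the files now live at the path encoded in _BASE_DATA_PATH_FORMAT_STR ("{language}/{language}.csv") and are served through the Hub's resolve/main endpoint; adjust the URL if the repo layout differs.

    import pandas as pd

    # Path layout assumed from _BASE_DATA_PATH_FORMAT_STR.
    url = ("https://huggingface.co/datasets/kargaranamir/GlotSparse"
           "/resolve/main/bal_Arab/bal_Arab.csv")
    df = pd.read_csv(url)
    print(df[["ISO639-3", "Language", "Script", "Length"]].head())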