holylovenia committed
Commit d21b08a · 1 Parent(s): da0e2aa

Upload indspeech_newstra_ethnicsr.py with huggingface_hub

Files changed (1)
  1. indspeech_newstra_ethnicsr.py +219 -0
indspeech_newstra_ethnicsr.py ADDED
@@ -0,0 +1,219 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ from itertools import chain
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @inproceedings{sakti-cocosda-2013,
+     title = "Towards Language Preservation: Design and Collection of Graphemically Balanced and Parallel Speech Corpora of {I}ndonesian Ethnic Languages",
+     author = "Sakti, Sakriani and Nakamura, Satoshi",
+     booktitle = "Proc. Oriental COCOSDA",
+     year = "2013",
+     address = "Gurgaon, India"
+ }
+
+ @inproceedings{sakti-sltu-2014,
+     title = "Recent progress in developing grapheme-based speech recognition for {I}ndonesian ethnic languages: {J}avanese, {S}undanese, {B}alinese and {B}ataks",
+     author = "Sakti, Sakriani and Nakamura, Satoshi",
+     booktitle = "Proc. 4th Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU 2014)",
+     year = "2014",
+     pages = "46--52",
+     address = "St. Petersburg, Russia"
+ }
+
+ @inproceedings{novitasari-sltu-2020,
+     title = "Cross-Lingual Machine Speech Chain for {J}avanese, {S}undanese, {B}alinese, and {B}ataks Speech Recognition and Synthesis",
+     author = "Novitasari, Sashi and Tjandra, Andros and Sakti, Sakriani and Nakamura, Satoshi",
+     booktitle = "Proc. Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)",
+     year = "2020",
+     pages = "131--138",
+     address = "Marseille, France"
+ }
+ """
+
+ _DATASETNAME = "indspeech_newstra_ethnicsr"
+ _DESCRIPTION = """\
+ INDspeech_NEWSTRA_EthnicSR is a collection of graphemically balanced and parallel speech corpora of four major Indonesian ethnic languages: Javanese, Sundanese, Balinese, and Bataks. It was developed in 2013 by the Nara Institute of Science and Technology (NAIST, Japan) [Sakti et al., 2013]. The data has been used to develop Indonesian ethnic speech recognition in supervised learning [Sakti et al., 2014] and in semi-supervised learning [Novitasari et al., 2020] based on the Machine Speech Chain framework [Tjandra et al., 2020].
+ """
+
+ _HOMEPAGE = "https://github.com/s-sakti/data_indsp_newstra_ethnicsr"
+ _LANGUAGES = ["sun", "jav", "btk", "ban"]
+ _LOCAL = False
+ _LICENSE = "CC-BY-NC-SA 4.0"
+
+ _lst_TYPE = ["traEth", "traInd"]
+ _lst_LANG = {"Bli": "BALI", "Btk": "BATAK", "Jaw": "JAWA", "Snd": "SUNDA"}
+ _lst_STD_LANG = {"ban": "Bli", "btk": "Btk", "jav": "Jaw", "sun": "Snd"}
+ _lst_HEAD_1_TRAIN = "https://raw.githubusercontent.com/s-sakti/data_indsp_newstra_ethnicsr/main/lst/dataset1_train_news_"
+ _lst_HEAD_1_TEST = ["https://raw.githubusercontent.com/s-sakti/data_indsp_newstra_ethnicsr/main/lst/dataset1_test_" + ltype + "_" for ltype in _lst_TYPE]
+ _lst_HEAD_2 = "https://raw.githubusercontent.com/s-sakti/data_indsp_newstra_ethnicsr/main/lst/dataset2_"
+ _sp_TEMPLATE = "https://raw.githubusercontent.com/s-sakti/data_indsp_newstra_ethnicsr/main/speech/16kHz/"  # + {lang}/Ind0{index}_{gender}_{lang_code}.zip
+ _txt_TEMPLATE = "https://github.com/s-sakti/data_indsp_newstra_ethnicsr/raw/main/text/utts_transcript/"  # + {lang}/Ind0{index}_{gender}_{lang_code}.zip
+
+ _URLS = {
+     "dataset1_train": {llang.lower(): [_lst_HEAD_1_TRAIN + llang + ".lst"] for llang in _lst_LANG},
+     "dataset1_test": {llang.lower(): [head1test + llang + ".lst" for head1test in _lst_HEAD_1_TEST] for llang in _lst_LANG},
+     "dataset2_train": {llang.lower(): [_lst_HEAD_2 + "train_news_" + llang + ".lst"] for llang in _lst_LANG},
+     "dataset2_test": {llang.lower(): [_lst_HEAD_2 + "test_news_" + llang + ".lst"] for llang in _lst_LANG},
+     "speech": {llang.lower(): [_sp_TEMPLATE + _lst_LANG[llang] + "/Ind" + str(idx).zfill(3) + "_" + ("M" if idx % 2 == 0 else "F") + "_" + llang + ".zip" for idx in range(1, 11)] for llang in _lst_LANG},
+     "transcript": {llang.lower(): [_txt_TEMPLATE + _lst_LANG[llang] + "/Ind" + str(idx).zfill(3) + "_" + ("M" if idx % 2 == 0 else "F") + "_" + llang + ".zip" for idx in range(1, 11)] for llang in _lst_LANG},
+ }
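+ # For illustration, the first "speech" URL generated for Javanese (key "jaw") is:
+ #   https://raw.githubusercontent.com/s-sakti/data_indsp_newstra_ethnicsr/main/speech/16kHz/JAWA/Ind001_F_Jaw.zip
+ # Ten speaker archives are generated per language, female (F) at odd indices and male (M) at even ones.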
+
+ _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
+
+ _SOURCE_VERSION = "1.0.0"
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ def nusantara_config_constructor(lang, schema, version, overlap):
+     if lang == "":
+         raise ValueError(f"Invalid lang {lang}")
+
+     if schema not in ("source", "nusantara_sptext"):
+         raise ValueError(f"Invalid schema: {schema}")
+
+     return NusantaraConfig(
+         name="indspeech_newstra_ethnicsr_{overlap}_{lang}_{schema}".format(lang=lang, schema=schema, overlap=overlap),
+         version=datasets.Version(version),
+         description="indspeech_newstra_ethnicsr {schema} schema for the {lang} language ({overlap} subset)".format(lang=_lst_LANG[_lst_STD_LANG[lang]], schema=schema, overlap=overlap),
+         schema=schema,
+         subset_id="indspeech_newstra_ethnicsr_{overlap}".format(overlap=overlap),
+     )
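+
+ # For example, nusantara_config_constructor("jav", "source", "1.0.0", "nooverlap")
+ # produces a config named "indspeech_newstra_ethnicsr_nooverlap_jav_source".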
+
+
+ class INDspeechNEWSTRAEthnicSR(datasets.GeneratorBasedBuilder):
+     """
+     The dataset contains two sub-datasets.
+     Dataset 1 has 2250/1000 train/test samples per language.
+     Dataset 2 has another 1600/50 train/test samples per language.
+     The 'overlap' keyword in the config name combines both sub-datasets, while 'nooverlap' uses only dataset 1.
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+     BUILDER_CONFIGS = [nusantara_config_constructor(lang, "source", _SOURCE_VERSION, overlap) for lang in _lst_STD_LANG for overlap in ["overlap", "nooverlap"]] + [
+         nusantara_config_constructor(lang, "nusantara_sptext", _NUSANTARA_VERSION, overlap) for lang in _lst_STD_LANG for overlap in ["overlap", "nooverlap"]
+     ]
+
+     # Generated config names include the overlap token ("overlap"/"nooverlap"),
+     # so the default config name must include it as well.
+     DEFAULT_CONFIG_NAME = "indspeech_newstra_ethnicsr_nooverlap_jav_source"
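+
+     # Minimal usage sketch, assuming this script file is available locally:
+     #   ds = datasets.load_dataset("indspeech_newstra_ethnicsr.py", name="indspeech_newstra_ethnicsr_nooverlap_jav_source")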
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "speaker_id": datasets.Value("string"),
+                     "path": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16_000),
+                     "text": datasets.Value("string"),
+                     "gender": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == "nusantara_sptext":
+             features = schemas.speech_text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
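+
+     # The nusantara_sptext schema mirrors the source features, except that gender is
+     # nested (with speaker_age) under a "metadata" field; see _generate_examples below.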
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         # Config names look like "indspeech_newstra_ethnicsr_{overlap}_{lang}_{schema}",
+         # so the fifth token is the language code.
+         lang = _lst_STD_LANG[self.config.name.split("_")[4]].lower()
+         ds1_train_urls = _URLS["dataset1_train"][lang]
+         ds1_test_urls = _URLS["dataset1_test"][lang]
+         ds2_train_urls = _URLS["dataset2_train"][lang]
+         ds2_test_urls = _URLS["dataset2_test"][lang]
+         sp_urls = _URLS["speech"][lang]
+         txt_urls = _URLS["transcript"][lang]
+
+         ds1_train_dir = [Path(dl_manager.download_and_extract(ds1_train_url)) for ds1_train_url in ds1_train_urls]
+         ds1_test_dir = [Path(dl_manager.download_and_extract(ds1_test_url)) for ds1_test_url in ds1_test_urls]
+         ds2_train_dir = [Path(dl_manager.download_and_extract(ds2_train_url)) for ds2_train_url in ds2_train_urls]
+         ds2_test_dir = [Path(dl_manager.download_and_extract(ds2_test_url)) for ds2_test_url in ds2_test_urls]
+         # Map each archive name without its ".zip" suffix (e.g. "Ind001_F_Jaw") to the
+         # directory of the same name inside the extracted archive.
+         sp_dir = {Path(sp_url).stem: os.path.join(dl_manager.download_and_extract(sp_url), Path(sp_url).stem) for sp_url in sp_urls}
+         txt_dir = {Path(txt_url).stem: os.path.join(dl_manager.download_and_extract(txt_url), Path(txt_url).stem) for txt_url in txt_urls}
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": {
+                         "dataset1": ds1_train_dir,
+                         "dataset2": ds2_train_dir,
+                         "speech": sp_dir,
+                         "transcript": txt_dir,
+                     },
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": {
+                         "dataset1": ds1_test_dir,
+                         "dataset2": ds2_test_dir,
+                         "speech": sp_dir,
+                         "transcript": txt_dir,
+                     },
+                     "split": "test",
+                 },
+             ),
+         ]
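+
+     # For Javanese, for example, the "speech" and "transcript" maps are keyed by speaker ID,
+     # roughly {"Ind001_F_Jaw": <extracted dir>, ..., "Ind010_M_Jaw": <extracted dir>}.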
+
+     def _generate_examples(self, filepath: Dict, split: str) -> Tuple[int, Dict]:
+         # "nooverlap" reads only the dataset 1 lists; "overlap" also includes dataset 2.
+         overlap = self.config.name.split("_")[3]
+         lst_files = filepath["dataset1"] + (filepath["dataset2"] if overlap == "overlap" else [])
+         sample_list = list(chain.from_iterable(open(samples).read().splitlines() for samples in lst_files))
+
+         for key, row in enumerate(sample_list):
+             # Each row is a relative utterance path whose second component is the
+             # speaker ID (e.g. "Ind001_F_Jaw"), which also encodes the gender.
+             speaker_id = str(Path(row).parent).split("/")[1]
+             wav_path = os.path.join(filepath["speech"][speaker_id], str(Path(row).name) + ".wav")
+             with open(os.path.join(filepath["transcript"][speaker_id], str(Path(row).name) + ".txt"), "r") as f:
+                 text = f.read().splitlines()[0]
+
+             if self.config.schema == "source":
+                 yield key, {
+                     "id": str(key),
+                     "speaker_id": speaker_id,
+                     "path": wav_path,
+                     "audio": wav_path,
+                     "text": text,
+                     "gender": speaker_id.split("_")[1],
+                 }
+             elif self.config.schema == "nusantara_sptext":
+                 yield key, {
+                     "id": str(key),
+                     "speaker_id": speaker_id,
+                     "path": wav_path,
+                     "audio": wav_path,
+                     "text": text,
+                     "metadata": {
+                         "speaker_age": None,
+                         "speaker_gender": speaker_id.split("_")[1],
+                     },
+                 }