add new files
su_id_asr.py → su_id_asr_split.py (renamed)
```diff
@@ -38,7 +38,9 @@ _HOMEPAGE = "https://indonlp.github.io/nusa-catalogue/card.html?su_id_asr"
 _LICENSE = "Attribution-ShareAlike 4.0 International."
 
 _URLs = {
-    "su_id_asr": …
+    "su_id_asr_train": "https://univindonesia-my.sharepoint.com/personal/patrick_samuel_office_ui_ac_id/_layouts/15/download.aspx?share=ESbYerhrepxPsggILmK8hZwB9ywXeZzLX7fF885Yo9F7JA",
+    "su_id_asr_dev": "https://univindonesia-my.sharepoint.com/personal/patrick_samuel_office_ui_ac_id/_layouts/15/download.aspx?share=EdmZ2KYglRBJrKacGRklGD4BEcZXqY6txIrEhj2csx3I3g",
+    "su_id_asr_test": "https://univindonesia-my.sharepoint.com/personal/patrick_samuel_office_ui_ac_id/_layouts/15/download.aspx?share=ET_Yu0vwbk9Mu-2vg68mSnkBJ-CnY1DOBjm8GVjGLKFZxQ",
 }
 
 _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
```
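
The single templated `su_id_asr` entry is replaced by three direct download links, one per split, which `_split_generators` in the next hunk fetches independently. Below is a minimal standalone sketch (not part of the commit) for checking that each archive downloads and extracts to the `asr_sundanese/` layout that `_generate_examples` expects; the `urls` dict simply copies the three `_URLs` entries above, and `DownloadManager` is instantiated directly only for this ad-hoc check.

```python
import os

from datasets import DownloadManager

# Copies of the three _URLs entries introduced in the hunk above.
urls = {
    "su_id_asr_train": "https://univindonesia-my.sharepoint.com/personal/patrick_samuel_office_ui_ac_id/_layouts/15/download.aspx?share=ESbYerhrepxPsggILmK8hZwB9ywXeZzLX7fF885Yo9F7JA",
    "su_id_asr_dev": "https://univindonesia-my.sharepoint.com/personal/patrick_samuel_office_ui_ac_id/_layouts/15/download.aspx?share=EdmZ2KYglRBJrKacGRklGD4BEcZXqY6txIrEhj2csx3I3g",
    "su_id_asr_test": "https://univindonesia-my.sharepoint.com/personal/patrick_samuel_office_ui_ac_id/_layouts/15/download.aspx?share=ET_Yu0vwbk9Mu-2vg68mSnkBJ-CnY1DOBjm8GVjGLKFZxQ",
}

dl_manager = DownloadManager()
for key, url in urls.items():
    # download_and_extract returns the local path of the downloaded, extracted archive.
    root = dl_manager.download_and_extract(url)
    tsv = os.path.join(root, "asr_sundanese", "utt_spk_text.tsv")
    print(f"{key}: extracted to {root}, transcript table present: {os.path.exists(tsv)}")
```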
```diff
@@ -93,57 +95,88 @@ class SuIdASR(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-        …
-        …
-        …
-        …
-            base_path[id] = dl_manager.download_and_extract(_URLs["su_id_asr"].format(str(id)))
+        base_path_train = dl_manager.download_and_extract(_URLs["su_id_asr_train"])
+        base_path_validation = dl_manager.download_and_extract(_URLs["su_id_asr_validation"])
+        base_path_test = dl_manager.download_and_extract(_URLs["su_id_asr_test"])
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={…},
+                gen_kwargs={
+                    "filepath": base_path_train,
+                    "split": "train"
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "filepath": base_path_validation,
+                    "split": "validation"
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "filepath": base_path_test,
+                    "split": "test"
+                },
             ),
         ]
 
-    def _generate_examples(self, filepath: Dict):
 
-        …
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_train"])},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_dev"])},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_test"])},
+            )
+        ]
 
-        …
-        …
-        …
-        …
-        with open(tsv_file, "r") as file:
-            tsv_file = csv.reader(file, delimiter="\t")
-
-            for line in tsv_file:
-                audio_id, speaker_id, transcription_text = line[0], line[1], line[2]
-
-                wav_path = os.path.join(each_filepath, "asr_sundanese", "data", "{}".format(audio_id[:2]), "{}.flac".format(audio_id))
-
-                if os.path.exists(wav_path):
-                    if self.config.schema == "source":
-                        ex = {
-                            "id": audio_id,
-                            "speaker_id": speaker_id,
-                            "path": wav_path,
-                            "audio": wav_path,
-                            "text": transcription_text,
-                        }
-                        yield audio_id, ex
-                    elif self.config.schema == "seacrowd_sptext":
-                        ex = {
-                            "id": audio_id,
-                            "speaker_id": speaker_id,
-                            "path": wav_path,
-                            "audio": wav_path,
-                            "text": transcription_text,
-                            "metadata": {
-                                "speaker_age": None,
-                                "speaker_gender": None,
-                            },
-                        }
-                        yield audio_id, ex
+    def _generate_examples(self, filepath: str):
+
+        if self.config.schema == "source" or self.config.schema == "seacrowd_sptext":
 
+            tsv_file = os.path.join(filepath, "asr_sundanese", "utt_spk_text.tsv")
+
+            with open(tsv_file, "r") as file:
+                tsv_file = csv.reader(file, delimiter="\t")
+
+                for line in tsv_file:
+                    audio_id, speaker_id, transcription_text = line[0], line[1], line[2]
+
+                    wav_path = os.path.join(filepath, "asr_sundanese", "data", "{}".format(audio_id[:2]), "{}.flac".format(audio_id))
+
+                    if os.path.exists(wav_path):
+                        if self.config.schema == "source":
+                            ex = {
+                                "id": audio_id,
+                                "speaker_id": speaker_id,
+                                "path": wav_path,
+                                "audio": wav_path,
+                                "text": transcription_text,
+                            }
+                            yield audio_id, ex
+                        elif self.config.schema == "seacrowd_sptext":
+                            ex = {
+                                "id": audio_id,
+                                "speaker_id": speaker_id,
+                                "path": wav_path,
+                                "audio": wav_path,
+                                "text": transcription_text,
+                                "metadata": {
+                                    "speaker_age": None,
+                                    "speaker_gender": None,
+                                },
+                            }
+                            yield audio_id, ex
+
         else:
-            raise ValueError(f"Invalid config: {self.config.name}")
+            raise ValueError(f"Invalid config: {self.config.name}")
```
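
Two details of the new file are worth flagging. It defines `_split_generators` twice; Python binds class attributes in definition order, so only the second definition takes effect, and that is also the one whose `gen_kwargs` match the `_generate_examples(self, filepath: str)` signature. The first definition passes an extra `split` kwarg and looks up `_URLs["su_id_asr_validation"]`, a key the dict does not define (it is named `su_id_asr_dev`). A minimal sketch of a single consolidated method covering the behavior that actually survives; this is a suggestion, not code from the commit:

```python
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # One SplitGenerator per archive; the validation URL is keyed "su_id_asr_dev" in _URLs.
        split_to_url_key = [
            (datasets.Split.TRAIN, "su_id_asr_train"),
            (datasets.Split.VALIDATION, "su_id_asr_dev"),
            (datasets.Split.TEST, "su_id_asr_test"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs[url_key])},
            )
            for split_name, url_key in split_to_url_key
        ]
```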
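
For completeness, a usage sketch (not part of the diff) of loading the renamed script locally. The builder's config names and default schema are defined elsewhere in the file and are not visible in this diff, so passing no `name` here assumes the default config uses the `source` schema.

```python
import datasets

# Load the renamed loader script from the local working directory.
# trust_remote_code is needed for script-based datasets in recent `datasets` versions.
ds = datasets.load_dataset("su_id_asr_split.py", trust_remote_code=True)

print(ds)  # expected: DatasetDict with "train", "validation", and "test" splits
sample = ds["train"][0]
print(sample["id"], sample["speaker_id"], sample["text"])
```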