pavanyellow committed
Commit b7059c3 · verified · 1 Parent(s): 719c650

Delete librispeech_asr.py

Files changed (1)
  1. librispeech_asr.py +0 -249
librispeech_asr.py DELETED
@@ -1,249 +0,0 @@
- # coding=utf-8
- # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """Librispeech automatic speech recognition dataset."""
-
-
- import os
- import datasets
-
-
- _CITATION = """\
- @inproceedings{panayotov2015librispeech,
-   title={Librispeech: an ASR corpus based on public domain audio books},
-   author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
-   booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
-   pages={5206--5210},
-   year={2015},
-   organization={IEEE}
- }
- """
-
- _DESCRIPTION = """\
- LibriSpeech is a corpus of approximately 1000 hours of read English speech with a sampling rate of 16 kHz,
- prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read
- audiobooks from the LibriVox project, and has been carefully segmented and aligned.
- """
-
- _URL = "http://www.openslr.org/12"
- _DL_URL = "http://www.openslr.org/resources/12/"
-
-
- _DL_URLS = {
-     "clean": {
-         "dev": _DL_URL + "dev-clean.tar.gz",
-         "test": _DL_URL + "test-clean.tar.gz",
-     },
-     "other": {
-         "test": _DL_URL + "test-other.tar.gz",
-         "dev": _DL_URL + "dev-other.tar.gz",
-         "train.500": _DL_URL + "train-other-500.tar.gz",
-     },
-     "all": {
-         "dev.clean": _DL_URL + "dev-clean.tar.gz",
-         "dev.other": _DL_URL + "dev-other.tar.gz",
-         "test.clean": _DL_URL + "test-clean.tar.gz",
-         "test.other": _DL_URL + "test-other.tar.gz",
-         "train.clean.100": _DL_URL + "train-clean-100.tar.gz",
-         "train.clean.360": _DL_URL + "train-clean-360.tar.gz",
-         "train.other.500": _DL_URL + "train-other-500.tar.gz",
-     },
- }
-
-
- class LibrispeechASRConfig(datasets.BuilderConfig):
-     """BuilderConfig for LibriSpeechASR."""
-
-     def __init__(self, **kwargs):
-         """
-         Args:
-           data_dir: `string`, the path to the folder containing the files in the
-             downloaded .tar
-           citation: `string`, citation for the data set
-           url: `string`, url for information about the data set
-           **kwargs: keyword arguments forwarded to super.
-         """
-         super(LibrispeechASRConfig, self).__init__(version=datasets.Version("2.1.0", ""), **kwargs)
-
-
- class LibrispeechASR(datasets.GeneratorBasedBuilder):
-     """Librispeech dataset."""
-
-     DEFAULT_WRITER_BATCH_SIZE = 256
-     DEFAULT_CONFIG_NAME = "all"
-     BUILDER_CONFIGS = [
-         LibrispeechASRConfig(name="clean", description="'Clean' speech."),
-         LibrispeechASRConfig(name="other", description="'Other', more challenging, speech."),
-         LibrispeechASRConfig(name="all", description="Combined clean and other dataset."),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "file": datasets.Value("string"),
-                     "audio": datasets.Audio(sampling_rate=16_000),
-                     "text": datasets.Value("string"),
-                     "speaker_id": datasets.Value("int64"),
-                     "chapter_id": datasets.Value("int64"),
-                     "id": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=("file", "text"),
-             homepage=_URL,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         archive_path = dl_manager.download(_DL_URLS[self.config.name])
-         # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
-         local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
-
-         splits = []
-         if self.config.name == "clean":
-             splits.extend([
-                 datasets.SplitGenerator(
-                     name="validation",  # Changed from datasets.Split.VALIDATION
-                     gen_kwargs={
-                         "local_extracted_archive": local_extracted_archive.get("dev"),
-                         "files": dl_manager.iter_archive(archive_path["dev"]),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name="test",  # Changed from datasets.Split.TEST
-                     gen_kwargs={
-                         "local_extracted_archive": local_extracted_archive.get("test"),
-                         "files": dl_manager.iter_archive(archive_path["test"]),
-                     },
-                 )
-             ])
-         elif self.config.name == "other":
-             splits.extend([
-                 datasets.SplitGenerator(
-                     name="train.500",
-                     gen_kwargs={
-                         "local_extracted_archive": local_extracted_archive.get("train.500"),
-                         "files": dl_manager.iter_archive(archive_path["train.500"]),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name="validation",
-                     gen_kwargs={
-                         "local_extracted_archive": local_extracted_archive.get("dev"),
-                         "files": dl_manager.iter_archive(archive_path["dev"]),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name="test",
-                     gen_kwargs={
-                         "local_extracted_archive": local_extracted_archive.get("test"),
-                         "files": dl_manager.iter_archive(archive_path["test"]),
-                     },
-                 )
-             ])
-         elif self.config.name == "all":
-             splits.extend([
-                 datasets.SplitGenerator(
-                     name="train.clean.100",
-                     gen_kwargs={
-                         "local_extracted_archive": local_extracted_archive.get("train.clean.100"),
-                         "files": dl_manager.iter_archive(archive_path["train.clean.100"]),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name="train.clean.360",
-                     gen_kwargs={
-                         "local_extracted_archive": local_extracted_archive.get("train.clean.360"),
-                         "files": dl_manager.iter_archive(archive_path["train.clean.360"]),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name="train.other.500",
-                     gen_kwargs={
-                         "local_extracted_archive": local_extracted_archive.get("train.other.500"),
-                         "files": dl_manager.iter_archive(archive_path["train.other.500"]),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name="validation.clean",
-                     gen_kwargs={
-                         "local_extracted_archive": local_extracted_archive.get("dev.clean"),
-                         "files": dl_manager.iter_archive(archive_path["dev.clean"]),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name="validation.other",
-                     gen_kwargs={
-                         "local_extracted_archive": local_extracted_archive.get("dev.other"),
-                         "files": dl_manager.iter_archive(archive_path["dev.other"]),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name="test.clean",
-                     gen_kwargs={
-                         "local_extracted_archive": local_extracted_archive.get("test.clean"),
-                         "files": dl_manager.iter_archive(archive_path["test.clean"]),
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name="test.other",
-                     gen_kwargs={
-                         "local_extracted_archive": local_extracted_archive.get("test.other"),
-                         "files": dl_manager.iter_archive(archive_path["test.other"]),
-                     },
-                 ),
-             ])
-
-         return splits
-
-     def _generate_examples(self, files, local_extracted_archive):
-         """Generate examples from a LibriSpeech archive_path."""
-         key = 0
-         audio_data = {}
-         transcripts = []
-         for path, f in files:
-             if path.endswith(".flac"):
-                 id_ = path.split("/")[-1][: -len(".flac")]
-                 audio_data[id_] = f.read()
-             elif path.endswith(".trans.txt"):
-                 for line in f:
-                     if line:
-                         line = line.decode("utf-8").strip()
-                         id_, transcript = line.split(" ", 1)
-                         audio_file = f"{id_}.flac"
-                         speaker_id, chapter_id = [int(el) for el in id_.split("-")[:2]]
-                         audio_file = (
-                             os.path.join(local_extracted_archive, audio_file)
-                             if local_extracted_archive
-                             else audio_file
-                         )
-                         transcripts.append(
-                             {
-                                 "id": id_,
-                                 "speaker_id": speaker_id,
-                                 "chapter_id": chapter_id,
-                                 "file": audio_file,
-                                 "text": transcript,
-                             }
-                         )
-             if audio_data and len(audio_data) == len(transcripts):
-                 for transcript in transcripts:
-                     audio = {"path": transcript["file"], "bytes": audio_data[transcript["id"]]}
-                     yield key, {"audio": audio, **transcript}
-                     key += 1
-                 audio_data = {}
-                 transcripts = []
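
For reference, a loading script like the one deleted above is consumed through datasets.load_dataset. A minimal usage sketch, assuming a datasets release that still supports script-based loading; the repo id below is a placeholder, while the config, split, and feature names come from the script's BUILDER_CONFIGS, _split_generators, and _info:

from datasets import load_dataset

# Placeholder repo id (assumption); any repo hosting the script above behaves the same.
# Per _split_generators, the "clean" config exposes "validation" and "test" splits.
dev_clean = load_dataset("pavanyellow/librispeech_asr", "clean", split="validation", trust_remote_code=True)

sample = dev_clean[0]
print(sample["id"], sample["speaker_id"], sample["chapter_id"])
print(sample["text"])                    # transcript string
print(sample["audio"]["sampling_rate"])  # 16000, as declared in _info()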