Datasets:
LIUM
/

sanchit-gandhi committed on
Commit
21b99e4
·
1 Parent(s): 765c78e

Make tedlium work in streaming mode

Browse files
Files changed (1) hide show
  1. tedlium.py +165 -59
tedlium.py CHANGED
@@ -16,6 +16,8 @@
16
 
17
  import os
18
  import re
 
 
19
  from pathlib import Path
20
 
21
  import numpy as np
@@ -25,16 +27,18 @@ import datasets
25
  from datasets.tasks import AutomaticSpeechRecognition
26
 
27
 
 
 
28
  _LICENSE = "licensed under Creative Commons BY-NC-ND 3.0 (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en)"
29
 
30
 
31
  class TedliumReleaseConfig(datasets.BuilderConfig):
32
  """BuilderConfig for a release of the TED-LIUM dataset."""
33
 
34
- def __init__(self, *, url, download_url, split_paths, citation, **kwargs):
35
  super(TedliumReleaseConfig, self).__init__(version=datasets.Version("1.0.1"), **kwargs)
36
  self.url = url
37
- self.download_url = download_url
38
  # List of split, path pairs containing the relative path within the
39
  # extracted tarball to the data for each split.
40
  self.split_paths = split_paths
@@ -63,11 +67,15 @@ def _make_builder_configs():
63
  }
64
  """,
65
  url="https://www.openslr.org/7/",
66
- download_url="http://www.openslr.org/resources/7/TEDLIUM_release1.tar.gz",
 
 
 
 
67
  split_paths=[
68
- (datasets.Split.TRAIN, os.path.join("TEDLIUM_release1", "train")),
69
- (datasets.Split.VALIDATION, os.path.join("TEDLIUM_release1", "dev")),
70
- (datasets.Split.TEST, os.path.join("TEDLIUM_release1", "test")),
71
  ],
72
  )
73
 
@@ -97,11 +105,15 @@ def _make_builder_configs():
97
  }
98
  """,
99
  url="https://www.openslr.org/19/",
100
- download_url="http://www.openslr.org/resources/19/TEDLIUM_release2.tar.gz",
 
 
 
 
101
  split_paths=[
102
- (datasets.Split.TRAIN, os.path.join("TEDLIUM_release2", "train")),
103
- (datasets.Split.VALIDATION, os.path.join("TEDLIUM_release2", "dev")),
104
- (datasets.Split.TEST, os.path.join("TEDLIUM_release2", "test")),
105
  ],
106
  )
107
 
@@ -109,7 +121,8 @@ def _make_builder_configs():
109
  name="release3",
110
  description="""\
111
  This is the TED-LIUM corpus release 3, licensed under Creative Commons
112
- BY-NC-ND 3.0.
 
113
 
114
  All talks and text are property of TED Conferences LLC.
115
 
@@ -124,7 +137,7 @@ def _make_builder_configs():
124
  - 452 hours of audio
125
  - 2351 aligned automatic transcripts in STM format
126
  - TEDLIUM 2 dev and test data: 19 TED talks in SPH format with
127
- corresponding manual transcriptions (cf. 'legacy' distribution below).
128
  - Dictionary with pronunciations (159848 entries), same file as the one
129
  included in TED-LIUM 2
130
  - Selected monolingual data for language modeling from WMT12 publicly
@@ -132,11 +145,6 @@ def _make_builder_configs():
132
  have been modified to get a tokenization more relevant for English
133
  language
134
 
135
- Two corpus distributions:
136
- - the legacy one, on which the dev and test datasets are the same as in
137
- TED-LIUM 2 (and TED-LIUM 1).
138
- - the 'speaker adaptation' one, especially designed for experiments on
139
- speaker adaptation.
140
  """,
141
  citation="""\
142
  @inproceedings{hernandez2018tedlium3,
@@ -149,18 +157,54 @@ def _make_builder_configs():
149
  }
150
  """,
151
  url="https://www.openslr.org/51/",
152
- download_url="http://www.openslr.org/resources/51/TEDLIUM_release-3.tgz",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
153
  split_paths=[
154
- (datasets.Split.VALIDATION, os.path.join("TEDLIUM_release-3", "legacy", "dev")),
155
- (datasets.Split.TEST, os.path.join("TEDLIUM_release-3", "legacy", "test")),
156
- # The legacy/train directory contains symlinks to "data",
157
- # which are skipped by extraction (see above).
158
- # Work around this by manually dereferencing the links here.
159
- (datasets.Split.TRAIN, os.path.join("TEDLIUM_release-3", "data")),
160
  ],
161
  )
162
 
163
- return [release1, release2, release3]
164
 
165
 
166
  class TedLium(datasets.GeneratorBasedBuilder):
@@ -192,46 +236,108 @@ class TedLium(datasets.GeneratorBasedBuilder):
192
  )
193
 
194
  def _split_generators(self, dl_manager):
195
- data_dir = dl_manager.download_and_extract(self.config.download_url)
 
 
196
  splits = []
197
  for split, path in self.config.split_paths:
198
- kwargs = {"filepath": os.path.join(data_dir, path)}
 
 
 
 
199
  splits.append(datasets.SplitGenerator(name=split, gen_kwargs=kwargs))
200
  return splits
201
 
202
- def _generate_examples(self, filepath):
203
  """Generate examples from a TED-LIUM stm file."""
204
- # The stm directory houses the speaker and transcription information in .stm format
205
- stm_dir = os.path.join(filepath, "stm")
206
- # The sph directory houses the audio files in .sph format
207
- sph_dir = os.path.join(filepath, "sph")
208
- stm_files = [os.path.join(stm_dir, f) for f in os.listdir(stm_dir) if f.endswith(".stm")]
209
- for file in stm_files:
210
- # the .sph speaker file almost always has the same file name as the .stm file
211
- speaker_file = Path(file).stem
212
- audio_file = os.path.join(sph_dir, speaker_file + ".sph")
213
- segment, sampling_rate = sf.read(audio_file, dtype=np.int16)
214
- with open(file) as f:
215
- for line in f:
216
- line = line.strip()
217
- fn, channel, speaker, start, end, label, transcript = line.split(" ", 6)
218
- transcript = _maybe_trim_suffix(transcript)
219
- if speaker_file != fn:
220
- # handle the case where the stm file does not have the same file name as the transcript
221
- speaker_file = fn
222
- segment, sampling_rate = sf.read(audio_file, dtype=np.int16)
223
- samples = _extract_audio_segment(segment, int(channel), float(start), float(end))
224
-
225
- key = "-".join([speaker, start, end, label])
226
- example = {
227
- "audio": {"path": file, "array": samples, "sampling_rate": sampling_rate},
228
- "text": transcript,
229
- "speaker_id": speaker,
230
- "gender": _parse_gender(label),
231
- "file": file,
232
- "id": key,
233
- }
234
- yield key, example
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
235
 
236
 
237
  def _maybe_trim_suffix(transcript):
 
16
 
17
  import os
18
  import re
19
+ from collections import defaultdict
20
+ from io import BytesIO
21
  from pathlib import Path
22
 
23
  import numpy as np
 
27
  from datasets.tasks import AutomaticSpeechRecognition
28
 
29
 
30
+ _DL_URL = "https://huggingface.co/datasets/LIUM/tedlium/resolve/main/"
31
+
32
  _LICENSE = "licensed under Creative Commons BY-NC-ND 3.0 (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en)"
33
 
34
 
35
  class TedliumReleaseConfig(datasets.BuilderConfig):
36
  """BuilderConfig for a release of the TED-LIUM dataset."""
37
 
38
+ def __init__(self, *, url, download_urls, split_paths, citation, **kwargs):
39
  super(TedliumReleaseConfig, self).__init__(version=datasets.Version("1.0.1"), **kwargs)
40
  self.url = url
41
+ self.download_urls = download_urls
42
  # List of split, path pairs containing the relative path within the
43
  # extracted tarball to the data for each split.
44
  self.split_paths = split_paths
 
67
  }
68
  """,
69
  url="https://www.openslr.org/7/",
70
+ download_urls={
71
+ "train": _DL_URL + os.path.join("TEDLIUM_release1", "train.tar.gz"),
72
+ "validation": _DL_URL + os.path.join("TEDLIUM_release1", "dev.tar.gz"),
73
+ "test": _DL_URL + os.path.join("TEDLIUM_release1", "test.tar.gz"),
74
+ },
75
  split_paths=[
76
+ (datasets.Split.TRAIN, "train"),
77
+ (datasets.Split.VALIDATION, "dev"),
78
+ (datasets.Split.TEST, "test"),
79
  ],
80
  )
81
 
 
105
  }
106
  """,
107
  url="https://www.openslr.org/19/",
108
+ download_urls={
109
+ "train": _DL_URL + os.path.join("TEDLIUM_release2", "train.tar.gz"),
110
+ "validation": _DL_URL + os.path.join("TEDLIUM_release2", "dev.tar.gz"),
111
+ "test": _DL_URL + os.path.join("TEDLIUM_release2", "test.tar.gz"),
112
+ },
113
  split_paths=[
114
+ (datasets.Split.TRAIN, "train"),
115
+ (datasets.Split.VALIDATION, "dev"),
116
+ (datasets.Split.TEST, "test"),
117
  ],
118
  )
119
 
 
121
  name="release3",
122
  description="""\
123
  This is the TED-LIUM corpus release 3, licensed under Creative Commons
124
+ BY-NC-ND 3.0. This is the 'legacy' version of the corpus, in which the dev and test datasets are the same as in
125
+ TED-LIUM 2 (and TED-LIUM 1).
126
 
127
  All talks and text are property of TED Conferences LLC.
128
 
 
137
  - 452 hours of audio
138
  - 2351 aligned automatic transcripts in STM format
139
  - TEDLIUM 2 dev and test data: 19 TED talks in SPH format with
140
+ corresponding manual transcriptions.
141
  - Dictionary with pronunciations (159848 entries), same file as the one
142
  included in TED-LIUM 2
143
  - Selected monolingual data for language modeling from WMT12 publicly
 
145
  have been modified to get a tokenization more relevant for English
146
  language
147
 
 
 
 
 
 
148
  """,
149
  citation="""\
150
  @inproceedings{hernandez2018tedlium3,
 
157
  }
158
  """,
159
  url="https://www.openslr.org/51/",
160
+ download_urls={
161
+ "train": _DL_URL + os.path.join("TEDLIUM_release3", "legacy", "train.tar.gz"),
162
+ "validation": _DL_URL + os.path.join("TEDLIUM_release3", "legacy", "dev.tar.gz"),
163
+ "test": _DL_URL + os.path.join("TEDLIUM_release3", "legacy", "test.tar.gz"),
164
+ },
165
+ split_paths=[
166
+ (datasets.Split.TRAIN, "train"),
167
+ (datasets.Split.VALIDATION, "dev"),
168
+ (datasets.Split.TEST, "test"),
169
+ ],
170
+ )
171
+
172
+ release3_speaker_adaptation = TedliumReleaseConfig(
173
+ name="release3-speaker-adaptation",
174
+ description="""\
175
+ This is the TED-LIUM corpus release 3, licensed under Creative Commons
176
+ BY-NC-ND 3.0. This is the 'speaker adaptation' version of the corpus, specially designed for experiments on
177
+ speaker adaptation.
178
+
179
+ All talks and text are property of TED Conferences LLC.
180
+
181
+ This new TED-LIUM release was made through a collaboration between the
182
+ Ubiqus company and the LIUM (University of Le Mans, France)
183
+ """,
184
+ citation="""\
185
+ @inproceedings{hernandez2018tedlium3,
186
+ title={TED-LIUM 3: twice as much data and corpus repartition for experiments on speaker adaptation},
187
+ author={Hernandez, Fran{\\c{c}}ois and Nguyen, Vincent and Ghannay, Sahar and Tomashenko, Natalia and Est{\\`e}ve, Yannick},
188
+ booktitle={International Conference on Speech and Computer},
189
+ pages={198--208},
190
+ year={2018},
191
+ organization={Springer}
192
+ }
193
+ """,
194
+ url="https://www.openslr.org/51/",
195
+ download_urls={
196
+ "train": _DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "train.tar.gz"),
197
+ "validation": _DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "dev.tar.gz"),
198
+ "test": _DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "test.tar.gz"),
199
+ },
200
  split_paths=[
201
+ (datasets.Split.TRAIN, "train"),
202
+ (datasets.Split.VALIDATION, "dev"),
203
+ (datasets.Split.TEST, "test"),
 
 
 
204
  ],
205
  )
206
 
207
+ return [release1, release2, release3, release3_speaker_adaptation]
208
 
209
 
210
  class TedLium(datasets.GeneratorBasedBuilder):
 
236
  )
237
 
238
  def _split_generators(self, dl_manager):
239
+ archive_path = dl_manager.download(self.config.download_urls)
240
+ # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
241
+ local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
242
  splits = []
243
  for split, path in self.config.split_paths:
244
+ kwargs = {
245
+ "filepath": dl_manager.iter_archive(archive_path[split]),
246
+ "local_extracted_archive": local_extracted_archive.get(split),
247
+ "split_path": path,
248
+ }
249
  splits.append(datasets.SplitGenerator(name=split, gen_kwargs=kwargs))
250
  return splits
251
 
252
+ def _generate_examples(self, filepath, local_extracted_archive, split_path):
253
  """Generate examples from a TED-LIUM stm file."""
254
+ if local_extracted_archive:
255
+ # The stm directory houses the speaker and transcription information in .stm format
256
+ stm_dir = os.path.join(local_extracted_archive, split_path, "stm")
257
+ # The sph directory houses the audio files in .sph format
258
+ sph_dir = os.path.join(local_extracted_archive, split_path, "sph")
259
+ stm_files = [os.path.join(stm_dir, f) for f in os.listdir(stm_dir) if f.endswith(".stm")]
260
+ for file in stm_files:
261
+ # the .sph speaker file almost always has the same file name as the .stm file
262
+ speaker_file = Path(file).stem
263
+ audio_file = os.path.join(sph_dir, speaker_file + ".sph")
264
+ segment, sampling_rate = sf.read(audio_file, dtype=np.int16)
265
+ with open(file) as f:
266
+ for line in f:
267
+ line = line.strip()
268
+ fn, channel, speaker, start, end, label, transcript = line.split(" ", 6)
269
+ transcript = _maybe_trim_suffix(transcript)
270
+ if speaker_file != fn:
271
+ # handle the case where the stm file does not have the same file name as the transcript
272
+ speaker_file = fn
273
+ audio_file = os.path.join(sph_dir, speaker_file + ".sph")
274
+ segment, sampling_rate = sf.read(audio_file, dtype=np.int16)
275
+ samples = _extract_audio_segment(segment, int(channel), float(start), float(end))
276
+ key = "-".join([speaker, start, end, label])
277
+ example = {
278
+ "audio": {"path": audio_file, "array": samples, "sampling_rate": sampling_rate},
279
+ "text": transcript,
280
+ "speaker_id": speaker,
281
+ "gender": _parse_gender(label),
282
+ "file": audio_file,
283
+ "id": key,
284
+ }
285
+ yield key, example
286
+
287
+ else:
288
+ audio_data = {}
289
+ transcripts = defaultdict(list)
290
+ for path, f in filepath:
291
+ if path.endswith(".sph"):
292
+ # get the speaker id
293
+ fn = path.split("/")[-1].strip(".sph")
294
+ # read the audio data from raw byte form and add key-value pair to dict
295
+ audio_data[fn] = sf.read(BytesIO(f.read()), dtype=np.int16)
296
+ elif path.endswith(".stm"):
297
+ for line in f:
298
+ if line:
299
+ line = line.decode("utf-8").strip()
300
+ fn, channel, speaker, start, end, label, transcript = line.split(" ", 6)
301
+ transcript = _maybe_trim_suffix(transcript)
302
+ audio_file = path.replace("stm", "sph")
303
+ key = "-".join([speaker, start, end, label])
304
+ # append metadata information to the dict of transcripts for the associated speaker
305
+ transcripts[fn].append(
306
+ {
307
+ "text": transcript,
308
+ "speaker_id": speaker,
309
+ "gender": _parse_gender(label),
310
+ "file": audio_file,
311
+ "id": key,
312
+ "start": start,
313
+ "end": end,
314
+ "channel": channel,
315
+ "fn": fn,
316
+ }
317
+ )
318
+
319
+ if audio_data and audio_data.keys() == transcripts.keys():
320
+ for fn, speaker in transcripts.items():
321
+ for transcript in speaker:
322
+ segment, sampling_rate = audio_data[transcript["fn"]]
323
+ samples = _extract_audio_segment(
324
+ segment,
325
+ int(transcript["channel"]),
326
+ float(transcript["start"]),
327
+ float(transcript["end"]),
328
+ )
329
+ audio = {"path": transcript["file"], "array": samples, "sampling_rate": sampling_rate}
330
+ key = transcript["id"]
331
+ yield key, {
332
+ "audio": audio,
333
+ "text": transcript["text"],
334
+ "speaker_id": transcript["speaker_id"],
335
+ "gender": transcript["gender"],
336
+ "file": transcript["file"],
337
+ "id": transcript["id"],
338
+ }
339
+ audio_data = {}
340
+ transcripts = defaultdict(list)
341
 
342
 
343
  def _maybe_trim_suffix(transcript):