patrickvonplaten committed on
Commit
f1cb732
1 Parent(s): d0401a8

[Librispeech] Add 'all' config (#4184)


* [Librispeech] Add 'all' config

* Update datasets/librispeech_asr/librispeech_asr.py

* apply suggestions

* correct paths

* up

* up

* up

* up

* up

Co-authored-by: Patrick von Platen <[email protected]>

Commit from https://github.com/huggingface/datasets/commit/91d7171b81a962a6822b880f12ecd74e80a4e77a

Files changed (3)
  1. README.md +1 -1
  2. dataset_infos.json +1 -1
  3. librispeech_asr.py +130 -28
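
Taken together, these changes make `all` a loadable (and now default) configuration of `librispeech_asr`, combining the `clean` and `other` archives into seven splits. A minimal usage sketch, assuming the standard `datasets.load_dataset` API; split names, feature names, and sizes are taken from the updated `dataset_infos.json` below, and outputs are illustrative:

```python
from datasets import load_dataset

# The "all" config added by this commit exposes the combined splits:
#   train.clean.100, train.clean.360, train.other.500,
#   validation.clean, validation.other, test.clean, test.other
# Note: downloading all seven archives is roughly 61 GB (see dataset_infos.json).
librispeech = load_dataset("librispeech_asr", "all")

sample = librispeech["train.clean.100"][0]
print(sample["text"])                       # transcript
print(sample["speaker_id"], sample["chapter_id"])
print(sample["audio"]["sampling_rate"])     # 16000
```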
README.md CHANGED
@@ -20,7 +20,7 @@ task_categories:
 - automatic-speech-recognition
 - audio-classification
 task_ids:
-- audio-speaker-identification
+- speaker-identification
 ---
 
 # Dataset Card for librispeech_asr
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"clean": {"description": "LibriSpeech is a corpus of approximately 1000 hours of read English speech with sampling rate of 16 kHz,\nprepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read\naudiobooks from the LibriVox project, and has been carefully segmented and aligned.87\n\nNote that in order to limit the required storage for preparing this dataset, the audio\nis stored in the .flac format and is not converted to a float32 array. To convert, the audio\nfile to a float32 array, please make use of the `.map()` function as follows:\n\n\n```python\nimport soundfile as sf\n\ndef map_to_array(batch):\n speech_array, _ = sf.read(batch[\"file\"])\n batch[\"speech\"] = speech_array\n return batch\n\ndataset = dataset.map(map_to_array, remove_columns=[\"file\"])\n```\n", "citation": "@inproceedings{panayotov2015librispeech,\n title={Librispeech: an ASR corpus based on public domain audio books},\n author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},\n booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},\n pages={5206--5210},\n year={2015},\n organization={IEEE}\n}\n", "homepage": "http://www.openslr.org/12", "license": "", "features": {"file": {"dtype": "string", "id": null, "_type": "Value"}, "audio": {"sampling_rate": 16000, "mono": true, "_storage_dtype": "struct", "id": null, "_type": "Audio"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "speaker_id": {"dtype": "int64", "id": null, "_type": "Value"}, "chapter_id": {"dtype": "int64", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "file", "output": "text"}, "task_templates": [{"task": "automatic-speech-recognition", "audio_column": "audio", "transcription_column": "text"}], "builder_name": "librispeech_asr", "config_name": "clean", "version": {"version_str": "2.1.0", "description": "", "major": 2, "minor": 1, "patch": 0}, "splits": {"train.100": {"name": "train.100", "num_bytes": 6619683041, "num_examples": 28539, "dataset_name": "librispeech_asr"}, "train.360": {"name": "train.360", "num_bytes": 23898214592, "num_examples": 104014, "dataset_name": "librispeech_asr"}, "validation": {"name": "validation", "num_bytes": 359572231, "num_examples": 2703, "dataset_name": "librispeech_asr"}, "test": {"name": "test", "num_bytes": 367705423, "num_examples": 2620, "dataset_name": "librispeech_asr"}}, "download_checksums": {"http://www.openslr.org/resources/12/dev-clean.tar.gz": {"num_bytes": 337926286, "checksum": "76f87d090650617fca0cac8f88b9416e0ebf80350acb97b343a85fa903728ab3"}, "http://www.openslr.org/resources/12/test-clean.tar.gz": {"num_bytes": 346663984, "checksum": "39fde525e59672dc6d1551919b1478f724438a95aa55f874b576be21967e6c23"}, "http://www.openslr.org/resources/12/train-clean-100.tar.gz": {"num_bytes": 6387309499, "checksum": "d4ddd1d5a6ab303066f14971d768ee43278a5f2a0aa43dc716b0e64ecbbbf6e2"}, "http://www.openslr.org/resources/12/train-clean-360.tar.gz": {"num_bytes": 23049477885, "checksum": "146a56496217e96c14334a160df97fffedd6e0a04e66b9c5af0d40be3c792ecf"}}, "download_size": 30121377654, "post_processing_size": null, "dataset_size": 31245175287, "size_in_bytes": 61366552941}, "other": {"description": "LibriSpeech is a corpus of approximately 1000 hours of read English speech with sampling rate of 16 kHz,\nprepared by Vassil Panayotov with the assistance of Daniel Povey. 
The data is derived from read\naudiobooks from the LibriVox project, and has been carefully segmented and aligned.87\n\nNote that in order to limit the required storage for preparing this dataset, the audio\nis stored in the .flac format and is not converted to a float32 array. To convert, the audio\nfile to a float32 array, please make use of the `.map()` function as follows:\n\n\n```python\nimport soundfile as sf\n\ndef map_to_array(batch):\n speech_array, _ = sf.read(batch[\"file\"])\n batch[\"speech\"] = speech_array\n return batch\n\ndataset = dataset.map(map_to_array, remove_columns=[\"file\"])\n```\n", "citation": "@inproceedings{panayotov2015librispeech,\n title={Librispeech: an ASR corpus based on public domain audio books},\n author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},\n booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},\n pages={5206--5210},\n year={2015},\n organization={IEEE}\n}\n", "homepage": "http://www.openslr.org/12", "license": "", "features": {"file": {"dtype": "string", "id": null, "_type": "Value"}, "audio": {"sampling_rate": 16000, "mono": true, "_storage_dtype": "struct", "id": null, "_type": "Audio"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "speaker_id": {"dtype": "int64", "id": null, "_type": "Value"}, "chapter_id": {"dtype": "int64", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "file", "output": "text"}, "task_templates": [{"task": "automatic-speech-recognition", "audio_column": "audio", "transcription_column": "text"}], "builder_name": "librispeech_asr", "config_name": "other", "version": {"version_str": "2.1.0", "description": "", "major": 2, "minor": 1, "patch": 0}, "splits": {"train.500": {"name": "train.500", "num_bytes": 31810256902, "num_examples": 148688, "dataset_name": "librispeech_asr"}, "validation": {"name": "validation", "num_bytes": 337283304, "num_examples": 2864, "dataset_name": "librispeech_asr"}, "test": {"name": "test", "num_bytes": 352396474, "num_examples": 2939, "dataset_name": "librispeech_asr"}}, "download_checksums": {"http://www.openslr.org/resources/12/test-other.tar.gz": {"num_bytes": 328757843, "checksum": "d09c181bba5cf717b3dee7d4d592af11a3ee3a09e08ae025c5506f6ebe961c29"}, "http://www.openslr.org/resources/12/dev-other.tar.gz": {"num_bytes": 314305928, "checksum": "12661c48e8c3fe1de2c1caa4c3e135193bfb1811584f11f569dd12645aa84365"}, "http://www.openslr.org/resources/12/train-other-500.tar.gz": {"num_bytes": 30593501606, "checksum": "ddb22f27f96ec163645d53215559df6aa36515f26e01dd70798188350adcb6d2"}}, "download_size": 31236565377, "post_processing_size": null, "dataset_size": 32499936680, "size_in_bytes": 63736502057}}
 
+ {"clean": {"description": "LibriSpeech is a corpus of approximately 1000 hours of read English speech with sampling rate of 16 kHz,\nprepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read\naudiobooks from the LibriVox project, and has been carefully segmented and aligned.87\n\nNote that in order to limit the required storage for preparing this dataset, the audio\nis stored in the .flac format and is not converted to a float32 array. To convert, the audio\nfile to a float32 array, please make use of the `.map()` function as follows:\n\n\n```python\nimport soundfile as sf\n\ndef map_to_array(batch):\n speech_array, _ = sf.read(batch[\"file\"])\n batch[\"speech\"] = speech_array\n return batch\n\ndataset = dataset.map(map_to_array, remove_columns=[\"file\"])\n```\n", "citation": "@inproceedings{panayotov2015librispeech,\n title={Librispeech: an ASR corpus based on public domain audio books},\n author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},\n booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},\n pages={5206--5210},\n year={2015},\n organization={IEEE}\n}\n", "homepage": "http://www.openslr.org/12", "license": "", "features": {"file": {"dtype": "string", "id": null, "_type": "Value"}, "audio": {"sampling_rate": 16000, "mono": true, "decode": true, "id": null, "_type": "Audio"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "speaker_id": {"dtype": "int64", "id": null, "_type": "Value"}, "chapter_id": {"dtype": "int64", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "file", "output": "text"}, "task_templates": [{"task": "automatic-speech-recognition", "audio_column": "audio", "transcription_column": "text"}], "builder_name": "librispeech_asr", "config_name": "clean", "version": {"version_str": "2.1.0", "description": "", "major": 2, "minor": 1, "patch": 0}, "splits": {"train.100": {"name": "train.100", "num_bytes": 6619683041, "num_examples": 28539, "dataset_name": "librispeech_asr"}, "train.360": {"name": "train.360", "num_bytes": 23898214592, "num_examples": 104014, "dataset_name": "librispeech_asr"}, "validation": {"name": "validation", "num_bytes": 359572231, "num_examples": 2703, "dataset_name": "librispeech_asr"}, "test": {"name": "test", "num_bytes": 367705423, "num_examples": 2620, "dataset_name": "librispeech_asr"}}, "download_checksums": {"http://www.openslr.org/resources/12/dev-clean.tar.gz": {"num_bytes": 337926286, "checksum": "76f87d090650617fca0cac8f88b9416e0ebf80350acb97b343a85fa903728ab3"}, "http://www.openslr.org/resources/12/test-clean.tar.gz": {"num_bytes": 346663984, "checksum": "39fde525e59672dc6d1551919b1478f724438a95aa55f874b576be21967e6c23"}, "http://www.openslr.org/resources/12/train-clean-100.tar.gz": {"num_bytes": 6387309499, "checksum": "d4ddd1d5a6ab303066f14971d768ee43278a5f2a0aa43dc716b0e64ecbbbf6e2"}, "http://www.openslr.org/resources/12/train-clean-360.tar.gz": {"num_bytes": 23049477885, "checksum": "146a56496217e96c14334a160df97fffedd6e0a04e66b9c5af0d40be3c792ecf"}}, "download_size": 30121377654, "post_processing_size": null, "dataset_size": 31245175287, "size_in_bytes": 61366552941}, "other": {"description": "LibriSpeech is a corpus of approximately 1000 hours of read English speech with sampling rate of 16 kHz,\nprepared by Vassil Panayotov with the assistance of Daniel Povey. 
The data is derived from read\naudiobooks from the LibriVox project, and has been carefully segmented and aligned.87\n\nNote that in order to limit the required storage for preparing this dataset, the audio\nis stored in the .flac format and is not converted to a float32 array. To convert, the audio\nfile to a float32 array, please make use of the `.map()` function as follows:\n\n\n```python\nimport soundfile as sf\n\ndef map_to_array(batch):\n speech_array, _ = sf.read(batch[\"file\"])\n batch[\"speech\"] = speech_array\n return batch\n\ndataset = dataset.map(map_to_array, remove_columns=[\"file\"])\n```\n", "citation": "@inproceedings{panayotov2015librispeech,\n title={Librispeech: an ASR corpus based on public domain audio books},\n author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},\n booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},\n pages={5206--5210},\n year={2015},\n organization={IEEE}\n}\n", "homepage": "http://www.openslr.org/12", "license": "", "features": {"file": {"dtype": "string", "id": null, "_type": "Value"}, "audio": {"sampling_rate": 16000, "mono": true, "decode": true, "id": null, "_type": "Audio"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "speaker_id": {"dtype": "int64", "id": null, "_type": "Value"}, "chapter_id": {"dtype": "int64", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "file", "output": "text"}, "task_templates": [{"task": "automatic-speech-recognition", "audio_column": "audio", "transcription_column": "text"}], "builder_name": "librispeech_asr", "config_name": "other", "version": {"version_str": "2.1.0", "description": "", "major": 2, "minor": 1, "patch": 0}, "splits": {"train.500": {"name": "train.500", "num_bytes": 31810256902, "num_examples": 148688, "dataset_name": "librispeech_asr"}, "validation": {"name": "validation", "num_bytes": 337283304, "num_examples": 2864, "dataset_name": "librispeech_asr"}, "test": {"name": "test", "num_bytes": 352396474, "num_examples": 2939, "dataset_name": "librispeech_asr"}}, "download_checksums": {"http://www.openslr.org/resources/12/test-other.tar.gz": {"num_bytes": 328757843, "checksum": "d09c181bba5cf717b3dee7d4d592af11a3ee3a09e08ae025c5506f6ebe961c29"}, "http://www.openslr.org/resources/12/dev-other.tar.gz": {"num_bytes": 314305928, "checksum": "12661c48e8c3fe1de2c1caa4c3e135193bfb1811584f11f569dd12645aa84365"}, "http://www.openslr.org/resources/12/train-other-500.tar.gz": {"num_bytes": 30593501606, "checksum": "ddb22f27f96ec163645d53215559df6aa36515f26e01dd70798188350adcb6d2"}}, "download_size": 31236565377, "post_processing_size": null, "dataset_size": 32499936680, "size_in_bytes": 63736502057}, "all": {"description": "LibriSpeech is a corpus of approximately 1000 hours of read English speech with sampling rate of 16 kHz,\nprepared by Vassil Panayotov with the assistance of Daniel Povey. 
The data is derived from read\naudiobooks from the LibriVox project, and has been carefully segmented and aligned.87\n", "citation": "@inproceedings{panayotov2015librispeech,\n title={Librispeech: an ASR corpus based on public domain audio books},\n author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},\n booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},\n pages={5206--5210},\n year={2015},\n organization={IEEE}\n}\n", "homepage": "http://www.openslr.org/12", "license": "", "features": {"file": {"dtype": "string", "id": null, "_type": "Value"}, "audio": {"sampling_rate": 16000, "mono": true, "decode": true, "id": null, "_type": "Audio"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "speaker_id": {"dtype": "int64", "id": null, "_type": "Value"}, "chapter_id": {"dtype": "int64", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "file", "output": "text"}, "task_templates": [{"task": "automatic-speech-recognition", "audio_column": "audio", "transcription_column": "text"}], "builder_name": "librispeech_asr", "config_name": "all", "version": {"version_str": "2.1.0", "description": "", "major": 2, "minor": 1, "patch": 0}, "splits": {"train.clean.100": {"name": "train.clean.100", "num_bytes": 6627791685, "num_examples": 28539, "dataset_name": "librispeech_asr"}, "train.clean.360": {"name": "train.clean.360", "num_bytes": 23927767570, "num_examples": 104014, "dataset_name": "librispeech_asr"}, "train.other.500": {"name": "train.other.500", "num_bytes": 31852502880, "num_examples": 148688, "dataset_name": "librispeech_asr"}, "validation.clean": {"name": "validation.clean", "num_bytes": 359505691, "num_examples": 2703, "dataset_name": "librispeech_asr"}, "validation.other": {"name": "validation.other", "num_bytes": 337213112, "num_examples": 2864, "dataset_name": "librispeech_asr"}, "test.clean": {"name": "test.clean", "num_bytes": 368449831, "num_examples": 2620, "dataset_name": "librispeech_asr"}, "test.other": {"name": "test.other", "num_bytes": 353231518, "num_examples": 2939, "dataset_name": "librispeech_asr"}}, "download_checksums": {"http://www.openslr.org/resources/12/dev-clean.tar.gz": {"num_bytes": 337926286, "checksum": "76f87d090650617fca0cac8f88b9416e0ebf80350acb97b343a85fa903728ab3"}, "http://www.openslr.org/resources/12/dev-other.tar.gz": {"num_bytes": 314305928, "checksum": "12661c48e8c3fe1de2c1caa4c3e135193bfb1811584f11f569dd12645aa84365"}, "http://www.openslr.org/resources/12/test-clean.tar.gz": {"num_bytes": 346663984, "checksum": "39fde525e59672dc6d1551919b1478f724438a95aa55f874b576be21967e6c23"}, "http://www.openslr.org/resources/12/test-other.tar.gz": {"num_bytes": 328757843, "checksum": "d09c181bba5cf717b3dee7d4d592af11a3ee3a09e08ae025c5506f6ebe961c29"}, "http://www.openslr.org/resources/12/train-clean-100.tar.gz": {"num_bytes": 6387309499, "checksum": "d4ddd1d5a6ab303066f14971d768ee43278a5f2a0aa43dc716b0e64ecbbbf6e2"}, "http://www.openslr.org/resources/12/train-clean-360.tar.gz": {"num_bytes": 23049477885, "checksum": "146a56496217e96c14334a160df97fffedd6e0a04e66b9c5af0d40be3c792ecf"}, "http://www.openslr.org/resources/12/train-other-500.tar.gz": {"num_bytes": 30593501606, "checksum": "ddb22f27f96ec163645d53215559df6aa36515f26e01dd70798188350adcb6d2"}}, "download_size": 61357943031, "post_processing_size": null, "dataset_size": 63826462287, "size_in_bytes": 125184405318}}
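
Besides the new `all` entry, the regenerated metadata replaces the internal `"_storage_dtype": "struct"` marker on the `audio` feature with `"decode": true`, i.e. examples yield decoded audio on access. A short sketch of what that means in practice, assuming the standard `datasets.Audio` feature semantics (not specific to this commit):

```python
from datasets import Audio, load_dataset

ds = load_dataset("librispeech_asr", "clean", split="validation")

# With "decode": true, indexing decodes the FLAC into an array on the fly.
audio = ds[0]["audio"]
print(audio["sampling_rate"], audio["array"].shape)

# Decoding can be switched off again when only the raw storage is needed.
ds_raw = ds.cast_column("audio", Audio(decode=False))
print(ds_raw[0]["audio"])  # un-decoded audio entry, no array materialized
```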
librispeech_asr.py CHANGED
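The headline changes in this file are the new `all` config and the optional `local_extracted_archive` passed from `_split_generators` to `_generate_examples`: in non-streaming mode the tarballs are extracted so examples can point at real local `.flac` paths, while in streaming mode (`dl_manager.is_streaming`) nothing is extracted and files are read from the archives on the fly. A rough sketch of the two modes, assuming the standard `load_dataset` call; outputs are illustrative and not verified against this exact revision:

```python
from datasets import load_dataset

# Streaming: no up-front extraction; `local_extracted_archive` stays empty and
# audio is decoded directly from the tar members.
streamed = load_dataset("librispeech_asr", "all", split="train.clean.100", streaming=True)
first = next(iter(streamed))
print(first["speaker_id"], first["chapter_id"], first["text"][:50])

# Non-streaming: archives are downloaded (~61 GB for "all") and extracted, so the
# generator can join `local_extracted_archive` onto each audio file name.
ds = load_dataset("librispeech_asr", "all", split="validation.clean")
print(len(ds))  # 2703 examples, per the updated dataset_infos.json
```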
@@ -17,6 +17,8 @@
 """Librispeech automatic speech recognition dataset."""
 
 
+import os
+
 import datasets
 from datasets.tasks import AutomaticSpeechRecognition
 
@@ -36,27 +38,12 @@ _DESCRIPTION = """\
 LibriSpeech is a corpus of approximately 1000 hours of read English speech with sampling rate of 16 kHz,
 prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read
 audiobooks from the LibriVox project, and has been carefully segmented and aligned.87
-
-Note that in order to limit the required storage for preparing this dataset, the audio
-is stored in the .flac format and is not converted to a float32 array. To convert, the audio
-file to a float32 array, please make use of the `.map()` function as follows:
-
-
-```python
-import soundfile as sf
-
-def map_to_array(batch):
-    speech_array, _ = sf.read(batch["file"])
-    batch["speech"] = speech_array
-    return batch
-
-dataset = dataset.map(map_to_array, remove_columns=["file"])
-```
 """
 
 _URL = "http://www.openslr.org/12"
 _DL_URL = "http://www.openslr.org/resources/12/"
 
+
 _DL_URLS = {
     "clean": {
         "dev": _DL_URL + "dev-clean.tar.gz",
@@ -69,6 +56,15 @@ _DL_URLS = {
         "dev": _DL_URL + "dev-other.tar.gz",
         "train.500": _DL_URL + "train-other-500.tar.gz",
     },
+    "all": {
+        "dev.clean": _DL_URL + "dev-clean.tar.gz",
+        "dev.other": _DL_URL + "dev-other.tar.gz",
+        "test.clean": _DL_URL + "test-clean.tar.gz",
+        "test.other": _DL_URL + "test-other.tar.gz",
+        "train.clean.100": _DL_URL + "train-clean-100.tar.gz",
+        "train.clean.360": _DL_URL + "train-clean-360.tar.gz",
+        "train.other.500": _DL_URL + "train-other-500.tar.gz",
+    },
 }
 
 
@@ -91,9 +87,11 @@ class LibrispeechASR(datasets.GeneratorBasedBuilder):
     """Librispeech dataset."""
 
     DEFAULT_WRITER_BATCH_SIZE = 256
+    DEFAULT_CONFIG_NAME = "all"
     BUILDER_CONFIGS = [
         LibrispeechASRConfig(name="clean", description="'Clean' speech."),
         LibrispeechASRConfig(name="other", description="'Other', more challenging, speech."),
+        LibrispeechASRConfig(name="all", description="Combined clean and other dataset."),
     ]
 
     def _info(self):
@@ -117,33 +115,132 @@ class LibrispeechASR(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         archive_path = dl_manager.download(_DL_URLS[self.config.name])
+        # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
+        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
 
         if self.config.name == "clean":
             train_splits = [
                 datasets.SplitGenerator(
-                    name="train.100", gen_kwargs={"files": dl_manager.iter_archive(archive_path["train.100"])}
+                    name="train.100",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.100"),
+                        "files": dl_manager.iter_archive(archive_path["train.100"]),
+                    },
                 ),
                 datasets.SplitGenerator(
-                    name="train.360", gen_kwargs={"files": dl_manager.iter_archive(archive_path["train.360"])}
+                    name="train.360",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.360"),
+                        "files": dl_manager.iter_archive(archive_path["train.360"]),
+                    },
                 ),
             ]
+            dev_splits = [
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("dev"),
+                        "files": dl_manager.iter_archive(archive_path["dev"]),
+                    },
+                )
+            ]
+            test_splits = [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("test"),
+                        "files": dl_manager.iter_archive(archive_path["test"]),
+                    },
+                )
+            ]
         elif self.config.name == "other":
             train_splits = [
                 datasets.SplitGenerator(
-                    name="train.500", gen_kwargs={"files": dl_manager.iter_archive(archive_path["train.500"])}
+                    name="train.500",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.500"),
+                        "files": dl_manager.iter_archive(archive_path["train.500"]),
+                    },
+                )
+            ]
+            dev_splits = [
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("dev"),
+                        "files": dl_manager.iter_archive(archive_path["dev"]),
+                    },
+                )
+            ]
+            test_splits = [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("test"),
+                        "files": dl_manager.iter_archive(archive_path["test"]),
+                    },
+                )
+            ]
+        elif self.config.name == "all":
+            train_splits = [
+                datasets.SplitGenerator(
+                    name="train.clean.100",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.clean.100"),
+                        "files": dl_manager.iter_archive(archive_path["train.clean.100"]),
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name="train.clean.360",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.clean.360"),
+                        "files": dl_manager.iter_archive(archive_path["train.clean.360"]),
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name="train.other.500",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.other.500"),
+                        "files": dl_manager.iter_archive(archive_path["train.other.500"]),
+                    },
+                ),
+            ]
+            dev_splits = [
+                datasets.SplitGenerator(
+                    name="validation.clean",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("validation.clean"),
+                        "files": dl_manager.iter_archive(archive_path["dev.clean"]),
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name="validation.other",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("validation.other"),
+                        "files": dl_manager.iter_archive(archive_path["dev.other"]),
+                    },
+                ),
+            ]
+            test_splits = [
+                datasets.SplitGenerator(
+                    name="test.clean",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("test.clean"),
+                        "files": dl_manager.iter_archive(archive_path["test.clean"]),
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name="test.other",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("test.other"),
+                        "files": dl_manager.iter_archive(archive_path["test.other"]),
+                    },
                 ),
             ]
 
-        return train_splits + [
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION, gen_kwargs={"files": dl_manager.iter_archive(archive_path["dev"])}
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST, gen_kwargs={"files": dl_manager.iter_archive(archive_path["test"])}
-            ),
-        ]
+        return train_splits + dev_splits + test_splits
 
-    def _generate_examples(self, files):
+    def _generate_examples(self, files, local_extracted_archive):
         """Generate examples from a LibriSpeech archive_path."""
         key = 0
         audio_data = {}
@@ -159,6 +256,11 @@ class LibrispeechASR(datasets.GeneratorBasedBuilder):
                         id_, transcript = line.split(" ", 1)
                         audio_file = f"{id_}.flac"
                         speaker_id, chapter_id = [int(el) for el in id_.split("-")[:2]]
+                        audio_file = (
+                            os.path.join(local_extracted_archive, audio_file)
+                            if local_extracted_archive
+                            else audio_file
+                        )
                         transcripts.append(
                             {
                                 "id": id_,