sanchit-gandhi committed
Commit · 3144062
1 Parent(s): 693cf23

file ids - transcriptions

tedlium.py  +9 -12
tedlium.py
CHANGED
@@ -13,7 +13,7 @@
 # limitations under the License.

 """TED-LIUM speech recognition dataset."""
-
+import csv
 import os
 import re
 from collections import defaultdict
@@ -31,8 +31,8 @@ _DL_URL = "https://huggingface.co/datasets/LIUM/tedlium/resolve/main/"

 _LICENSE = "licensed under Creative Commons BY-NC-ND 3.0 (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en)"

-_WHISPER_TRANSCRIPT_URL = "https://huggingface.co/datasets/distil-whisper/
-_WHISPER_TRANSCRIPT_URLs = _WHISPER_TRANSCRIPT_URL + "/{split}-transcription.
+_WHISPER_TRANSCRIPT_URL = "https://huggingface.co/datasets/distil-whisper/whisper_transcriptions_greedy/resolve/main/tedlium"
+_WHISPER_TRANSCRIPT_URLs = _WHISPER_TRANSCRIPT_URL + "/{split}-transcription.csv"


 class TedliumReleaseConfig(datasets.BuilderConfig):
@@ -271,12 +271,11 @@ class TedLium(datasets.GeneratorBasedBuilder):
         return splits

     def _generate_examples(self, filepath, local_extracted_archive, split_path, whisper_transcript):
-
-
+        whisper_transcriptions = dict()
         with open(whisper_transcript, encoding="utf-8") as f:
-
-
-
+            reader = csv.DictReader(f, delimiter=",", quoting=csv.QUOTE_NONE)
+            for line in reader:
+                whisper_transcriptions[line["file_id"]] = line["whisper_transcript"]

         """Generate examples from a TED-LIUM stm file."""
         if local_extracted_archive:
@@ -308,10 +307,9 @@
                         "gender": _parse_gender(label),
                         "file": audio_file,
                         "id": key,
-                        "whisper_transcript":
+                        "whisper_transcript": whisper_transcriptions.get(key, None)
                     }
                     yield key, example
-                    idx += 1

         else:
             audio_data = {}
@@ -359,7 +357,7 @@
                     audio = {"path": transcript["file"], "array": samples, "sampling_rate": sampling_rate}
                     key = transcript["id"]
                     transcript_text = transcript["text"]
-                    whisper_transcription =
+                    whisper_transcription = whisper_transcriptions.get(key, None) if transcript_text != "ignore_time_segment_in_scoring" else "ignore_time_segment_in_scoring"
                     yield key, {
                         "audio": audio,
                         "text": transcript_text,
@@ -369,7 +367,6 @@
                     "id": transcript["id"],
                     "whisper_transcript": whisper_transcription
                 }
-                idx += 1

                 audio_data = {}
                 transcripts = defaultdict(list)
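The first change points the loader at per-split Whisper pseudo-label files: _WHISPER_TRANSCRIPT_URLs is a template that resolves to one {split}-transcription.csv per split. A minimal sketch of that resolution (the split names below are an assumption based on the usual TED-LIUM splits, not something the diff shows):

_WHISPER_TRANSCRIPT_URL = "https://huggingface.co/datasets/distil-whisper/whisper_transcriptions_greedy/resolve/main/tedlium"
_WHISPER_TRANSCRIPT_URLs = _WHISPER_TRANSCRIPT_URL + "/{split}-transcription.csv"

# Assumed split names; the builder formats one URL per split it downloads.
for split in ("train", "validation", "test"):
    print(_WHISPER_TRANSCRIPT_URLs.format(split=split))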
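The core of the commit is in _generate_examples: the transcription CSV is read once into a dict keyed on file_id, and each generated example fetches its pseudo-label with whisper_transcriptions.get(key, None); in the streaming branch, segments whose reference text is the ignore_time_segment_in_scoring sentinel keep that sentinel instead. A self-contained sketch of this logic, with invented CSV rows and segment ids for illustration:

import csv
import io

# Invented sample in the expected format: a header row with
# "file_id" and "whisper_transcript" columns, one row per segment.
SAMPLE_CSV = """file_id,whisper_transcript
AlGore_2009-0000000-0000100,here is the first segment
BillGates_2010-0000000-0000200,and here is another one
"""

# Read the CSV once into a file_id -> transcription mapping, mirroring
# the new code. QUOTE_NONE takes fields verbatim; a comma inside a
# transcript would spill into DictReader's rest key, so the upstream
# files are assumed to be comma-free.
whisper_transcriptions = dict()
reader = csv.DictReader(io.StringIO(SAMPLE_CSV), delimiter=",", quoting=csv.QUOTE_NONE)
for line in reader:
    whisper_transcriptions[line["file_id"]] = line["whisper_transcript"]

# Per-example lookup: unknown ids yield None rather than raising, and
# (in the streaming branch) a reference text equal to the sentinel
# keeps the sentinel as its whisper_transcript too.
for key, text in [
    ("AlGore_2009-0000000-0000100", "here is the first segment"),
    ("BillGates_2010-0000000-0000200", "ignore_time_segment_in_scoring"),
    ("UnknownTalk-0000000-0000050", "a segment with no whisper output"),
]:
    whisper_transcription = (
        whisper_transcriptions.get(key, None)
        if text != "ignore_time_segment_in_scoring"
        else "ignore_time_segment_in_scoring"
    )
    print(key, "->", whisper_transcription)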
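Downstream, every example then carries a whisper_transcript field next to the reference text. A usage sketch (the repo id and config name are placeholders for wherever this tedlium.py is hosted):

from datasets import load_dataset

# Placeholder repo id and config name: point these at the Hub repo that
# hosts this tedlium.py (newer datasets versions may also require
# trust_remote_code=True for script-based datasets).
ds = load_dataset("distil-whisper/tedlium", "release3", split="validation", streaming=True)

sample = next(iter(ds))
print(sample["text"])                # reference transcription from the .stm file
print(sample["whisper_transcript"])  # pseudo-label read from {split}-transcription.csv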
|