Samuael committed
Commit 31b1eb5 · Parent: 613e985

Upload alffamharic_asr.py

Files changed (1)
  1. alffamharic_asr.py +145 -0
alffamharic_asr.py ADDED
@@ -0,0 +1,145 @@
+ # coding=utf-8
+ # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """ALFFAAmharic automatic speech recognition dataset."""
+
+
+ import os
+ from pathlib import Path
+
+ import datasets
+ from datasets.tasks import AutomaticSpeechRecognition
+
+
+ _CITATION = """\
+ @misc{ldc93s1,
+ title={ALFFAAmharic Acoustic-Phonetic Continuous Speech Corpus},
+ author={Garofolo, John S. and others},
+ ldc_catalog_no={LDC93S1},
+ DOI={https://doi.org/10.35111/17gk-bn40},
+ journal={Linguistic Data Consortium, Philadelphia},
+ year={1983}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The ALFFAAmharic corpus of read speech has been developed to provide speech data for acoustic-phonetic research
+ and for the evaluation of automatic speech recognition systems.
+
+ ALFFAAmharic contains high-quality recordings of 630 speakers with 8 different American English dialects,
+ with each speaker reading up to 10 phonetically rich sentences.
+
+ More information on the ALFFAAmharic dataset is available in the README here:
+ https://catalog.ldc.upenn.edu/docs/LDC93S1/readme.txt
+ """
+
+ _HOMEPAGE = "https://catalog.ldc.upenn.edu/LDC93S1"
+
+
+ class ALFFAAmharicASRConfig(datasets.BuilderConfig):
+     """BuilderConfig for ALFFAAmharicASR."""
+
+     def __init__(self, **kwargs):
+         """
+         Args:
+             data_dir: `string`, the path to the folder containing the files in the
+                 downloaded .tar
+             citation: `string`, citation for the data set
+             url: `string`, url for information about the data set
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(ALFFAAmharicASRConfig, self).__init__(version=datasets.Version("2.0.1", ""), **kwargs)
+
+
+ class ALFFAAmharic(datasets.GeneratorBasedBuilder):
+     """ALFFAAmharic ASR dataset."""
+
+     BUILDER_CONFIGS = [ALFFAAmharicASRConfig(name="clean", description="'Clean' speech.")]
+
+     @property
+     def manual_download_instructions(self):
+         return (
+             "To use ALFFAAmharic you have to download it manually. "
+             "Please create an account and download the dataset from https://catalog.ldc.upenn.edu/LDC93S1 \n"
+             "Then extract all files in one folder and load the dataset with: "
+             "`datasets.load_dataset('alffamharic_asr', data_dir='path/to/folder/folder_name')`"
+         )
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16_000),
+                     "text": datasets.Value("string"),
+                     "speaker_id": datasets.Value("string"),
+                     "id": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=("file", "text"),
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+             task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
+         )
+
+     def _split_generators(self, dl_manager):
+         data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
+
+         if not os.path.exists(data_dir):
+             raise FileNotFoundError(
+                 f"{data_dir} does not exist. Make sure you insert a manual dir via "
+                 f"`datasets.load_dataset('alffamharic_asr', data_dir=...)` that includes files extracted from the "
+                 f"downloaded archive. Manual download instructions: {self.manual_download_instructions}"
+             )
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"split": "train", "data_dir": data_dir}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"split": "test", "data_dir": data_dir}),
+         ]
+
+     def _generate_examples(self, split, data_dir):
+         """Generate examples by walking the train/test directories under `data_dir`."""
+
+         # Collect the audio files for this split; fall back to the upper-case
+         # directory/extension layout if the lower-case glob finds nothing.
+         wav_paths = sorted(Path(data_dir).glob(f"**/{split}/**/*.wav"))
+         wav_paths = wav_paths if wav_paths else sorted(Path(data_dir).glob(f"**/{split.upper()}/**/*.WAV"))
+
+         for key, wav_path in enumerate(wav_paths):
+             # Extract the transcript from the sibling .txt file; the first two
+             # items on the line are the start/end sample numbers.
+             txt_path = with_case_insensitive_suffix(wav_path, ".txt")
+             with txt_path.open(encoding="utf-8") as op:
+                 transcript = " ".join(op.readlines()[0].split()[2:])
+
+             # The parent directory name, minus its leading character, identifies the speaker.
+             speaker_id = wav_path.parents[0].name[1:]
+             id_ = wav_path.stem
+
+             example = {
+                 "file": str(wav_path),
+                 "audio": str(wav_path),
+                 "text": transcript,
+                 "speaker_id": speaker_id,
+                 "id": id_,
+             }
+
+             yield key, example
+
+
+ def with_case_insensitive_suffix(path: Path, suffix: str):
+     """Return `path` with the given suffix, preferring the lower-case variant and falling back to upper case."""
+     path = path.with_suffix(suffix.lower())
+     path = path if path.exists() else path.with_suffix(suffix.upper())
+     return path
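
For reference, a minimal usage sketch of the loading script above. The local script path and the extracted-data directory are placeholder assumptions, not part of the upload; the `data_dir` argument is what `_split_generators` receives as `dl_manager.manual_dir`.

# Minimal usage sketch (assumption: the corpus has already been downloaded
# and extracted by hand into /path/to/extracted, with train/ and test/
# directories holding paired .wav/.txt files as _generate_examples expects).
from datasets import load_dataset

# Point the loader at the uploaded script; `data_dir` becomes `dl_manager.manual_dir`.
dataset = load_dataset("./alffamharic_asr.py", data_dir="/path/to/extracted")

sample = dataset["train"][0]
print(sample["id"])                      # the .wav file stem
print(sample["speaker_id"])              # parent directory name minus its first character
print(sample["text"])                    # transcript with the two leading sample numbers stripped
print(sample["audio"]["sampling_rate"])  # 16000, per datasets.Audio in _info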