Samuael committed on
Commit
a2fa501
·
1 Parent(s): 59c47a2

Minor change on script

Browse files
Files changed (1) hide show
  1. alffamharic_asr.py +22 -28
alffamharic_asr.py CHANGED
@@ -31,7 +31,7 @@ _CITATION = """\
31
  ldc_catalog_no={LDC93S1},
32
  DOI={https://doi.org/10.35111/17gk-bn40},
33
  journal={Linguistic Data Consortium, Philadelphia},
34
- year={2023}
35
  }
36
  """
37
 
@@ -39,8 +39,8 @@ _DESCRIPTION = """\
39
  The ALFFAAmharic corpus of reading speech has been developed to provide speech data for acoustic-phonetic research studies
40
  and for the evaluation of automatic speech recognition systems.
41
 
42
- ALFFAAmharic contains high quality recordings of 630 individuals/speakers with 8 different American English dialects,
43
- with each individual reading upto 10 phonetically rich sentences.
44
 
45
  More info on ALFFAAmharic dataset can be understood from the "README" which can be found here:
46
  https://catalog.ldc.upenn.edu/docs/LDC93S1/readme.txt
@@ -65,17 +65,17 @@ class ALFFAAmharicASRConfig(datasets.BuilderConfig):
65
 
66
 
67
  class ALFFAAmharic(datasets.GeneratorBasedBuilder):
68
- """TimitASR dataset."""
69
 
70
- BUILDER_CONFIGS = [TimitASRConfig(name="clean", description="'Clean' speech.")]
71
 
72
  @property
73
  def manual_download_instructions(self):
74
  return (
75
- "To use TIMIT you have to download it manually. "
76
  "Please create an account and download the dataset from https://catalog.ldc.upenn.edu/LDC93S1 \n"
77
  "Then extract all files in one folder and load the dataset with: "
78
- "`datasets.load_dataset('timit_asr', data_dir='path/to/folder/folder_name')`"
79
  )
80
 
81
  def _info(self):
@@ -86,7 +86,6 @@ class ALFFAAmharic(datasets.GeneratorBasedBuilder):
86
  "file": datasets.Value("string"),
87
  "audio": datasets.Audio(sampling_rate=16_000),
88
  "text": datasets.Value("string"),
89
- "speaker_id": datasets.Value("string"),
90
  "id": datasets.Value("string"),
91
  }
92
  ),
@@ -102,7 +101,7 @@ class ALFFAAmharic(datasets.GeneratorBasedBuilder):
102
 
103
  if not os.path.exists(data_dir):
104
  raise FileNotFoundError(
105
- f"{data_dir} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('timit_asr', data_dir=...)` that includes files unzipped from the TIMIT zip. Manual download instructions: {self.manual_download_instructions}"
106
  )
107
 
108
  return [
@@ -111,33 +110,28 @@ class ALFFAAmharic(datasets.GeneratorBasedBuilder):
111
  ]
112
 
113
  def _generate_examples(self, split, data_dir):
114
- """Generate examples from TIMIT archive_path based on the test/train csv information."""
115
-
116
-
117
-
118
  # Iterating the contents of the data to extract the relevant information
119
- wav_paths = sorted(Path(data_dir).glob(f"**/{split}/**/*.wav"))
120
- wav_paths = wav_paths if wav_paths else sorted(Path(data_dir).glob(f"**/{split.upper()}/**/*.WAV"))
121
- for key, wav_path in enumerate(wav_paths):
122
-
123
- # extract transcript
124
- txt_path = with_case_insensitive_suffix(wav_path, ".txt")
125
- with txt_path.open(encoding="utf-8") as op:
126
- transcript = " ".join(op.readlines()[0].split()[2:]) # first two items are sample number
127
-
128
- speaker_id = wav_path.parents[0].name[1:]
129
  id_ = wav_path.stem
130
-
131
- example = {
132
  "file": str(wav_path),
133
  "audio": str(wav_path),
134
  "text": transcript,
135
- "speaker_id": speaker_id,
136
  "id": id_,
137
  }
138
 
139
- yield key, example
140
-
141
 
142
  def with_case_insensitive_suffix(path: Path, suffix: str):
143
  path = path.with_suffix(suffix.lower())
 
31
  ldc_catalog_no={LDC93S1},
32
  DOI={https://doi.org/10.35111/17gk-bn40},
33
  journal={Linguistic Data Consortium, Philadelphia},
34
+ year={1983}
35
  }
36
  """
37
 
 
39
  The ALFFAAmharic corpus of reading speech has been developed to provide speech data for acoustic-phonetic research studies
40
  and for the evaluation of automatic speech recognition systems.
41
 
42
+ ALFFAAmharic contains high-quality recordings of 630 individuals/speakers with 8 different American English dialects,
43
+ with each individual reading up to 10 phonetically rich sentences.
44
 
45
  More info on ALFFAAmharic dataset can be understood from the "README" which can be found here:
46
  https://catalog.ldc.upenn.edu/docs/LDC93S1/readme.txt
 
65
 
66
 
67
  class ALFFAAmharic(datasets.GeneratorBasedBuilder):
68
+ """ALFFAAmharicASR dataset."""
69
 
70
+ BUILDER_CONFIGS = [ALFFAAmharicASRConfig(name="clean", description="'Clean' speech.")]
71
 
72
  @property
73
  def manual_download_instructions(self):
74
  return (
75
+ "To use ALFFAAmharic you have to download it manually. "
76
  "Please create an account and download the dataset from https://catalog.ldc.upenn.edu/LDC93S1 \n"
77
  "Then extract all files in one folder and load the dataset with: "
78
+ "`datasets.load_dataset('ALFFAAmharic_asr', data_dir='path/to/folder/folder_name')`"
79
  )
80
 
81
  def _info(self):
 
86
  "file": datasets.Value("string"),
87
  "audio": datasets.Audio(sampling_rate=16_000),
88
  "text": datasets.Value("string"),
 
89
  "id": datasets.Value("string"),
90
  }
91
  ),
 
101
 
102
  if not os.path.exists(data_dir):
103
  raise FileNotFoundError(
104
+ f"{data_dir} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('ALFFAAmharic_asr', data_dir=...)` that includes files unzipped from the ALFFAAmharic zip. Manual download instructions: {self.manual_download_instructions}"
105
  )
106
 
107
  return [
 
110
  ]
111
 
112
  def _generate_examples(self, split, data_dir):
113
+ """Generate examples from ALFFAAmharic archive_path based on the test/train csv information."""
114
+ file = open(f"{data_dir}{split}/text.txt", "r", encoding="utf-8")
114
+ lines = file.readlines()
115
+ file.close()
117
  # Iterating the contents of the data to extract the relevant information
118
+
119
+ for i in range(len(lines)):
120
+ splited = lines[i].split(" ")
121
+ if len(splited) < 2:
122
+ continue
123
+ wav_path = Path(f"{data_dir}{split}/wav/{splited[0]}.wav")
124
+ transcript = " ".join(splited[1:])
125
+
 
 
126
  id_ = wav_path.stem
127
+
128
+ yield i, {
129
  "file": str(wav_path),
130
  "audio": str(wav_path),
131
  "text": transcript,
 
132
  "id": id_,
133
  }
134
 
 
 
135
 
136
  def with_case_insensitive_suffix(path: Path, suffix: str):
137
  path = path.with_suffix(suffix.lower())