Update files from the datasets library (from 1.13.3)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.13.3
README.md
CHANGED
@@ -204,6 +204,9 @@ and its transcription, called `text`. Some additional information about the spea
     'words', ["hmm", "hmm", ...]
     'channels': [0, 0, ..],
     'file': "/.cache/huggingface/datasets/downloads/af7e748544004557b35eef8b0522d4fb2c71e004b82ba8b7343913a15def465f"
+    'audio': {'path': "/.cache/huggingface/datasets/downloads/af7e748544004557b35eef8b0522d4fb2c71e004b82ba8b7343913a15def465f",
+              'array': array([-0.00048828, -0.00018311, -0.00137329, ..., 0.00079346, 0.00091553, 0.00085449], dtype=float32),
+              'sampling_rate': 16000},
 }
 ```
 
@@ -231,6 +234,8 @@ and its transcription, called `text`. Some additional information about the spea
 
 - file: a path to the audio file
 
+- audio: A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: `dataset[0]["audio"]` the audio file is automatically decoded and resampled to `dataset.features["audio"].sampling_rate`. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the `"audio"` column, *i.e.* `dataset[0]["audio"]` should **always** be preferred over `dataset["audio"][0]`.
+
 ### Data Splits
 
 The dataset consists of several configurations, each one having train/validation/test splits:
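The new `audio` column documented above is decoded lazily. A minimal sketch of the access pattern recommended in the note, assuming the `ami` loading script and the `headset-single` configuration from this repository:

```python
from datasets import load_dataset

# Illustrative only: the dataset id and config name are taken from this repo,
# and the first call downloads the (large) AMI audio archives.
ami = load_dataset("ami", "headset-single", split="train")

# Query the sample index first, then the "audio" column: only this one file is
# decoded and resampled to ami.features["audio"].sampling_rate (16 kHz here).
sample = ami[0]["audio"]
print(sample["path"])           # cached path to the audio file
print(sample["array"][:5])      # decoded waveform (float32 numpy array)
print(sample["sampling_rate"])  # 16000

# ami["audio"][0] would decode every file in the split before indexing,
# so ami[0]["audio"] is preferred, as the README note says.
```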
ami.py
CHANGED
@@ -318,6 +318,7 @@ class AMI(datasets.GeneratorBasedBuilder):
 
         if self.config.name == "headset-single":
             features_dict.update({"file": datasets.Value("string")})
+            features_dict.update({"audio": datasets.features.Audio(sampling_rate=16_000)})
             config_description = (
                 "Close talking audio of single headset. "
                 "This configuration only includes audio belonging to the "
@@ -325,38 +326,23 @@
             )
         elif self.config.name == "microphone-single":
             features_dict.update({"file": datasets.Value("string")})
+            features_dict.update({"audio": datasets.features.Audio(sampling_rate=16_000)})
             config_description = (
                 "Far field audio of single microphone. "
                 "This configuration only includes audio belonging the first microphone, "
                 "*i.e.* 1-1, of the microphone array."
             )
         elif self.config.name == "headset-multi":
-            features_dict.update(
-                {
-                    "file-0": datasets.Value("string"),
-                    "file-1": datasets.Value("string"),
-                    "file-2": datasets.Value("string"),
-                    "file-3": datasets.Value("string"),
-                }
-            )
+            features_dict.update({f"file-{i}": datasets.Value("string") for i in range(4)})
+            features_dict.update({f"audio-{i}": datasets.features.Audio(sampling_rate=16_000) for i in range(4)})
             config_description = (
                 "Close talking audio of four individual headset. "
                 "This configuration includes audio belonging to four individual headsets."
                 " For each annotation there are 4 audio files 0, 1, 2, 3."
             )
         elif self.config.name == "microphone-multi":
-            features_dict.update(
-                {
-                    "file-1-1": datasets.Value("string"),
-                    "file-1-2": datasets.Value("string"),
-                    "file-1-3": datasets.Value("string"),
-                    "file-1-4": datasets.Value("string"),
-                    "file-1-5": datasets.Value("string"),
-                    "file-1-6": datasets.Value("string"),
-                    "file-1-7": datasets.Value("string"),
-                    "file-1-8": datasets.Value("string"),
-                }
-            )
+            features_dict.update({f"file-1-{i}": datasets.Value("string") for i in range(1, 8)})
+            features_dict.update({f"audio-1-{i}": datasets.features.Audio(sampling_rate=16_000) for i in range(1, 8)})
             config_description = (
                 "Far field audio of microphone array. "
                 "This configuration includes audio of "
@@ -570,11 +556,13 @@
             }
 
             if self.config.name in ["headset-single", "microphone-single"]:
-                result.update({"file": samples_paths_dict[_id][0]})
+                result.update({"file": samples_paths_dict[_id][0], "audio": samples_paths_dict[_id][0]})
             elif self.config.name in ["headset-multi"]:
                 result.update({f"file-{i}": samples_paths_dict[_id][i] for i in range(num_audios)})
+                result.update({f"audio-{i}": samples_paths_dict[_id][i] for i in range(num_audios)})
             elif self.config.name in ["microphone-multi"]:
                 result.update({f"file-1-{i+1}": samples_paths_dict[_id][i] for i in range(num_audios)})
+                result.update({f"audio-1-{i+1}": samples_paths_dict[_id][i] for i in range(num_audios)})
             else:
                 raise ValueError(f"Configuration {self.config.name} does not exist.")
 
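For reference, the pattern applied in `ami.py` above, declaring a plain string `file` column next to a `datasets.features.Audio` column and yielding the same local path for both, can be sketched in isolation. This is an illustrative snippet with made-up names, not part of the AMI script:

```python
import datasets

# Minimal sketch of the feature/yield pattern in the diff above: the Audio
# feature stores the path and decodes it to {"path", "array", "sampling_rate"}
# only when an example is accessed.
features = datasets.Features(
    {
        "file": datasets.Value("string"),
        "audio": datasets.features.Audio(sampling_rate=16_000),
    }
)

def _generate_examples(audio_paths):
    # `audio_paths` is a hypothetical list of local audio file paths.
    for idx, path in enumerate(audio_paths):
        yield idx, {"file": path, "audio": path}
```

Keeping the existing `file` column alongside the new `audio` column means code that only needs the path keeps working, while the `audio` column takes care of decoding and resampling to 16 kHz.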