# parlaspeech-tests / my_dataset.py
import datasets
from datasets.utils.py_utils import size_str

# LANGUAGES and STATS are assumed to live in sibling modules of this script
# (mirroring the Common Voice loading-script layout); adjust the imports if
# they are defined elsewhere.
from .languages import LANGUAGES
from .release_stats import STATS
_HOMEPAGE = "homepage-info"
_CITATION = "citation-info"
_LICENSE = "license-info"
_DESCRIPTION = "description-info"
# _split_generators looks up the prompt file per split, so the URLs are kept in a
# dict keyed by split name (actual URLs still to be filled in).
_PROMPTS_URLS = {"train": ".....", "test": "....."}
_DATA_URL = "...."
"""Configuration class, allows to have multiple configurations if needed"""
class ParlaSpeechDatasetConfig(datasets.BuilderConfig):
"""BuilderConfig for ParlaSpeech"""
def __init__(self, name, version, **kwargs):
self.language = kwargs.pop("language", None)
self.release_date = kwargs.pop("release_date", None)
self.num_clips = kwargs.pop("num_clips", None)
self.num_speakers = kwargs.pop("num_speakers", None)
self.validated_hr = kwargs.pop("validated_hr", None)
self.total_hr = kwargs.pop("total_hr", None)
self.size_bytes = kwargs.pop("size_bytes", None)
self.size_human = size_str(self.size_bytes)
description = ( ##Update Description in the final version
f"ParlaSpeech is a dataset in {self.language} released on {self.release_date}. "
)
super(ParlaSpeechDatasetConfig, self).__init__(
name=name,
version=datasets.Version(version),
description=description,
**kwargs,
)
class ParlaSpeechDataset(datasets.GeneratorBasedBuilder):
    """Dataset loading script for ParlaSpeech."""

    # ### NOT SURE WHETHER THIS IS NEEDED ###
    # DEFAULT_CONFIG_NAME = "all"
    # BUILDER_CONFIGS = [
    #     ParlaSpeechDatasetConfig(
    #         name=lang,
    #         version=STATS["version"],
    #         language=LANGUAGES[lang],
    #         release_date=STATS["date"],
    #         num_clips=lang_stats["clips"],
    #         num_speakers=lang_stats["users"],
    #         total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
    #         size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
    #     )
    #     for lang, lang_stats in STATS["locales"].items()
    # ]
""" When the dataset is loaded and .info is called, the info defined here is displayed."""
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
#"speaker_id": datasets.Value("string"),
#"path": datasets.Value("string"),
"path": datasets.Audio(sampling_rate=16_000),
"sentence": datasets.Value("string"),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
            version=self.config.version,
)
" Used to organize the audio files and sentence prompts in each split, once downloaded the dataset."
def _split_generators(self, dl_manager):
"""Returns SplitGenerators"""
prompts_paths = dl_manager.download(_PROMPTS_URLS)
archive = dl_manager.download(_DATA_URL)
## local_extracted_archives = dl_manager.extract(archive)
train_dir = "vivos/train"
test_dir = "vivos/test"
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"prompts_path": prompts_paths["train"],
"path_to_clips": train_dir + "/waves",
"audio_files": dl_manager.iter_archive(archive),
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"prompts_path": prompts_paths["test"],
"path_to_clips": test_dir + "/waves",
"audio_files": dl_manager.iter_archive(archive),
},
),
]
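
    # For reference: the split generators above assume the downloaded archive keeps the
    # directory layout of the VIVOS example this script started from (an assumption;
    # adjust train_dir/test_dir once the real ParlaSpeech archive layout is known):
    #
    #   vivos/
    #       train/waves/<clip_name>.wav
    #       test/waves/<clip_name>.wav
    #
    # dl_manager.iter_archive streams (path, file object) pairs without extracting the
    # archive, which is why _generate_examples matches each path against the prompt
    # table instead of opening files by name.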
def _generate_examples(self, prompts_path, path_to_clips, audio_files):
"""Yields examples as (key, example) tuples."""
examples = {}
with open(prompts_path, encoding="utf-8") as f: ##prompts_path -> transcript.tsv
for row in f:
data = row.strip().split(" ", 1)
#speaker_id = data[0].split("_")[0]
#audio_path = "/".join([path_to_clips, speaker_id, data[0] + ".wav"])
audio_path = "/".join([path_to_clips, "DSPG_137_23122015_9873.69_9888.03.wav"])
examples[audio_path] = {
#"speaker_id": speaker_id,
"path": audio_path,
"sentence": data[1],
}
inside_clips_dir = False
id_ = 0
for path, f in audio_files:
if path.startswith(path_to_clips):
inside_clips_dir = True
                if path in examples:
                    # "path" is declared as an Audio feature in _info, so it is filled
                    # with the bytes read from the archive (the earlier string value is
                    # overridden) instead of emitting a separate "audio" key.
                    audio = {"path": path, "bytes": f.read()}
                    yield id_, {**examples[path], "path": audio}
                    id_ += 1
elif inside_clips_dir:
break
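

# A minimal usage sketch, not part of the loading script itself. It assumes this file is
# saved locally as my_dataset.py and that the placeholder URLs above have been filled in;
# since "path" is an Audio feature, each example decodes to a dict with "array",
# "sampling_rate" and "path".
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset(__file__, split="train")
    print(ds.info.description)              # _DESCRIPTION defined above
    print(ds[0]["sentence"])                # transcript text for the first clip
    print(ds[0]["path"]["sampling_rate"])   # 16_000, as declared in _info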