|
import datasets
from datasets.utils.py_utils import size_str

# LANGUAGES and STATS are assumed to be plain dicts exposed by helper modules
# shipped alongside this script (e.g. LANGUAGES.py and STATS.py).
from LANGUAGES import LANGUAGES
from STATS import STATS
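
# Sketch of the structure the code below expects from those dicts (field names
# taken from how BUILDER_CONFIGS uses them; the values are hypothetical
# placeholders, not real release statistics):
#
#   LANGUAGES = {"hr": "Croatian", ...}
#   STATS = {
#       "version": "1.0.0",
#       "date": "2022-01-01",
#       "locales": {
#           "hr": {"clips": 0, "users": 0, "totalHrs": "0", "size": "0"},
#       },
#   }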
|
|
|
_HOMEPAGE = "homepage-info" |
|
_CITATION = "citation-info" |
|
_LICENSE = "license-info" |
|
_DESCRIPTION = "description-info" |
|
|
|
# URL placeholders. _PROMPTS_URLS must map each split name to the URL of its
# prompts/transcripts file, since _split_generators indexes it by split.
_PROMPTS_URLS = {"train": ".....", "test": "....."}
_DATA_URL = "...."
|
|
|
|
|
|
|
"""Configuration class, allows to have multiple configurations if needed""" |
|
class ParlaSpeechDatasetConfig(datasets.BuilderConfig): |
|
"""BuilderConfig for ParlaSpeech""" |
|
|
|
    def __init__(self, name, version, **kwargs):
        self.language = kwargs.pop("language", None)
        self.release_date = kwargs.pop("release_date", None)
        self.num_clips = kwargs.pop("num_clips", None)
        self.num_speakers = kwargs.pop("num_speakers", None)
        self.validated_hr = kwargs.pop("validated_hr", None)
        self.total_hr = kwargs.pop("total_hr", None)
        self.size_bytes = kwargs.pop("size_bytes", None)
        self.size_human = size_str(self.size_bytes)
        description = (
            f"ParlaSpeech is a dataset in {self.language} released on {self.release_date}. "
        )
        super().__init__(
            name=name,
            version=datasets.Version(version),
            description=description,
            **kwargs,
        )
|
|
|
|
|
class ParlaSpeechDataset(datasets.GeneratorBasedBuilder):
|
|
|
"""" |
|
### NO TENGO CLARO SI HACE FALTA ESTO ### |
|
DEFAULT_CONFIG_NAME = "all" |
|
|
|
BUILDER_CONFIGS = [ |
|
ParlaSpeechDatasetConfig( |
|
name=lang, |
|
version=STATS["version"], |
|
language=LANGUAGES[lang], |
|
release_date=STATS["date"], |
|
num_clips=lang_stats["clips"], |
|
num_speakers=lang_stats["users"], |
|
total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None, |
|
size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None, |
|
) |
|
for lang, lang_stats in STATS["locales"].items() |
|
] |
|
""" |
|
|
|
""" When the dataset is loaded and .info is called, the info defined here is displayed.""" |
|
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "sentence": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=self.config.version,
        )
|
|
|
" Used to organize the audio files and sentence prompts in each split, once downloaded the dataset." |
|
def _split_generators(self, dl_manager): |
|
"""Returns SplitGenerators""" |
|
prompts_paths = dl_manager.download(_PROMPTS_URLS) |
|
archive = dl_manager.download(_DATA_URL) |
|
|
|
        # NOTE: these directory names look like placeholders inherited from the
        # VIVOS example loader; they are assumed to match the actual layout of
        # the ParlaSpeech archive, i.e. <dir>/waves/<clip_id>.wav per split.
        train_dir = "vivos/train"
        test_dir = "vivos/test"
|
|
|
return [ |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TRAIN, |
|
gen_kwargs={ |
|
"prompts_path": prompts_paths["train"], |
|
"path_to_clips": train_dir + "/waves", |
|
"audio_files": dl_manager.iter_archive(archive), |
|
}, |
|
), |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TEST, |
|
gen_kwargs={ |
|
"prompts_path": prompts_paths["test"], |
|
"path_to_clips": test_dir + "/waves", |
|
"audio_files": dl_manager.iter_archive(archive), |
|
}, |
|
), |
|
] |
|
|
|
    def _generate_examples(self, prompts_path, path_to_clips, audio_files):
        """Yields examples as (key, example) tuples."""
        examples = {}
        with open(prompts_path, encoding="utf-8") as f:
            for row in f:
                # Each prompt line is assumed to hold a clip identifier followed by the
                # transcript, e.g. "DSPG_137_23122015_9873.69_9888.03 <sentence>".
                data = row.strip().split(" ", 1)
                audio_path = "/".join([path_to_clips, data[0] + ".wav"])
                examples[audio_path] = {
                    "path": audio_path,
                    "sentence": data[1],
                }
|
        inside_clips_dir = False
        id_ = 0
        # Iterate over the archive once; clips for a split are assumed to be stored
        # contiguously, so iteration stops as soon as the clips directory is passed.
        for path, f in audio_files:
|
if path.startswith(path_to_clips): |
|
inside_clips_dir = True |
|
if path in examples: |
|
audio = {"path": path, "bytes": f.read()} |
|
yield id_, {**examples[path], "audio": audio} |
|
id_ += 1 |
|
elif inside_clips_dir: |
|
break |
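

# Usage sketch (hypothetical): once this script and its helper modules are in
# place, a single language configuration could be loaded with something like
#
#   from datasets import load_dataset
#   ds = load_dataset("path/to/this/script.py", "hr", split="train")
#   print(ds.info.description)
#   print(ds[0]["sentence"], ds[0]["audio"]["sampling_rate"])
#
# where "hr" is assumed to be one of the locale keys in STATS["locales"].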
|
|