|
|
|
|
|
import pandas as pd |
|
|
|
import datasets |
|
from datasets.tasks import AutomaticSpeechRecognition |
|
|
|
|
|
# TODO: add the corpus citation (BibTeX) once published.
_CITATION = """\



"""

# TODO: add a short corpus description.
_DESCRIPTION = """\



"""

_HOMEPAGE = "https://zenodo.org/record/5541827"

_LICENSE = "Creative Commons Attribution 4.0 International"

# Base URL for raw files in the Hub dataset repo. NOTE: this must use
# "/resolve/main/" (raw file download), not "/tree/main/" (HTML directory
# listing) — downloading a "tree" URL returns an HTML page instead of the TSV.
_INDEX_REPO = "https://huggingface.co/datasets/proxectonos/Nos_Parlaspeech-GL/resolve/main/"

# URL templates, formatted with config (clean/other) and split (train/dev/test).
# "index" is an absolute URL; "audio" is relative and is resolved by the
# datasets library against the repository hosting this script.
_URLS = {
    "index": _INDEX_REPO + "data/{config}/{split}/{config}_{split}.tsv",
    "audio": "audio/{config}/{split}/{config}_{split}.tar",
}

# Maps the canonical datasets split names to the directory/file naming used
# in the repository ("validation" is stored as "dev").
_SPLITS = {datasets.Split.TRAIN: "train", datasets.Split.VALIDATION: "dev", datasets.Split.TEST: "test"}
|
|
|
|
|
class ParlaSpeech(datasets.GeneratorBasedBuilder):
    """Nos-ParlaSpeech: Galician parliamentary speech with transcriptions."""

    # datasets.Version requires an "x.y.z" string; "1.0" raises ValueError
    # at import time, so the patch component must be explicit.
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="clean", version=VERSION, description="XX hours of clean quality segments."),
        datasets.BuilderConfig(name="other", version=VERSION, description="XX hours of other quality segments."),
    ]

    def _info(self):
        """Return the dataset metadata (features, license, ASR task template)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "path": datasets.Value("string"),
                    "audio": datasets.features.Audio(),
                    "speaker_id": datasets.Value("int64"),
                    "sentence": datasets.Value("string"),
                    "gender": datasets.ClassLabel(names=["F", "M"]),
                    "duration": datasets.Value("float64"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[
                AutomaticSpeechRecognition(transcription_column="sentence")
            ],
        )

    def _split_generators(self, dl_manager):
        """Download the per-split index TSV and audio tar, one generator per split."""
        urls = {
            split: {key: url.format(config=self.config.name, split=_SPLITS[split]) for key, url in _URLS.items()}
            for split in _SPLITS
        }
        dl_dir = dl_manager.download(urls)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "index_path": dl_dir[split]["index"],
                    # Stream (path, file-object) pairs out of the tar without extracting.
                    "audio_files": dl_manager.iter_archive(dl_dir[split]["audio"]),
                },
            )
            for split in _SPLITS
        ]

    def _generate_examples(self, index_path, audio_files):
        """Yield (key, example) pairs by joining the TSV index with tar members.

        Args:
            index_path: local path to the tab-separated index keyed by "path".
            audio_files: iterator of (archive_path, file_object) pairs.
        """
        with open(index_path, encoding="utf-8") as index_file:
            index = pd.read_csv(index_file, delimiter="\t", index_col="path").to_dict(orient="index")

        for key, (path, file) in enumerate(audio_files):
            # Some archive members lack the ".wav" extension while the index
            # keys always carry it; normalize before the lookup. (Both branches
            # of the original code were otherwise identical.)
            if not path.endswith(".wav"):
                path = path + ".wav"
            # pop() so each index row is consumed exactly once; a missing row
            # raises KeyError, surfacing index/archive mismatches loudly.
            data = index.pop(path)
            audio = {"path": path, "bytes": file.read()}
            yield key, {"path": path, "audio": audio, **data}
|
|