File size: 4,995 Bytes
e9d61e0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91bc1a9
 
e9d61e0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91bc1a9
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
import LANGUAGES as LANGUAGES
import STATS as STATS
import datasets as datasets
from datasets.utils.py_utils import size_str

# Dataset-card metadata surfaced through `datasets.DatasetInfo`.
# TODO(review): all four are placeholders — fill in before publishing the loader.
_HOMEPAGE = "homepage-info"
_CITATION = "citation-info"
_LICENSE = "license-info"
_DESCRIPTION = "description-info"

# Remote locations of the transcript files and the audio archive.
# TODO(review): placeholders — point at the real hosted files before release.
_PROMPTS_URLS = "....."
_DATA_URL = "...."



"""Configuration class, allows to have multiple configurations if needed"""
class ParlaSpeechDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for ParlaSpeech"""

    def __init__(self, name, version, **kwargs):
        self.language = kwargs.pop("language", None)
        self.release_date = kwargs.pop("release_date", None)
        self.num_clips = kwargs.pop("num_clips", None)
        self.num_speakers = kwargs.pop("num_speakers", None)
        self.validated_hr = kwargs.pop("validated_hr", None)
        self.total_hr = kwargs.pop("total_hr", None)
        self.size_bytes = kwargs.pop("size_bytes", None)
        self.size_human = size_str(self.size_bytes)
        description = ( ##Update Description in the final version
            f"ParlaSpeech is a dataset in {self.language} released on {self.release_date}. "
        )
        super(ParlaSpeechDatasetConfig, self).__init__(
            name=name,
            version=datasets.Version(version),
            description=description,
            **kwargs,
        )


class ParlaSpeechDataset(datasets.GeneratorBasedBuilder):
    """GeneratorBasedBuilder for the ParlaSpeech speech-recognition dataset.

    Downloads transcript files and an audio archive, then yields
    ``(path, audio, sentence)`` examples for the train and test splits.
    """

    # TODO(review): per-language configs drafted below; kept commented out
    # until the LANGUAGES/STATS metadata modules are finalized.
    # ("NO TENGO CLARO SI HACE FALTA ESTO" — unclear whether this is needed.)
    #
    # DEFAULT_CONFIG_NAME = "all"
    #
    # BUILDER_CONFIGS = [
    #     ParlaSpeechDatasetConfig(
    #         name=lang,
    #         version=STATS["version"],
    #         language=LANGUAGES[lang],
    #         release_date=STATS["date"],
    #         num_clips=lang_stats["clips"],
    #         num_speakers=lang_stats["users"],
    #         total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
    #         size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
    #     )
    #     for lang, lang_stats in STATS["locales"].items()
    # ]

    def _info(self):
        """Return the dataset metadata displayed when ``.info`` is called.

        NOTE: features must mirror the dicts yielded by
        ``_generate_examples``, which emit "path" (string), "audio"
        (decoded audio) and "sentence" keys.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # "speaker_id": datasets.Value("string"),  # re-enable if speaker ids are restored
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "sentence": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=self.config.version,
        )

    def _split_generators(self, dl_manager):
        """Return SplitGenerators.

        Organizes the audio files and sentence prompts in each split once
        the dataset has been downloaded.
        """
        prompts_paths = dl_manager.download(_PROMPTS_URLS)
        archive = dl_manager.download(_DATA_URL)
        # local_extracted_archives = dl_manager.extract(archive)  # streaming: iterate instead
        # TODO(review): directory names inherited from the vivos template —
        # confirm they match the ParlaSpeech archive layout.
        train_dir = "vivos/train"
        test_dir = "vivos/test"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "prompts_path": prompts_paths["train"],
                    "path_to_clips": train_dir + "/waves",
                    "audio_files": dl_manager.iter_archive(archive),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "prompts_path": prompts_paths["test"],
                    "path_to_clips": test_dir + "/waves",
                    "audio_files": dl_manager.iter_archive(archive),
                },
            ),
        ]

    def _generate_examples(self, prompts_path, path_to_clips, audio_files):
        """Yield examples as ``(key, example)`` tuples.

        Args:
            prompts_path: Path to the transcript file; each row is
                ``<clip-id> <sentence>`` separated by the first space.
            path_to_clips: Archive-internal directory containing the clips.
            audio_files: Iterator of ``(path, file-object)`` pairs over the
                downloaded archive (``dl_manager.iter_archive``).
        """
        examples = {}
        with open(prompts_path, encoding="utf-8") as f:  # prompts_path -> transcript.tsv
            for row in f:
                data = row.strip().split(" ", 1)
                if len(data) != 2:
                    # Skip blank/malformed rows instead of raising IndexError.
                    continue
                # speaker_id = data[0].split("_")[0]  # re-enable if clips move into per-speaker subdirs
                audio_path = "/".join([path_to_clips, data[0] + ".wav"])
                examples[audio_path] = {
                    # "speaker_id": speaker_id,
                    "path": audio_path,
                    "sentence": data[1],
                }
        # Archive members are grouped by directory: once we have entered and
        # then left the clips directory, no further matches are possible.
        inside_clips_dir = False
        id_ = 0
        for path, f in audio_files:
            if path.startswith(path_to_clips):
                inside_clips_dir = True
                if path in examples:
                    audio = {"path": path, "bytes": f.read()}
                    yield id_, {**examples[path], "audio": audio}
                    id_ += 1
            elif inside_clips_dir:
                break