cdminix committed on
Commit
3e94f52
1 Parent(s): 963269a

Create libritts-phones-and-mel.py

Browse files
Files changed (1) hide show
  1. libritts-phones-and-mel.py +159 -0
libritts-phones-and-mel.py ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""LibriSpeech dataset with phone alignments, prosody and mel spectrograms."""

import os
from pathlib import Path
import hashlib
import pickle

import datasets
import pandas as pd
import numpy as np
from tqdm.contrib.concurrent import process_map
from tqdm.auto import tqdm
from multiprocessing import cpu_count
from PIL import Image

logger = datasets.logging.get_logger(__name__)

_VERSION = "0.0.1"

_CITATION = """\
@inproceedings{48008,
title = {LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech},
author = {Heiga Zen and Rob Clark and Ron J. Weiss and Viet Dang and Ye Jia and Yonghui Wu and Yu Zhang and Zhifeng Chen},
year = {2019},
URL = {https://arxiv.org/abs/1904.02882},
booktitle = {Interspeech}
}
"""

_DESCRIPTION = """\
Dataset containing Mel Spectrograms, Prosody and Phone Alignments for the LibriTTS dataset.
"""

# Every split archive lives in the same repository directory; its file name is
# the split name with the dots replaced by underscores.
_BASE_URL = (
    "https://huggingface.co/datasets/cdminix/libritts-phones-and-mel/resolve/main/data"
)

_SPLIT_NAMES = [
    "dev.clean",
    "dev.other",
    "test.clean",
    "test.other",
    "train.clean.100",
    "train.clean.360",
    "train.other.500",
]

# Split name -> download URL of the corresponding tar.gz archive.
_URLS = {
    split: f"{_BASE_URL}/{split.replace('.', '_')}.tar.gz" for split in _SPLIT_NAMES
}
43
+
44
+
45
class LibriTTSConfig(datasets.BuilderConfig):
    """BuilderConfig for the LibriTTS phones-and-mel dataset.

    Args:
        **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        # BUG FIX: the original called ``super(LibrispeechConfig, self)`` --
        # ``LibrispeechConfig`` is not defined anywhere in this file, so any
        # instantiation raised NameError. Zero-argument super() is correct.
        super().__init__(**kwargs)
54
+
55
+
56
class LibriTTS(datasets.GeneratorBasedBuilder):
    """Builder yielding mel spectrograms, prosody and phone alignments for LibriTTS.

    Each example references companion files that live next to the utterance's
    ``<utt>_mel.png`` file. All feature values are plain strings (file paths
    or text), matching the features declared in ``_info``.
    """

    BUILDER_CONFIGS = [
        LibriTTSConfig(
            name="libritts",
            version=datasets.Version(_VERSION, ""),
        ),
    ]

    def _info(self):
        """Return the dataset metadata; every feature is stored as a string."""
        features = {
            "id": datasets.Value("string"),
            "speaker_id": datasets.Value("string"),
            "chapter_id": datasets.Value("string"),
            "phones": datasets.Value("string"),
            "mel": datasets.Value("string"),
            "prosody": datasets.Value("string"),
            "speaker_utterance": datasets.Value("string"),
            "mean_speaker_utterance": datasets.Value("string"),
            "mean_speaker": datasets.Value("string"),
            "text": datasets.Value("string"),
        }

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/cdminix/libritts-phones-and-mel",
            citation=_CITATION,
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        """Download and extract each split archive; one generator per split."""
        return [
            datasets.SplitGenerator(
                name=key,
                gen_kwargs={"data_path": dl_manager.download_and_extract(value)},
            )
            for key, value in _URLS.items()
        ]

    @staticmethod
    def _sibling(mel_path, suffix):
        """Return (as str) the path of a companion file of ``<utt>_mel.png``."""
        return str(mel_path).replace("_mel.png", suffix)

    def _df_from_path(self, path):
        """Gather metadata and companion-file paths for a single utterance.

        Assumes the on-disk layout ``.../<speaker>/<chapter>/<utt>_mel.png``
        (speaker id two levels up, chapter id one level up -- TODO confirm
        against the extracted archives).

        Returns a dict of plain strings so the values match the string
        features declared in ``_info`` (the original returned ``Path``
        objects for several fields).
        """
        mel_path = Path(path)
        # Transcript is lower-cased so downstream text handling is case-free.
        text = Path(self._sibling(mel_path, "_text.txt")).read_text().lower()

        return {
            "id": self._sibling(mel_path, ""),
            "speaker_id": mel_path.parent.parent.name,
            "chapter_id": mel_path.parent.name,
            "phones": self._sibling(mel_path, "_phones.npy"),
            "mel": str(mel_path),
            "prosody": self._sibling(mel_path, "_prosody.png"),
            # Temporal (per-frame) speaker representation stored as an image...
            "speaker_utterance": self._sibling(mel_path, "_speaker.png"),
            # ...and the utterance-level speaker vector stored as a numpy file.
            "mean_speaker_utterance": self._sibling(mel_path, "_speaker.npy"),
            # One shared file per speaker, created lazily by _create_mean_speaker.
            "mean_speaker": str(mel_path.parent.parent / "mean_speaker.npy"),
            "text": text,
        }

    def _df_from_paths_mp(self, paths):
        """Read utterance metadata for all paths in parallel into a DataFrame."""
        records = process_map(
            self._df_from_path,
            paths,
            desc="Reading files",
            max_workers=cpu_count(),
            chunksize=100,
        )
        return pd.DataFrame(records)

    def _create_mean_speaker(self, df):
        """Compute and cache, once per speaker, the mean of its utterance vectors."""
        for _, speaker_df in df.groupby("speaker_id"):
            target = Path(speaker_df["mean_speaker"].iloc[0])
            if target.exists():
                continue  # already computed on a previous run
            mean_speaker = np.mean(
                np.array(
                    [np.load(path) for path in speaker_df["mean_speaker_utterance"]]
                ),
                axis=0,
            )
            np.save(str(target), mean_speaker)

    def _generate_examples(self, data_path):
        """Generate (utterance id, example dict) pairs for one split."""
        logger.info("⏳ Generating examples from = %s", data_path)

        # Sorted for a deterministic example order across runs.
        paths = sorted(Path(data_path).rglob("*_mel.png"))

        df = self._df_from_paths_mp(paths)

        self._create_mean_speaker(df)

        for _, row in tqdm(df.iterrows(), desc="Generating examples"):
            yield row["id"], row.to_dict()