cdminix committed
Commit 1ea004e
Parent: 6358bd1

initial commit

Files changed (2):
  1. README.md +19 -0
  2. libritts-aligned.py +311 -0
README.md ADDED
@@ -0,0 +1,19 @@
+ ---
+ pretty_name: LibriTTS Corpus with Forced Alignments
+ annotations_creators:
+ - crowdsourced
+ language: en
+ tags:
+ - speech
+ - audio
+ - automatic-speech-recognition
+ - speech-synthesis
+ license:
+ - cc-by-4.0
+ task_categories:
+ - automatic-speech-recognition
+ - speech-synthesis
+ extra_gated_prompt: "When using this dataset to download LibriTTS, you agree to the terms on https://www.openslr.org"
+ ---
+
+ # Dataset Card for LibriTTS with Forced Alignments (and Measures)
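
A minimal usage sketch for the loader added below, assuming the script is served from a Hub repository (the `cdminix/libritts-aligned` id and the local path are illustrative assumptions, not confirmed by this commit):

```python
import os

# The loading script reads METTS_PATH at import time, so set it first.
os.environ["METTS_PATH"] = "/data/libritts"  # hypothetical local path

from datasets import load_dataset

# Split names mirror the LibriTTS subsets with dashes replaced by dots
# (e.g. "dev.clean", "train.clean.100"); "train.all"/"dev.all"/"test.all"
# aggregate subsets, and "train"/"dev" hold a speaker-wise held-out split.
dataset = load_dataset("cdminix/libritts-aligned", split="dev.clean")

example = dataset[0]
print(example["phones"][:5], example["phone_durations"][:5])
```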
libritts-aligned.py ADDED
@@ -0,0 +1,311 @@
+ """LibriTTS dataset with forced alignments."""
+
+ import os
+ from pathlib import Path
+ import hashlib
+ import pickle
+
+ import datasets
+ import pandas as pd
+ import numpy as np
+ from alignments.datasets.libritts import LibrittsDataset
+ from tqdm.contrib.concurrent import process_map
+ from tqdm.auto import tqdm
+ from multiprocessing import cpu_count
+ from phones.convert import Converter
+ import torchaudio
+ import torchaudio.transforms as AT
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _PHONESET = "arpabet"
+
+ _VERBOSE = os.environ.get("METTS_VERBOSE", True)
+
+ # values from the environment arrive as strings, so coerce to int
+ _MAX_WORKERS = int(os.environ.get("METTS_MAX_WORKERS", cpu_count()))
+
+ _VERSION = "1.0.0"
+
+ _PATH = os.environ.get("METTS_PATH", os.environ.get("HF_DATASETS_CACHE", None))
+ if _PATH is not None and not os.path.exists(_PATH):
+     os.makedirs(_PATH)
+
+ _NO_MEASURES = os.environ.get("METTS_NO_MEASURES", False)
+
+ _CITATION = """\
+ @article{zen2019libritts,
+   title={LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech},
+   author={Zen, Heiga and Dang, Viet and Clark, Rob and Zhang, Yu and Weiss, Ron J and Jia, Ye and Chen, Zhifeng and Wu, Yonghui},
+   journal={Interspeech},
+   year={2019}
+ }
+ @article{https://doi.org/10.48550/arxiv.2211.16049,
+   author = {Minixhofer, Christoph and Klejch, Ondřej and Bell, Peter},
+   title = {Evaluating and reducing the distance between synthetic and real speech distributions},
+   year = {2022}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Dataset for loading TTS spectrograms and waveform audio with forced alignments and a number of configurable "measures" extracted from the raw audio.
+ """
+
+ _URL = "https://www.openslr.org/resources/60/"
+ _URLS = {
+     "dev-clean": _URL + "dev-clean.tar.gz",
+     "dev-other": _URL + "dev-other.tar.gz",
+     "test-clean": _URL + "test-clean.tar.gz",
+     "test-other": _URL + "test-other.tar.gz",
+     "train-clean-100": _URL + "train-clean-100.tar.gz",
+     "train-clean-360": _URL + "train-clean-360.tar.gz",
+     "train-other-500": _URL + "train-other-500.tar.gz",
+ }
+
+
+ class MeTTSConfig(datasets.BuilderConfig):
+     """BuilderConfig for MeTTS."""
+
+     def __init__(self, sampling_rate=22050, hop_length=256, win_length=1024, **kwargs):
+         """BuilderConfig for MeTTS.
+
+         Args:
+             sampling_rate: sampling rate of the audio, used to convert phone boundaries to frames.
+             hop_length: hop length (in samples) between spectrogram frames.
+             win_length: window length (in samples) of a spectrogram frame.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(**kwargs)
+
+         self.sampling_rate = sampling_rate
+         self.hop_length = hop_length
+         self.win_length = win_length
+
+         if _PATH is None:
+             raise ValueError("Please set the environment variable METTS_PATH to point to the MeTTS dataset directory.")
+         elif _PATH == os.environ.get("HF_DATASETS_CACHE", None):
+             logger.warning("Please set the environment variable METTS_PATH to point to the MeTTS dataset directory. Using HF_DATASETS_CACHE as a fallback.")
+
+ class MeTTS(datasets.GeneratorBasedBuilder):
+     """MeTTS dataset."""
+
+     BUILDER_CONFIGS = [
+         MeTTSConfig(
+             name="libritts",
+             version=datasets.Version(_VERSION, ""),
+         ),
+     ]
+
+     def _info(self):
+         features = {
+             "id": datasets.Value("string"),
+             "speaker": datasets.Value("string"),
+             "text": datasets.Value("string"),
+             "start": datasets.Value("float32"),
+             "end": datasets.Value("float32"),
+             # phone features
+             "phones": datasets.Sequence(datasets.Value("string")),
+             "phone_durations": datasets.Sequence(datasets.Value("int32")),
+             # audio feature
+             "audio": datasets.Value("string"),  # datasets.Audio(sampling_rate=self.config.sampling_rate)
+         }
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(features),
+             supervised_keys=None,
+             homepage="https://github.com/MiniXC/MeTTS",
+             citation=_CITATION,
+             task_templates=None,
+         )
+
+     def _split_generators(self, dl_manager):
+         ds_dict = {}
+         for name, url in _URLS.items():
+             ds_dict[name] = self._create_alignments_ds(name, url)
+         splits = [
+             datasets.SplitGenerator(
+                 name=key.replace("-", "."),
+                 gen_kwargs={"ds": self._create_data(value)},
+             )
+             for key, value in ds_dict.items()
+         ]
+         # dataframe with all data
+         data_train = self._create_data([ds_dict["train-clean-100"], ds_dict["train-clean-360"], ds_dict["train-other-500"]])
+         data_dev = self._create_data([ds_dict["dev-clean"], ds_dict["dev-other"]])
+         data_test = self._create_data([ds_dict["test-clean"], ds_dict["test-other"]])
+         data_all = pd.concat([data_train, data_dev, data_test])
+         splits += [
+             datasets.SplitGenerator(
+                 name="train.all",
+                 gen_kwargs={
+                     "ds": data_all,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name="dev.all",
+                 gen_kwargs={
+                     "ds": data_dev,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name="test.all",
+                 gen_kwargs={
+                     "ds": data_test,
+                 },
+             ),
+         ]
+         # move the last row for each speaker from data_all to the dev dataframe
+         data_dev = data_all.copy()
+         data_dev = data_dev.sort_values(by=["speaker", "audio"])
+         data_dev = data_dev.groupby("speaker").tail(1)
+         data_dev = data_dev.reset_index()
+         # remove the last row for each speaker from data_all
+         data_all = data_all[~data_all["audio"].isin(data_dev["audio"])]
+         splits += [
+             datasets.SplitGenerator(
+                 name="train",
+                 gen_kwargs={
+                     "ds": data_all,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name="dev",
+                 gen_kwargs={
+                     "ds": data_dev,
+                 },
+             ),
+         ]
+         self.alignments_ds = None
+         self.data = None
+         return splits
+
+     def _create_alignments_ds(self, name, url):
+         self.empty_textgrids = 0
+         ds_hash = hashlib.md5(os.path.join(_PATH, f"{name}-alignments").encode()).hexdigest()
+         pkl_path = os.path.join(_PATH, f"{ds_hash}.pkl")
+         if os.path.exists(pkl_path):
+             with open(pkl_path, "rb") as f:
+                 ds = pickle.load(f)
+         else:
+             tgt_dir = os.path.join(_PATH, f"{name}-alignments")
+             src_dir = os.path.join(_PATH, f"{name}-data")
+             if os.path.exists(tgt_dir):
+                 # alignments already exist; skip both download and extraction
+                 src_dir = None
+                 url = None
+             elif os.path.exists(src_dir):
+                 # raw data already exists; only the download can be skipped
+                 url = None
+             ds = LibrittsDataset(
+                 target_directory=tgt_dir,
+                 source_directory=src_dir,
+                 source_url=url,
+                 verbose=_VERBOSE,
+                 tmp_directory=os.path.join(_PATH, f"{name}-tmp"),
+                 chunk_size=1000,
+             )
+             with open(pkl_path, "wb") as f:
+                 pickle.dump(ds, f)
+         return ds, ds_hash
+
+     def _create_data(self, data):
+         entries = []
+         self.phone_cache = {}
+         self.phone_converter = Converter()
+         if not isinstance(data, list):
+             data = [data]
+         hashes = [ds_hash for ds, ds_hash in data]
+         self.ds = [ds for ds, ds_hash in data]
+         del data
+         for i, dataset in enumerate(self.ds):
+             entries_pkl = os.path.join(_PATH, f"{hashes[i]}-entries.pkl")
+             if os.path.exists(entries_pkl):
+                 with open(entries_pkl, "rb") as f:
+                     add_entries = pickle.load(f)
+             else:
+                 add_entries = [
+                     entry
+                     for entry in process_map(
+                         self._create_entry,
+                         zip([i] * len(dataset), np.arange(len(dataset))),
+                         chunksize=10_000,
+                         max_workers=_MAX_WORKERS,
+                         desc=f"processing dataset {hashes[i]}",
+                         tqdm_class=tqdm,
+                     )
+                     if entry is not None
+                 ]
+                 with open(entries_pkl, "wb") as f:
+                     pickle.dump(add_entries, f)
+             entries += add_entries
+         if self.empty_textgrids > 0:
+             logger.warning(f"Found {self.empty_textgrids} empty textgrids")
+         df = pd.DataFrame(
+             entries,
+             columns=[
+                 "phones",
+                 "duration",
+                 "start",
+                 "end",
+                 "audio",
+                 "speaker",
+                 "text",
+                 "basename",
+             ],
+         )
+         del self.ds, self.phone_cache, self.phone_converter
+         return df
+
+     def _create_entry(self, dsi_idx):
+         dsi, idx = dsi_idx
+         item = self.ds[dsi][idx]
+         start, end = item["phones"][0][0], item["phones"][-1][1]
+
+         phones = []
+         durations = []
+
+         for i, p in enumerate(item["phones"]):
+             s, e, phone = p
+             phone = phone.replace("ˌ", "")
+             # strip stress digits, keeping the bare phone if anything remains
+             r_phone = phone.replace("0", "").replace("1", "")
+             if len(r_phone) > 0:
+                 phone = r_phone
+             if "[" not in phone:
+                 o_phone = phone
+                 if o_phone not in self.phone_cache:
+                     phone = self.phone_converter(
+                         phone, _PHONESET, lang=None
+                     )[0]
+                     self.phone_cache[o_phone] = phone
+                 phone = self.phone_cache[o_phone]
+             phones.append(phone)
+             # phone duration in spectrogram frames; boundaries are rounded
+             # first so adjacent phones share frame boundaries
+             durations.append(
+                 int(
+                     np.round(e * self.config.sampling_rate / self.config.hop_length)
+                     - np.round(s * self.config.sampling_rate / self.config.hop_length)
+                 )
+             )
+
+         if start >= end:
+             self.empty_textgrids += 1
+             return None
+
+         return (
+             phones,
+             durations,
+             start,
+             end,
+             item["wav"],
+             str(item["speaker"]).split("/")[-1],
+             item["transcript"],
+             Path(item["wav"]).name,
+         )
+
+     def _generate_examples(self, ds):
+         j = 0
+         for _, row in ds.iterrows():
+             # 10 kB is the minimum size of a wav file for our purposes
+             if Path(row["audio"]).stat().st_size >= 10_000:
+                 if len(row["phones"]) < 384:
+                     result = {
+                         "id": row["basename"],
+                         "speaker": row["speaker"],
+                         "text": row["text"],
+                         "start": row["start"],
+                         "end": row["end"],
+                         "phones": row["phones"],
+                         "phone_durations": row["duration"],
+                         "audio": str(row["audio"]),
+                     }
+                     yield j, result
+                     j += 1
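
The phone durations yielded above are measured in spectrogram frames rather than seconds. A worked sketch of the arithmetic used in `_create_entry`, assuming the default `sampling_rate=22050` and `hop_length=256` from `MeTTSConfig`:

```python
import numpy as np

SAMPLING_RATE = 22050  # MeTTSConfig default
HOP_LENGTH = 256       # MeTTSConfig default

def phone_frames(start_s: float, end_s: float) -> int:
    """Phone length in spectrogram frames, as computed in _create_entry.

    Rounding each boundary to the nearest frame (rather than rounding the
    difference) keeps adjacent phones from overlapping or leaving gaps, so
    the phone durations of an utterance sum to its total frame count.
    """
    return int(
        np.round(end_s * SAMPLING_RATE / HOP_LENGTH)
        - np.round(start_s * SAMPLING_RATE / HOP_LENGTH)
    )

# A 50 ms phone covers roughly 22050 * 0.05 / 256 ≈ 4.3 frames:
print(phone_frames(0.10, 0.15))  # -> 4
```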