cdminix committed
Commit 81c31e0 · 1 Parent(s): 4be7270

add max_workers, textgrids

Files changed (1):
  1. libritts-aligned.py (+110 -59)
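
This commit adds two environment knobs (LIBRITTS_MAX_WORKERS, LIBRITTS_DOWNLOAD_SPLITS) and pre-computed TextGrid downloads. A minimal usage sketch, assuming the dataset id `cdminix/libritts-aligned` (taken from the textgrid URL added below) and the `key.replace("-", ".")` split naming used by the loader:

    import os

    # The loader reads these at import time, so set them before load_dataset.
    os.environ["LIBRITTS_PATH"] = "/data/libritts"                  # target directory (required)
    os.environ["LIBRITTS_MAX_WORKERS"] = "8"                        # parsed with int(); defaults to cpu_count()
    os.environ["LIBRITTS_DOWNLOAD_SPLITS"] = "dev-clean,dev-other"  # comma-separated subset of splits

    from datasets import load_dataset

    dev_clean = load_dataset("cdminix/libritts-aligned", split="dev.clean")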
libritts-aligned.py CHANGED
@@ -12,6 +12,7 @@ from alignments.datasets.libritts import LibrittsDataset
 from tqdm.contrib.concurrent import process_map
 from tqdm.auto import tqdm
 from multiprocessing import cpu_count
+import multiprocessing as mp
 from phones.convert import Converter
 import torchaudio
 import torchaudio.transforms as AT
@@ -22,7 +23,12 @@ _PHONESET = "arpabet"
 
 _VERBOSE = os.environ.get("LIBRITTS_VERBOSE", True)
 _MAX_WORKERS = os.environ.get("LIBRITTS_MAX_WORKERS", cpu_count())
+_MAX_WORKERS = int(_MAX_WORKERS)
 _PATH = os.environ.get("LIBRITTS_PATH", os.environ.get("HF_DATASETS_CACHE", None))
+_DOWNLOAD_SPLITS = os.environ.get(
+    "LIBRITTS_DOWNLOAD_SPLITS",
+    "train-clean-100,train-clean-360,train-other-500,dev-clean,dev-other,test-clean,test-other",
+).split(",")
 if _PATH is not None and not os.path.exists(_PATH):
     os.makedirs(_PATH)
 
@@ -56,6 +62,7 @@ _URLS = {
     "train-clean-360": _URL + "train-clean-360.tar.gz",
     "train-other-500": _URL + "train-other-500.tar.gz",
 }
+_URLS = {k: v for k, v in _URLS.items() if k in _DOWNLOAD_SPLITS}
 
 
 class LibriTTSAlignConfig(datasets.BuilderConfig):
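
The comprehension added above prunes `_URLS` to the splits selected via LIBRITTS_DOWNLOAD_SPLITS, so only those archives are downloaded and aligned. A self-contained sketch of the effect (URLs are placeholders, not the real `_URL` values):

    _DOWNLOAD_SPLITS = "dev-clean,dev-other".split(",")
    _URLS = {
        "dev-clean": "https://example.com/dev-clean.tar.gz",
        "train-clean-100": "https://example.com/train-clean-100.tar.gz",
    }
    # Keep only the requested splits.
    _URLS = {k: v for k, v in _URLS.items() if k in _DOWNLOAD_SPLITS}
    print(list(_URLS))  # ['dev-clean']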
@@ -72,11 +79,16 @@ class LibriTTSAlignConfig(datasets.BuilderConfig):
         self.sampling_rate = sampling_rate
         self.hop_length = hop_length
         self.win_length = win_length
-
+
         if _PATH is None:
-            raise ValueError("Please set the environment variable LIBRITTS_PATH to point to the LibriTTS dataset directory.")
+            raise ValueError(
+                "Please set the environment variable LIBRITTS_PATH to point to the LibriTTS dataset directory."
+            )
         elif _PATH == os.environ.get("HF_DATASETS_CACHE", None):
-            logger.warning("Please set the environment variable LIBRITTS_PATH to point to the LibriTTS dataset directory. Using HF_DATASETS_CACHE as a fallback.")
+            logger.warning(
+                "Please set the environment variable LIBRITTS_PATH to point to the LibriTTS dataset directory. Using HF_DATASETS_CACHE as a fallback."
+            )
+
 
 class LibriTTSAlign(datasets.GeneratorBasedBuilder):
     """LibriTTSAlign dataset."""
@@ -99,7 +111,7 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
             "phones": datasets.Sequence(datasets.Value("string")),
             "phone_durations": datasets.Sequence(datasets.Value("int32")),
             # audio feature
-            "audio": datasets.Value("string")
+            "audio": datasets.Value("string"),
         }
 
         return datasets.DatasetInfo(
@@ -117,64 +129,98 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
             ds_dict[name] = self._create_alignments_ds(name, url)
         splits = [
             datasets.SplitGenerator(
-                name=key.replace("-", "."),
-                gen_kwargs={"ds": self._create_data(value)}
-            )
+                name=key.replace("-", "."), gen_kwargs={"ds": self._create_data(value)}
+            )
             for key, value in ds_dict.items()
         ]
         # dataframe with all data
-        data_train = self._create_data([ds_dict["train-clean-100"], ds_dict["train-clean-360"], ds_dict["train-other-500"]])
-        data_dev = self._create_data([ds_dict["dev-clean"], ds_dict["dev-other"]])
-        data_test = self._create_data([ds_dict["test-clean"], ds_dict["test-other"]])
-        data_all = pd.concat([data_train, data_dev, data_test])
-        splits += [
-            datasets.SplitGenerator(
-                name="train.all",
-                gen_kwargs={
-                    "ds": data_all,
-                }
-            ),
-            datasets.SplitGenerator(
-                name="dev.all",
-                gen_kwargs={
-                    "ds": data_dev,
-                }
-            ),
-            datasets.SplitGenerator(
-                name="test.all",
-                gen_kwargs={
-                    "ds": data_test,
-                }
-            ),
-        ]
-        # move last row for each speaker from data_all to dev dataframe
-        data_dev = data_all.copy()
-        data_dev = data_dev.sort_values(by=["speaker", "audio"])
-        data_dev = data_dev.groupby("speaker").tail(1)
-        data_dev = data_dev.reset_index()
-        # remove last row for each speaker from data_all
-        data_all = data_all[~data_all["audio"].isin(data_dev["audio"])]
-        splits += [
-            datasets.SplitGenerator(
-                name="train",
-                gen_kwargs={
-                    "ds": data_all,
-                }
-            ),
-            datasets.SplitGenerator(
-                name="dev",
-                gen_kwargs={
-                    "ds": data_dev,
-                }
-            ),
-        ]
+        data_train, data_dev, data_test, data_all = None, None, None, None
+        if (
+            "train-clean-100" in _URLS
+            and "train-clean-360" in _URLS
+            and "train-other-500" in _URLS
+        ):
+            data_train = self._create_data(
+                [
+                    ds_dict["train-clean-100"],
+                    ds_dict["train-clean-360"],
+                    ds_dict["train-other-500"],
+                ]
+            )
+        if "dev-clean" in _URLS and "dev-other" in _URLS:
+            data_dev = self._create_data([ds_dict["dev-clean"], ds_dict["dev-other"]])
+        if "test-clean" in _URLS and "test-other" in _URLS:
+            data_test = self._create_data(
+                [ds_dict["test-clean"], ds_dict["test-other"]]
+            )
+        if (
+            "train-clean-100" in _URLS
+            and "train-clean-360" in _URLS
+            and "train-other-500" in _URLS
+            and "dev-clean" in _URLS
+            and "dev-other" in _URLS
+            and "test-clean" in _URLS
+            and "test-other" in _URLS
+        ):
+            data_all = pd.concat([data_train, data_dev, data_test])
+        if data_all is not None:
+            splits.append(
+                datasets.SplitGenerator(
+                    name="train.all",
+                    gen_kwargs={
+                        "ds": data_all,
+                    },
+                )
+            )
+        if data_dev is not None:
+            splits.append(
+                datasets.SplitGenerator(
+                    name="dev.all",
+                    gen_kwargs={
+                        "ds": data_dev,
+                    },
+                )
+            )
+        if data_test is not None:
+            splits.append(
+                datasets.SplitGenerator(
+                    name="test.all",
+                    gen_kwargs={
+                        "ds": data_test,
+                    },
+                )
+            )
+        if data_dev is not None and data_all is not None:
+            # move last row for each speaker from data_all to dev dataframe
+            data_dev = data_all.copy()
+            data_dev = data_dev.sort_values(by=["speaker", "audio"])
+            data_dev = data_dev.groupby("speaker").tail(1)
+            data_dev = data_dev.reset_index()
+            # remove last row for each speaker from data_all
+            data_all = data_all[~data_all["audio"].isin(data_dev["audio"])]
+            splits += [
+                datasets.SplitGenerator(
+                    name="train",
+                    gen_kwargs={
+                        "ds": data_all,
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name="dev",
+                    gen_kwargs={
+                        "ds": data_dev,
+                    },
+                ),
+            ]
         self.alignments_ds = None
         self.data = None
         return splits
 
     def _create_alignments_ds(self, name, url):
         self.empty_textgrids = 0
-        ds_hash = hashlib.md5(os.path.join(_PATH, f"{name}-alignments").encode()).hexdigest()
+        ds_hash = hashlib.md5(
+            os.path.join(_PATH, f"{name}-alignments").encode()
+        ).hexdigest()
         pkl_path = os.path.join(_PATH, f"{ds_hash}.pkl")
         if os.path.exists(pkl_path):
             ds = pickle.load(open(pkl_path, "rb"))
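
The hunk above replaces the unconditional train/dev/test aggregation with gated construction: each aggregate split is generated only when every constituent download was selected. A compact sketch of that gating, using only names that appear in the diff:

    TRAIN = {"train-clean-100", "train-clean-360", "train-other-500"}
    DEV = {"dev-clean", "dev-other"}
    TEST = {"test-clean", "test-other"}

    def extra_splits(selected):
        """Which aggregate splits _split_generators would add (sketch only)."""
        extra = []
        if TRAIN | DEV | TEST <= selected:
            extra.append("train.all")      # data_all needs every split
        if DEV <= selected:
            extra.append("dev.all")
        if TEST <= selected:
            extra.append("test.all")
        if TRAIN | DEV | TEST <= selected:
            extra += ["train", "dev"]      # per-speaker re-split of data_all
        return extra

    print(extra_splits(DEV))               # ['dev.all']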
@@ -190,9 +236,11 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
                 target_directory=tgt_dir,
                 source_directory=src_dir,
                 source_url=url,
+                textgrid_url=f"https://huggingface.co/datasets/cdminix/libritts-aligned/resolve/main/data/{name.replace('-', '_')}.tar.gz",
                 verbose=_VERBOSE,
                 tmp_directory=os.path.join(_PATH, f"{name}-tmp"),
                 chunk_size=1000,
+                n_workers=_MAX_WORKERS,
             )
             pickle.dump(ds, open(pkl_path, "wb"))
         return ds, ds_hash
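
Two new keyword arguments are passed to LibrittsDataset here: `textgrid_url`, pointing at pre-computed alignment TextGrids hosted in this repo (hyphens in the split name become underscores), and `n_workers`, wired to LIBRITTS_MAX_WORKERS. A hypothetical helper mirroring the URL scheme:

    def textgrid_archive_url(name: str) -> str:
        # Illustrative only; mirrors the f-string passed as textgrid_url above.
        base = "https://huggingface.co/datasets/cdminix/libritts-aligned/resolve/main/data"
        return f"{base}/{name.replace('-', '_')}.tar.gz"

    print(textgrid_archive_url("dev-clean"))
    # https://huggingface.co/datasets/cdminix/libritts-aligned/resolve/main/data/dev_clean.tar.gz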
@@ -209,7 +257,9 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
         del data
         for i, ds in enumerate(ds):
             if os.path.exists(os.path.join(_PATH, f"{hashes[i]}-entries.pkl")):
-                add_entries = pickle.load(open(os.path.join(_PATH, f"{hashes[i]}-entries.pkl"), "rb"))
+                add_entries = pickle.load(
+                    open(os.path.join(_PATH, f"{hashes[i]}-entries.pkl"), "rb")
+                )
             else:
                 add_entries = [
                     entry
@@ -223,7 +273,10 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
                    )
                    if entry is not None
                ]
-                pickle.dump(add_entries, open(os.path.join(_PATH, f"{hashes[i]}-entries.pkl"), "wb"))
+                pickle.dump(
+                    add_entries,
+                    open(os.path.join(_PATH, f"{hashes[i]}-entries.pkl"), "wb"),
+                )
             entries += add_entries
         if self.empty_textgrids > 0:
             logger.warning(f"Found {self.empty_textgrids} empty textgrids")
@@ -259,9 +312,7 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
             if "[" not in phone:
                 o_phone = phone
                 if o_phone not in self.phone_cache:
-                    phone = self.phone_converter(
-                        phone, _PHONESET, lang=None
-                    )[0]
+                    phone = self.phone_converter(phone, _PHONESET, lang=None)[0]
                     self.phone_cache[o_phone] = phone
                 phone = self.phone_cache[o_phone]
                 phones.append(phone)
@@ -304,4 +355,4 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
                 "audio": str(row["audio"]),
             }
             yield j, result
-        j += 1
+            j += 1