rohitp1 committed on
Commit
27734a3
·
1 Parent(s): bf42375

Update custom_libri_clean_data_only_100_hours.py

Browse files
custom_libri_clean_data_only_100_hours.py CHANGED
@@ -49,12 +49,10 @@ _DL_URLS = {
49
  "dev": _DL_URL + "dev-clean.tar.gz",
50
  "test": _DL_URL + "test-clean.tar.gz",
51
  "train.100": _DL_URL + "train-clean-100.tar.gz",
52
- #"train.360": _DL_URL + "train-clean-360.tar.gz",
53
  },
54
  "other": {
55
  "test": _DL_URL + "test-other.tar.gz",
56
  "dev": _DL_URL + "dev-other.tar.gz",
57
- "train.500": _DL_URL + "train-other-500.tar.gz",
58
  },
59
  "all": {
60
  "dev.clean": _DL_URL + "dev-clean.tar.gz",
@@ -62,8 +60,6 @@ _DL_URLS = {
62
  "test.clean": _DL_URL + "test-clean.tar.gz",
63
  "test.other": _DL_URL + "test-other.tar.gz",
64
  "train.clean.100": _DL_URL + "train-clean-100.tar.gz",
65
- "train.clean.360": _DL_URL + "train-clean-360.tar.gz",
66
- "train.other.500": _DL_URL + "train-other-500.tar.gz",
67
  },
68
  }
69
 
@@ -155,13 +151,13 @@ class LibrispeechASR(datasets.GeneratorBasedBuilder):
155
  ]
156
  elif self.config.name == "other":
157
  train_splits = [
158
- datasets.SplitGenerator(
159
- name="train.500",
160
- gen_kwargs={
161
- "local_extracted_archive": local_extracted_archive.get("train.500"),
162
- "files": dl_manager.iter_archive(archive_path["train.500"]),
163
- },
164
- )
165
  ]
166
  dev_splits = [
167
  datasets.SplitGenerator(
@@ -190,20 +186,20 @@ class LibrispeechASR(datasets.GeneratorBasedBuilder):
190
  "files": dl_manager.iter_archive(archive_path["train.clean.100"]),
191
  },
192
  ),
193
- datasets.SplitGenerator(
194
- name="train.clean.360",
195
- gen_kwargs={
196
- "local_extracted_archive": local_extracted_archive.get("train.clean.360"),
197
- "files": dl_manager.iter_archive(archive_path["train.clean.360"]),
198
- },
199
- ),
200
- datasets.SplitGenerator(
201
- name="train.other.500",
202
- gen_kwargs={
203
- "local_extracted_archive": local_extracted_archive.get("train.other.500"),
204
- "files": dl_manager.iter_archive(archive_path["train.other.500"]),
205
- },
206
- ),
207
  ]
208
  dev_splits = [
209
  datasets.SplitGenerator(
 
49
  "dev": _DL_URL + "dev-clean.tar.gz",
50
  "test": _DL_URL + "test-clean.tar.gz",
51
  "train.100": _DL_URL + "train-clean-100.tar.gz",
 
52
  },
53
  "other": {
54
  "test": _DL_URL + "test-other.tar.gz",
55
  "dev": _DL_URL + "dev-other.tar.gz",
 
56
  },
57
  "all": {
58
  "dev.clean": _DL_URL + "dev-clean.tar.gz",
 
60
  "test.clean": _DL_URL + "test-clean.tar.gz",
61
  "test.other": _DL_URL + "test-other.tar.gz",
62
  "train.clean.100": _DL_URL + "train-clean-100.tar.gz",
 
 
63
  },
64
  }
65
 
 
151
  ]
152
  elif self.config.name == "other":
153
  train_splits = [
154
+ # datasets.SplitGenerator(
155
+ # name="train.500",
156
+ # gen_kwargs={
157
+ # "local_extracted_archive": local_extracted_archive.get("train.500"),
158
+ # "files": dl_manager.iter_archive(archive_path["train.500"]),
159
+ # },
160
+ # )
161
  ]
162
  dev_splits = [
163
  datasets.SplitGenerator(
 
186
  "files": dl_manager.iter_archive(archive_path["train.clean.100"]),
187
  },
188
  ),
189
+ # datasets.SplitGenerator(
190
+ # name="train.clean.360",
191
+ # gen_kwargs={
192
+ # "local_extracted_archive": local_extracted_archive.get("train.clean.360"),
193
+ # "files": dl_manager.iter_archive(archive_path["train.clean.360"]),
194
+ # },
195
+ # ),
196
+ # datasets.SplitGenerator(
197
+ # name="train.other.500",
198
+ # gen_kwargs={
199
+ # "local_extracted_archive": local_extracted_archive.get("train.other.500"),
200
+ # "files": dl_manager.iter_archive(archive_path["train.other.500"]),
201
+ # },
202
+ # ),
203
  ]
204
  dev_splits = [
205
  datasets.SplitGenerator(