---
license: cc-by-nc-nd-4.0
---
[![arXiv](https://img.shields.io/badge/arXiv-Paper-b31b1b.svg)](https://arxiv.org/abs/2502.04128)
**Update (2025-02-07):** Our paper has been released!
The script below merges tokenized speech datasets stored in memmap format, combining multiple input datasets into a single, larger training dataset.
```python
import os

import numpy as np


def merge_memmap_datasets(dataset_dirs, output_dir):
    # Ensure the output directory exists
    os.makedirs(output_dir, exist_ok=True)

    # Dataset splits to be merged
    splits = ['train', 'val']

    for split in splits:
        shapes = []
        seq_len = None
        total_samples = 0

        # Collect shapes of all datasets and check sequence length consistency
        for dataset_dir in dataset_dirs:
            shape_path = os.path.join(dataset_dir, f'{split}_input_ids_shape.npy')
            if not os.path.exists(shape_path):
                print(f"Warning: {split}_input_ids_shape.npy not found in {dataset_dir}, skipping this dataset.")
                continue
            shape = tuple(np.load(shape_path))
            print(f"Loaded shape of {split} data from {dataset_dir}: {shape}")
            shapes.append((dataset_dir, shape))
            total_samples += shape[0]
            if seq_len is None:
                seq_len = shape[1]
            elif seq_len != shape[1]:
                print(f"Error: Sequence length mismatch in {split} data from {dataset_dir}.")
                return

        if total_samples == 0:
            print(f"Error: No valid {split} data found for merging.")
            continue

        new_shape = (total_samples, seq_len)

        # Create the new memmap file
        output_memmap_path = os.path.join(output_dir, f'{split}_input_ids.memmap')
        output_memmap = np.memmap(
            output_memmap_path, dtype='int32', mode='w+', shape=new_shape
        )

        # Copy data from each dataset into the new memmap file
        start_idx = 0
        for dataset_dir, shape in shapes:
            memmap_path = os.path.join(dataset_dir, f'{split}_input_ids.memmap')
            data = np.memmap(
                memmap_path, dtype='int32', mode='r', shape=shape
            )
            end_idx = start_idx + shape[0]
            output_memmap[start_idx:end_idx, :] = data[:]
            print(f"Merged {split} data from {dataset_dir} into positions {start_idx}:{end_idx}")
            start_idx = end_idx
            del data  # Release the read-only memmap

        # Flush pending writes to disk and release the output memmap
        output_memmap.flush()
        del output_memmap

        # Save the new shape file
        np.save(os.path.join(output_dir, f'{split}_input_ids_shape.npy'), new_shape)
        print(f"Completed merging {split} data. New shape: {new_shape}")


if __name__ == "__main__":
    dataset_dirs = [
        'libriheavy_tts_1',
        'libriheavy_tts_2',
        'libriheavy_tts_3',
        'libriheavy_tts_4',
        'emilia_en_1',
        'emilia_en_2',
        'emilia_en_3',
        'emilia_en_4',
    ]
    output_dir = 'libriheavy_tts_all'
    merge_memmap_datasets(dataset_dirs, output_dir)
```
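After merging, it is worth confirming that the output loads correctly before training. Below is a minimal sanity-check sketch (not part of the original script) that assumes the `{split}_input_ids.memmap` / `{split}_input_ids_shape.npy` naming convention used above:

```python
import os

import numpy as np

# Hypothetical sanity check: reload the merged train split and inspect it.
output_dir = 'libriheavy_tts_all'  # assumed to match the merge script's output_dir
split = 'train'

# Load the saved shape first; the memmap cannot be opened without it
merged_shape = tuple(np.load(os.path.join(output_dir, f'{split}_input_ids_shape.npy')))
merged = np.memmap(
    os.path.join(output_dir, f'{split}_input_ids.memmap'),
    dtype='int32', mode='r', shape=merged_shape,
)

print(f"Merged {split} split: {merged.shape[0]} samples x {merged.shape[1]} tokens")
print(f"First row preview: {merged[0, :8]}")
```

The row count printed here should equal the sum of the first dimensions of the input datasets' shape files.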