# Unit tests for the LibriTTS vocoder dataset wrapper.
# (Non-code page residue from the scraped source was removed here.)
import unittest
import torch
from torch.utils.data import DataLoader
from training.datasets import LibriTTSDatasetVocoder
class TestLibriTTSDatasetAcoustic(unittest.TestCase):
    """Integration tests for ``LibriTTSDatasetVocoder``.

    NOTE(review): the class name says "Acoustic" but the class under test is
    ``LibriTTSDatasetVocoder`` — likely a copy-paste leftover; confirm and
    rename in a follow-up (kept as-is here so external references by name,
    e.g. ``python -m unittest module.TestLibriTTSDatasetAcoustic``, still work).

    These tests require the LibriTTS cache to already exist at
    ``datasets_cache/LIBRITTS`` (``download=False``); they are integration
    tests, not isolated unit tests.
    """

    def setUp(self):
        # Number of samples per collated batch; also reused as the
        # DataLoader batch size in test_dataloader.
        self.batch_size = 2
        self.download = False
        self.dataset = LibriTTSDatasetVocoder(
            root="datasets_cache/LIBRITTS",
            batch_size=self.batch_size,
            download=self.download,
        )

    def test_len(self):
        """The cached split is expected to contain exactly 33,236 items."""
        self.assertEqual(len(self.dataset), 33236)

    def test_getitem(self):
        """A sample exposes mel, audio and speaker_id with fixed shapes.

        The expected shapes/values are pinned to sample 0 of the cached
        dataset: a (100, 64) mel tensor, a 16384-sample audio tensor, and
        speaker id 1034.
        """
        sample = self.dataset[0]
        self.assertEqual(sample["mel"].shape, torch.Size([100, 64]))
        self.assertEqual(sample["audio"].shape, torch.Size([16384]))
        self.assertEqual(sample["speaker_id"], 1034)

    def test_collate_fn(self):
        """collate_fn turns a list of samples into 4 per-field batches."""
        data = [
            self.dataset[0],
            self.dataset[2],
        ]
        result = self.dataset.collate_fn(data)
        # Four fields per collated result, each holding batch_size entries.
        self.assertEqual(len(result), 4)
        for batch in result:
            self.assertEqual(len(batch), self.batch_size)

    def test_dataloader(self):
        """A DataLoader wired to the dataset's collate_fn yields well-formed batches."""
        dataloader = DataLoader(
            self.dataset,
            batch_size=self.batch_size,
            shuffle=False,
            collate_fn=self.dataset.collate_fn,
        )
        iter_dataloader = iter(dataloader)
        # Only inspect the first two batches; a full pass would be too slow.
        for items in (next(iter_dataloader), next(iter_dataloader)):
            self.assertEqual(len(items), 4)
            for it in items:
                self.assertEqual(len(it), self.batch_size)
# Support direct invocation (`python this_file.py`): run the whole suite.
if __name__ == "__main__":
    unittest.main()