from utils.datasets_eval import AudioFileDataset
from torch.utils.data import DataLoader
import pytorch_lightning as pl


def test():
    """Smoke test: peek at the eval dataset and run a no-op validation pass."""

    ds = AudioFileDataset()
    # batch_size=None disables automatic batching; the identity collate_fn
    # returns each sample unchanged, which is required for samples of mixed types.
    dl = DataLoader(ds, batch_size=None, collate_fn=lambda k: k)

    # Pull a single (x, y) sample to confirm the dataloader is iterable.
    for x, y in dl:
        break

    class MyModel(pl.LightningModule):
        """Minimal pass-through model used only to exercise the dataloaders."""

        def __init__(self, **kwargs):
            super().__init__()

        def forward(self, x):
            # Identity forward; this test only exercises the data pipeline.
            return x

        def training_step(self, batch, batch_idx):
            # Dummy step; training is never run by this test.
            return None

        def validation_step(self, batch, batch_idx):
            # Print the raw batch to verify what the dataloader yields.
            print(batch)

        def train_dataloader(self):
            return dl

        def val_dataloader(self):
            return dl

        def configure_optimizers(self):
            # No optimizer is needed for a validation-only smoke test.
            return None

    model = MyModel()
    # A bare Trainer with default settings; validate() feeds val_dataloader
    # through validation_step without doing any training.
    trainer = pl.Trainer()
    trainer.validate(model)
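

# Minimal entry point, assuming this file is also meant to be run directly as a
# script; if it is only collected by pytest (filename matching test_*.py), this
# guard is harmless and simply never fires under collection.
if __name__ == "__main__":
    test()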