import importlib
import torch.utils.data
from data.base_dataset import BaseDataset
from data.sampler import InfiniteSamplerWrapper


def find_dataset_using_name(dataset_name):
    # Given the option --dataset [datasetname],
    # the file "data/datasetname_dataset.py"
    # will be imported.
    dataset_filename = "data." + dataset_name + "_dataset"
    datasetlib = importlib.import_module(dataset_filename)

    # In that file, the class named DatasetNameDataset() will
    # be instantiated. It has to be a subclass of BaseDataset,
    # and the name lookup is case-insensitive.
    dataset = None
    target_dataset_name = dataset_name.replace('_', '') + 'dataset'
    for name, cls in datasetlib.__dict__.items():
        # The isinstance(cls, type) guard skips non-class module attributes,
        # which would otherwise make issubclass() raise a TypeError.
        if name.lower() == target_dataset_name.lower() \
           and isinstance(cls, type) and issubclass(cls, BaseDataset):
            dataset = cls

    if dataset is None:
        raise ValueError("In %s.py, there should be a subclass of BaseDataset "
                         "with a class name that matches %s in lowercase." %
                         (dataset_filename, target_dataset_name))

    return dataset
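
# Usage sketch (hypothetical dataset name): given a file
# "data/single_dataset.py" that defines SingleDataset(BaseDataset),
#
#   dataset_class = find_dataset_using_name('single')  # -> SingleDataset
#
# Any name works as long as the module/class naming convention above holds.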


def get_option_setter(dataset_name):
    # Expose the dataset's modify_commandline_options hook so the option
    # parser can register dataset-specific flags before parsing.
    dataset_class = find_dataset_using_name(dataset_name)
    return dataset_class.modify_commandline_options
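
# Usage sketch (hedged): option parsers in pix2pix/SPADE-style codebases
# typically invoke the returned hook as option_setter(parser, is_train); the
# exact signature here is set by BaseDataset.modify_commandline_options.
#
#   option_setter = get_option_setter('single')      # hypothetical name
#   parser = option_setter(parser, is_train=True)    # assumed signature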


def create_dataloader(opt):
    dataset_class = find_dataset_using_name(opt.dataset_mode)
    instance = dataset_class()
    instance.initialize(opt)
    print("dataset [%s] of size %d was created" %
          (type(instance).__name__, len(instance)))

    # Outside the test phase, optionally wrap the dataset in an infinite
    # sampler so training loops can draw batches indefinitely without
    # restarting the dataloader at epoch boundaries.
    sampler = None
    if opt.phase != 'test' and opt.use_infinite_sampler:
        sampler = InfiniteSamplerWrapper(instance)

    dataloader = torch.utils.data.DataLoader(
        instance,
        batch_size=opt.batchSize,
        sampler=sampler,
        # DataLoader disallows shuffle=True together with an explicit sampler.
        shuffle=not opt.serial_batches if sampler is None else False,
        num_workers=int(opt.nThreads),
        drop_last=opt.isTrain
    )
    return dataloader
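

if __name__ == '__main__':
    # Minimal smoke-test sketch, not part of the module's API. It assumes a
    # hypothetical "single" dataset mode whose initialize() needs only the
    # fields below; real option objects come from this repo's option parser.
    from argparse import Namespace

    opt = Namespace(phase='train', dataset_mode='single', batchSize=4,
                    serial_batches=False, nThreads=2, isTrain=True,
                    use_infinite_sampler=False)
    loader = create_dataloader(opt)
    batch = next(iter(loader))
    print("loaded one batch of type %s" % type(batch).__name__)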