TianyuZhang committed
Commit 7ac9860 · verified · 1 Parent(s): a7ab3af

Upload 28 files

datasets/beans.py ADDED
@@ -0,0 +1,171 @@
import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from typing import Tuple, Dict
import os


class BeanDataModule:
    def __init__(
        self,
        preprocess: bool = True,
        data_dir: str = "theRestDataset",
        batch_size: int = 32,
        image_size: int = 224,
        num_workers: int = 4,
        pin_memory: bool = True,
    ):
        """
        Initialize the Bean Disease DataModule.

        Args:
            preprocess (bool): Whether to apply augmentation and ImageNet normalization
            data_dir (str): Root directory; the dataset is expected under <data_dir>/beans
            batch_size (int): Batch size for training and testing
            image_size (int): Size of the input images
            num_workers (int): Number of workers for data loading
            pin_memory (bool): Whether to pin memory for GPU training
        """
        self.preprocess = preprocess
        # "beans" is appended here, so data_dir should be the parent directory
        self.data_dir = os.path.join(data_dir, "beans")
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_workers = num_workers
        self.pin_memory = pin_memory

        self.train_transforms = None
        self.test_transforms = None
        self.train_dataset = None
        self.test_dataset = None
        self.train_loader = None
        self.test_loader = None
        self.class_names = None

        self._setup_transforms()
        self._setup_datasets()
        self._setup_loaders()

    def _setup_transforms(self) -> None:
        """Set up data transforms for training and testing."""
        if self.preprocess:
            self.train_transforms = transforms.Compose(
                [
                    transforms.RandomResizedCrop(self.image_size),
                    transforms.RandomHorizontalFlip(),
                    transforms.RandomRotation(15),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                    ),
                ]
            )

            self.test_transforms = transforms.Compose(
                [
                    transforms.Resize(self.image_size + 32),
                    transforms.CenterCrop(self.image_size),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                    ),
                ]
            )
        else:
            self.train_transforms = transforms.Compose(
                [
                    transforms.Resize((self.image_size, self.image_size)),
                    transforms.ToTensor(),
                ]
            )

            self.test_transforms = transforms.Compose(
                [
                    transforms.Resize((self.image_size, self.image_size)),
                    transforms.ToTensor(),
                ]
            )

    def _setup_datasets(self) -> None:
        """Set up training and testing datasets."""
        self.train_dataset = datasets.ImageFolder(
            root=f"{self.data_dir}/train", transform=self.train_transforms
        )

        self.test_dataset = datasets.ImageFolder(
            root=f"{self.data_dir}/test", transform=self.test_transforms
        )

        self.class_names = self.train_dataset.classes

    def _setup_loaders(self) -> None:
        """Set up data loaders for training and testing."""
        self.train_loader = DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=self.num_workers,
            pin_memory=self.pin_memory,
        )

        self.test_loader = DataLoader(
            self.test_dataset,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            pin_memory=self.pin_memory,
        )

    def get_loaders(self) -> Tuple[DataLoader, DataLoader]:
        """
        Get train and test data loaders.

        Returns:
            tuple: (train_loader, test_loader)
        """
        return self.train_loader, self.test_loader

    def get_dataset_info(self) -> Dict:
        """
        Get dataset information.

        Returns:
            dict: Dictionary containing dataset information
        """
        return {
            "num_train_samples": len(self.train_dataset),
            "num_test_samples": len(self.test_dataset),
            "num_classes": len(self.class_names),
            "classes": self.class_names,
            "image_size": self.image_size,
            "batch_size": self.batch_size,
        }


class Beans:
    def __init__(
        self,
        preprocess,
        location=os.path.expanduser("~/data"),
        batch_size=32,
        num_workers=16,
    ):
        self.data_dir = location
        self.batch_size = batch_size
        self.num_workers = num_workers

        self.dm = BeanDataModule(
            preprocess=preprocess,
            data_dir=self.data_dir,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
        )

        self.train_loader, self.test_loader = self.dm.get_loaders()
        self.dataset_info = self.dm.get_dataset_info()
        self.train_dataset = self.dm.train_dataset
        self.test_dataset = self.dm.test_dataset
        self.classnames = self.dataset_info["classes"]
        self.num_classes = self.dataset_info["num_classes"]
        self.image_size = self.dataset_info["image_size"]
        self.batch_size = self.dataset_info["batch_size"]
        self.num_train_samples = self.dataset_info["num_train_samples"]
        self.num_test_samples = self.dataset_info["num_test_samples"]

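A minimal usage sketch of the module above (hypothetical: it assumes the beans ImageFolder layout <location>/beans/train and <location>/beans/test already exists on disk, and that the file is importable as datasets.beans):

# Hypothetical usage sketch, not part of the commit.
import os
from datasets.beans import Beans  # import path assumed from this repo layout

beans = Beans(preprocess=True, location=os.path.expanduser("~/data"))
print(beans.num_classes, beans.classnames)
images, labels = next(iter(beans.train_loader))
print(images.shape)  # e.g. torch.Size([32, 3, 224, 224])
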
datasets/birds.py ADDED
@@ -0,0 +1,126 @@
import copy
import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, random_split
import os


class BirdsDataset:
    def __init__(
        self,
        preprocess,
        data_dir="theRestDataset",
        image_size=224,
        val_split=0.2,
        seed=42,
    ):
        self.preprocess = preprocess
        # "birds/images" is appended here, so data_dir should be the parent directory
        self.data_dir = os.path.join(data_dir, "birds", "images")
        self.image_size = image_size
        self.val_split = val_split
        self.seed = seed

        # Define transforms for training and validation
        if self.preprocess:
            self.train_transforms = transforms.Compose(
                [
                    transforms.RandomResizedCrop(self.image_size),
                    transforms.RandomHorizontalFlip(),
                    transforms.RandomRotation(15),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                    ),
                ]
            )

            self.test_transforms = transforms.Compose(
                [
                    transforms.Resize(self.image_size + 32),
                    transforms.CenterCrop(self.image_size),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                    ),
                ]
            )
        else:
            self.train_transforms = transforms.Compose(
                [
                    transforms.Resize((self.image_size, self.image_size)),
                    transforms.ToTensor(),
                ]
            )

            self.test_transforms = transforms.Compose(
                [
                    transforms.Resize((self.image_size, self.image_size)),
                    transforms.ToTensor(),
                ]
            )

        # Create datasets
        self.setup()

    def setup(self):
        # Create full dataset
        full_dataset = datasets.ImageFolder(root=self.data_dir)

        # Calculate lengths for train and validation
        total_length = len(full_dataset)
        val_length = int(total_length * self.val_split)
        train_length = total_length - val_length

        # Create train/validation splits
        generator = torch.Generator().manual_seed(self.seed)
        self.train_dataset, self.test_dataset = random_split(
            full_dataset, [train_length, val_length], generator=generator
        )

        # random_split returns two Subsets that share one underlying ImageFolder;
        # give the test subset its own copy so the two transform assignments
        # below do not overwrite each other.
        self.test_dataset.dataset = copy.deepcopy(full_dataset)

        # Apply transforms
        self.train_dataset.dataset.transform = self.train_transforms
        self.test_dataset.dataset.transform = self.test_transforms

        # Store class information
        self.classes = full_dataset.classes
        self.class_to_idx = full_dataset.class_to_idx

    def get_dataloaders(self, batch_size=32, num_workers=4):
        train_loader = DataLoader(
            self.train_dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
        )

        val_loader = DataLoader(
            self.test_dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=num_workers,
        )

        return train_loader, val_loader


class Birds:
    def __init__(
        self,
        preprocess,
        location=os.path.expanduser("~/data"),
        batch_size=32,
        num_workers=16,
    ):
        self.dataset = BirdsDataset(preprocess, data_dir=location)
        self.train_dataset, self.test_dataset = (
            self.dataset.train_dataset,
            self.dataset.test_dataset,
        )
        self.train_loader, self.test_loader = self.dataset.get_dataloaders(
            batch_size=batch_size, num_workers=num_workers
        )
        # Turn CUB-style folder names into plain lowercase class names.
        idx_to_class = dict((v, k) for k, v in self.dataset.class_to_idx.items())
        self.classnames = [
            idx_to_class[i].split(".")[-1].replace("_", " ").replace("-", " ").lower()
            for i in range(len(idx_to_class))
        ]

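The classname cleanup in Birds targets CUB-style folder names; an illustrative example (the folder name below is hypothetical):

# Hypothetical folder name in the CUB "NNN.Class_Name" style:
name = "001.Black_footed_Albatross"
pretty = name.split(".")[-1].replace("_", " ").replace("-", " ").lower()
print(pretty)  # -> "black footed albatross"
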
datasets/cars.py ADDED
@@ -0,0 +1,171 @@
import os
import torch
import torchvision.datasets as datasets

import pathlib
from typing import Callable, Optional, Any, Tuple

from PIL import Image

from torchvision.datasets.utils import (
    download_and_extract_archive,
    download_url,
    verify_str_arg,
)
from torchvision.datasets.vision import VisionDataset


class PytorchStanfordCars(VisionDataset):
    """`Stanford Cars <https://ai.stanford.edu/~jkrause/cars/car_dataset.html>`_ Dataset

    The Cars dataset contains 16,185 images of 196 classes of cars. The data is
    split into 8,144 training images and 8,041 testing images, where each class
    has been split roughly in a 50-50 split.

    .. note::

        This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.

    Args:
        root (string): Root directory of dataset
        split (string, optional): The dataset split, supports ``"train"`` (default) or ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again."""

    def __init__(
        self,
        root: str,
        split: str = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:

        try:
            import scipy.io as sio
        except ImportError:
            raise RuntimeError(
                "Scipy is not found. This dataset needs to have scipy installed: pip install scipy"
            )

        super().__init__(root, transform=transform, target_transform=target_transform)

        self._split = verify_str_arg(split, "split", ("train", "test"))
        self._base_folder = pathlib.Path(root) / "stanford_cars"
        devkit = self._base_folder / "devkit"

        if self._split == "train":
            self._annotations_mat_path = devkit / "cars_train_annos.mat"
            self._images_base_path = self._base_folder / "cars_train"
        else:
            self._annotations_mat_path = devkit / "cars_test_annos_withlabels.mat"
            self._images_base_path = self._base_folder / "cars_test"

        if download:
            self.download()

        if not self._check_exists():
            raise RuntimeError(
                "Dataset not found. You can use download=True to download it"
            )

        self._samples = [
            (
                str(self._images_base_path / annotation["fname"]),
                annotation["class"]
                - 1,  # Original target mapping starts from 1, hence -1
            )
            for annotation in sio.loadmat(self._annotations_mat_path, squeeze_me=True)[
                "annotations"
            ]
        ]

        self.classes = sio.loadmat(str(devkit / "cars_meta.mat"), squeeze_me=True)[
            "class_names"
        ].tolist()
        self.class_to_idx = {cls: i for i, cls in enumerate(self.classes)}

    def __len__(self) -> int:
        return len(self._samples)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        """Returns pil_image and class_id for given index"""
        image_path, target = self._samples[idx]
        pil_image = Image.open(image_path).convert("RGB")

        if self.transform is not None:
            pil_image = self.transform(pil_image)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return pil_image, target

    def download(self) -> None:
        if self._check_exists():
            return

        download_and_extract_archive(
            url="https://ai.stanford.edu/~jkrause/cars/car_devkit.tgz",
            download_root=str(self._base_folder),
            md5="c3b158d763b6e2245038c8ad08e45376",
        )
        if self._split == "train":
            download_and_extract_archive(
                url="https://ai.stanford.edu/~jkrause/car196/cars_train.tgz",
                download_root=str(self._base_folder),
                md5="065e5b463ae28d29e77c1b4b166cfe61",
            )
        else:
            download_and_extract_archive(
                url="https://ai.stanford.edu/~jkrause/car196/cars_test.tgz",
                download_root=str(self._base_folder),
                md5="4ce7ebf6a94d07f1952d94dd34c4d501",
            )
            download_url(
                url="https://ai.stanford.edu/~jkrause/car196/cars_test_annos_withlabels.mat",
                root=str(self._base_folder),
                md5="b0a2b23655a3edd16d84508592a98d10",
            )

    def _check_exists(self) -> bool:
        if not (self._base_folder / "devkit").is_dir():
            return False

        return self._annotations_mat_path.exists() and self._images_base_path.is_dir()


class Cars:
    def __init__(
        self,
        preprocess,
        location=os.path.expanduser("~/data"),
        batch_size=32,
        num_workers=16,
    ):
        # Data loading code

        self.train_dataset = PytorchStanfordCars(
            location, "train", preprocess, download=False
        )
        self.train_loader = torch.utils.data.DataLoader(
            self.train_dataset,
            shuffle=True,
            batch_size=batch_size,
            num_workers=num_workers,
        )

        self.test_dataset = PytorchStanfordCars(
            location, "test", preprocess, download=False
        )
        self.test_loader = torch.utils.data.DataLoader(
            self.test_dataset, batch_size=batch_size, num_workers=num_workers
        )
        idx_to_class = dict((v, k) for k, v in self.train_dataset.class_to_idx.items())
        self.classnames = [
            idx_to_class[i].replace("_", " ") for i in range(len(idx_to_class))
        ]

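A minimal usage sketch for Cars (hypothetical: download=False means the Stanford Cars files must already sit under <location>/stanford_cars, scipy must be installed, and my_preprocess stands in for any torchvision transform; the original Stanford URLs may no longer resolve, so local data is assumed):

# Hypothetical usage sketch, not part of the commit.
import os
from torchvision import transforms

my_preprocess = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
cars = Cars(preprocess=my_preprocess, location=os.path.expanduser("~/data"))
image, target = cars.train_dataset[0]
print(cars.classnames[target])  # human-readable class of the first training image
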
datasets/cifar10.py ADDED
@@ -0,0 +1,56 @@
import os
import PIL
import torch
import numpy as np
import torchvision
from torchvision import transforms
from torchvision.datasets import CIFAR10 as PyTorchCIFAR10
from torchvision.datasets import VisionDataset

cifar_classnames = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

class CIFAR10:
    def __init__(self, preprocess,
                 location=os.path.expanduser('~/data'),
                 batch_size=128,
                 num_workers=16):

        self.train_dataset = PyTorchCIFAR10(
            root=location, download=True, train=True, transform=preprocess
        )

        self.train_loader = torch.utils.data.DataLoader(
            self.train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers
        )

        self.test_dataset = PyTorchCIFAR10(
            root=location, download=True, train=False, transform=preprocess
        )

        self.test_loader = torch.utils.data.DataLoader(
            self.test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers
        )

        self.classnames = self.test_dataset.classes

def convert(x):
    if isinstance(x, np.ndarray):
        return torchvision.transforms.functional.to_pil_image(x)
    return x

class BasicVisionDataset(VisionDataset):
    def __init__(self, images, targets, transform=None, target_transform=None):
        if transform is not None:
            transform.transforms.insert(0, convert)
        super(BasicVisionDataset, self).__init__(root=None, transform=transform, target_transform=target_transform)
        assert len(images) == len(targets)

        self.images = images
        self.targets = targets

    def __getitem__(self, index):
        return self.transform(self.images[index]), self.targets[index]

    def __len__(self):
        return len(self.targets)

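BasicVisionDataset wraps in-memory arrays, with convert prepended to the transform so ndarray inputs become PIL images before the rest of the pipeline runs; a hedged sketch with synthetic data:

# Illustrative only: synthetic uint8 HWC images.
import numpy as np
import torch
from torchvision import transforms

images = np.random.randint(0, 255, size=(10, 32, 32, 3), dtype=np.uint8)
targets = torch.zeros(10, dtype=torch.long)
ds = BasicVisionDataset(images, targets, transform=transforms.Compose([transforms.ToTensor()]))
x, y = ds[0]
print(x.shape)  # torch.Size([3, 32, 32])
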
datasets/cifar100.py ADDED
@@ -0,0 +1,30 @@
import os
import torch
from torchvision.datasets import CIFAR100 as PyTorchCIFAR100

class CIFAR100:
    def __init__(self,
                 preprocess,
                 location=os.path.expanduser('~/data'),
                 batch_size=128,
                 num_workers=16):

        self.train_dataset = PyTorchCIFAR100(
            root=location, download=True, train=True, transform=preprocess
        )

        # shuffle=True added for the training loader to match the other dataset modules
        self.train_loader = torch.utils.data.DataLoader(
            self.train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers
        )

        self.test_dataset = PyTorchCIFAR100(
            root=location, download=True, train=False, transform=preprocess
        )

        self.test_loader = torch.utils.data.DataLoader(
            self.test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers
        )

        self.classnames = self.test_dataset.classes

datasets/common.py ADDED
@@ -0,0 +1,164 @@
import os
import torch
import json
import glob
import collections
import random

import numpy as np

from tqdm import tqdm

import torchvision.datasets as datasets
from torch.utils.data import Dataset, DataLoader, Sampler


class SubsetSampler(Sampler):
    def __init__(self, indices):
        self.indices = indices

    def __iter__(self):
        return (i for i in self.indices)

    def __len__(self):
        return len(self.indices)


class ImageFolderWithPaths(datasets.ImageFolder):
    def __init__(self, path, transform, flip_label_prob=0.0):
        super().__init__(path, transform)
        self.flip_label_prob = flip_label_prob
        if self.flip_label_prob > 0:
            print(f"Flipping labels with probability {self.flip_label_prob}")
            num_classes = len(self.classes)
            for i in range(len(self.samples)):
                if random.random() < self.flip_label_prob:
                    new_label = random.randint(0, num_classes - 1)
                    self.samples[i] = (self.samples[i][0], new_label)

    def __getitem__(self, index):
        image, label = super(ImageFolderWithPaths, self).__getitem__(index)
        return {"images": image, "labels": label, "image_paths": self.samples[index][0]}


def maybe_dictionarize(batch):
    if isinstance(batch, dict):
        return batch

    if len(batch) == 2:
        batch = {"images": batch[0], "labels": batch[1]}
    elif len(batch) == 3:
        batch = {"images": batch[0], "labels": batch[1], "metadata": batch[2]}
    else:
        raise ValueError(f"Unexpected number of elements: {len(batch)}")

    return batch


def get_features_helper(image_encoder, dataloader, device):
    all_data = collections.defaultdict(list)

    image_encoder = image_encoder.to(device)
    image_encoder = torch.nn.DataParallel(
        image_encoder, device_ids=[x for x in range(torch.cuda.device_count())]
    )
    image_encoder.eval()

    with torch.no_grad():
        for batch in tqdm(dataloader):
            batch = maybe_dictionarize(batch)
            features = image_encoder(batch["images"].cuda())

            all_data["features"].append(features.cpu())

            for key, val in batch.items():
                if key == "images":
                    continue
                if hasattr(val, "cpu"):
                    val = val.cpu()
                    all_data[key].append(val)
                else:
                    all_data[key].extend(val)

    for key, val in all_data.items():
        if torch.is_tensor(val[0]):
            all_data[key] = torch.cat(val).numpy()

    return all_data


def get_features(is_train, image_encoder, dataset, device):
    split = "train" if is_train else "val"
    dname = type(dataset).__name__
    # Guard against a missing cache directory: the original referenced
    # cache_dir/cached_files before assignment when no cache was configured.
    cache_dir = None
    cached_files = []
    if image_encoder.cache_dir is not None:
        cache_dir = f"{image_encoder.cache_dir}/{dname}/{split}"
        cached_files = glob.glob(f"{cache_dir}/*")
    if cache_dir is not None and len(cached_files) > 0:
        print(f"Getting features from {cache_dir}")
        data = {}
        for cached_file in cached_files:
            name = os.path.splitext(os.path.basename(cached_file))[0]
            data[name] = torch.load(cached_file)
    else:
        print(f"Did not find cached features at {cache_dir}. Building from scratch.")
        loader = dataset.train_loader if is_train else dataset.test_loader
        data = get_features_helper(image_encoder, loader, device)
        if image_encoder.cache_dir is None:
            print("Not caching because no cache directory was passed.")
        else:
            os.makedirs(cache_dir, exist_ok=True)
            print(f"Caching data at {cache_dir}")
            for name, val in data.items():
                torch.save(val, f"{cache_dir}/{name}.pt")
    return data


class FeatureDataset(Dataset):
    def __init__(self, is_train, image_encoder, dataset, device):
        self.data = get_features(is_train, image_encoder, dataset, device)

    def __len__(self):
        return len(self.data["features"])

    def __getitem__(self, idx):
        data = {k: v[idx] for k, v in self.data.items()}
        data["features"] = torch.from_numpy(data["features"]).float()
        return data


def get_dataloader(dataset, is_train, args, image_encoder=None, num_samples=None):
    if image_encoder is not None:
        feature_dataset = FeatureDataset(is_train, image_encoder, dataset, args.device)
        dataloader = DataLoader(
            feature_dataset, batch_size=args.batch_size, shuffle=is_train
        )
    else:
        dataloader = dataset.train_loader if is_train else dataset.test_loader
        if num_samples is not None:
            # indices = np.random.choice(len(dataloader.dataset), num_samples, replace=False)
            if is_train:
                indices = list(range(min(num_samples, len(dataset.train_dataset))))
            else:
                indices = list(range(min(num_samples, len(dataset.test_dataset))))
            dataloader = DataLoader(
                dataloader.dataset,
                batch_size=args.batch_size,
                sampler=SubsetSampler(indices),
            )
    return dataloader


def get_dataloaders(datasets, is_train, args, image_encoder=None):
    dataloaders = {}
    for dataset_name, dataset in datasets.items():
        dataloaders[dataset_name] = get_dataloader(
            dataset, is_train, args, image_encoder
        )
    return dataloaders


def get_dataiters(dataloaders):
    dataiters = {}
    for dataset_name, dataloader in dataloaders.items():
        dataiters[dataset_name] = iter(dataloader)
    return dataiters

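maybe_dictionarize lets downstream code treat tuple-style and dict-style batches uniformly; a quick illustrative call:

# Illustrative only: a 2-tuple batch is mapped to the dict convention used above.
import torch

batch = (torch.zeros(4, 3, 224, 224), torch.zeros(4, dtype=torch.long))
print(maybe_dictionarize(batch).keys())  # dict_keys(['images', 'labels'])
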
datasets/common2.py ADDED
@@ -0,0 +1,147 @@
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import os
from typing import Tuple, Optional
from abc import ABC, abstractmethod


class BaseDatasetLoader(ABC):
    """Base class for all dataset loaders"""

    def __init__(
        self, root_dir: str = "./data", batch_size: int = 32, num_workers: int = 4
    ):
        self.root_dir = root_dir
        self.batch_size = batch_size
        self.num_workers = num_workers
        os.makedirs(root_dir, exist_ok=True)

    def _create_dataloaders(
        self,
        train_dataset: torch.utils.data.Dataset,
        test_dataset: torch.utils.data.Dataset,
    ) -> Tuple[DataLoader, DataLoader]:
        """Create DataLoader objects for training and testing"""
        train_loader = DataLoader(
            train_dataset,
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=self.num_workers,
        )

        test_loader = DataLoader(
            test_dataset,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
        )

        return train_loader, test_loader

    @abstractmethod
    def load_dataset(self) -> Tuple[DataLoader, DataLoader]:
        """Load dataset and return train and test dataloaders"""
        pass


class RGBDatasetLoader(BaseDatasetLoader):
    """Base class for RGB image datasets"""

    def __init__(self, preprocess, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.preprocess = preprocess
        if self.preprocess:
            self.train_transform = transforms.Compose(
                [
                    transforms.RandomResizedCrop(224),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                    ),
                ]
            )

            self.test_transform = transforms.Compose(
                [
                    transforms.Resize(256),
                    transforms.CenterCrop(224),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                    ),
                ]
            )
        else:
            self.train_transform = transforms.Compose(
                [
                    transforms.Resize((224, 224)),
                    transforms.ToTensor(),
                ]
            )

            self.test_transform = transforms.Compose(
                [
                    transforms.Resize((224, 224)),
                    transforms.ToTensor(),
                ]
            )


class GrayscaleDatasetLoader(BaseDatasetLoader):
    """Base class for grayscale image datasets with RGB conversion"""

    def __init__(self, preprocess, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if preprocess:
            self.train_transform = transforms.Compose(
                [
                    transforms.RandomResizedCrop(224),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    transforms.Lambda(
                        lambda x: x.repeat(3, 1, 1)
                    ),  # Convert to RGB by repeating channels
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406],  # ImageNet normalization for RGB
                        std=[0.229, 0.224, 0.225],
                    ),
                ]
            )

            self.test_transform = transforms.Compose(
                [
                    transforms.Resize(256),
                    transforms.CenterCrop(224),
                    transforms.ToTensor(),
                    transforms.Lambda(
                        lambda x: x.repeat(3, 1, 1)
                    ),  # Convert to RGB by repeating channels
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406],  # ImageNet normalization for RGB
                        std=[0.229, 0.224, 0.225],
                    ),
                ]
            )
        else:
            self.train_transform = transforms.Compose(
                [
                    transforms.Resize((224, 224)),
                    transforms.ToTensor(),
                    transforms.Lambda(
                        lambda x: x.repeat(3, 1, 1)
                    ),  # Convert to RGB by repeating channels
                ]
            )

            self.test_transform = transforms.Compose(
                [
                    transforms.Resize((224, 224)),
                    transforms.ToTensor(),
                    transforms.Lambda(
                        lambda x: x.repeat(3, 1, 1)
                    ),  # Convert to RGB by repeating channels
                ]
            )

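Concrete datasets only have to implement load_dataset(); a hedged sketch of what a new grayscale loader could look like (MNIST here is purely illustrative, not part of the commit):

# Hypothetical subclass sketch: the base class supplies the transforms and
# the _create_dataloaders helper.
class MNISTLoader(GrayscaleDatasetLoader):
    def load_dataset(self) -> Tuple[DataLoader, DataLoader]:
        train_dataset = torchvision.datasets.MNIST(
            root=self.root_dir, train=True,
            transform=self.train_transform, download=True,
        )
        test_dataset = torchvision.datasets.MNIST(
            root=self.root_dir, train=False,
            transform=self.test_transform, download=True,
        )
        return self._create_dataloaders(train_dataset, test_dataset)
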
datasets/dogs.py ADDED
@@ -0,0 +1,127 @@
import copy
import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, random_split
import os


class DogsDataset:
    def __init__(
        self,
        preprocess,
        data_dir="theRestDataset",
        image_size=224,
        val_split=0.2,
        seed=42,
    ):
        self.preprocess = preprocess
        # "dogs" is appended here, so data_dir should be the parent directory
        self.data_dir = os.path.join(data_dir, "dogs")
        self.image_size = image_size
        self.val_split = val_split
        self.seed = seed

        # Define transforms for training and validation
        if self.preprocess:
            self.train_transforms = transforms.Compose(
                [
                    transforms.RandomResizedCrop(self.image_size),
                    transforms.RandomHorizontalFlip(),
                    transforms.RandomRotation(15),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                    ),
                ]
            )

            self.test_transforms = transforms.Compose(
                [
                    transforms.Resize(self.image_size + 32),
                    transforms.CenterCrop(self.image_size),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                    ),
                ]
            )
        else:
            self.train_transforms = transforms.Compose(
                [
                    transforms.Resize((self.image_size, self.image_size)),
                    transforms.ToTensor(),
                ]
            )

            self.test_transforms = transforms.Compose(
                [
                    transforms.Resize((self.image_size, self.image_size)),
                    transforms.ToTensor(),
                ]
            )

        # Create datasets
        self.setup()

    def setup(self):
        # Create full dataset
        full_dataset = datasets.ImageFolder(root=self.data_dir)

        # Calculate lengths for train and validation
        total_length = len(full_dataset)
        val_length = int(total_length * self.val_split)
        train_length = total_length - val_length

        # Create train/validation splits
        generator = torch.Generator().manual_seed(self.seed)
        self.train_dataset, self.test_dataset = random_split(
            full_dataset, [train_length, val_length], generator=generator
        )

        # random_split returns two Subsets that share one underlying ImageFolder;
        # give the test subset its own copy so the two transform assignments
        # below do not overwrite each other.
        self.test_dataset.dataset = copy.deepcopy(full_dataset)

        # Apply transforms
        self.train_dataset.dataset.transform = self.train_transforms
        self.test_dataset.dataset.transform = self.test_transforms

        # Store class information
        self.classes = full_dataset.classes
        self.class_to_idx = full_dataset.class_to_idx

    def get_dataloaders(self, batch_size=32, num_workers=4):
        train_loader = DataLoader(
            self.train_dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
        )

        val_loader = DataLoader(
            self.test_dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=num_workers,
        )

        return train_loader, val_loader


class Dogs:
    def __init__(
        self,
        preprocess,
        location=os.path.expanduser("~/data"),
        batch_size=32,
        num_workers=16,
    ):
        self.dataset = DogsDataset(preprocess, data_dir=location)
        self.train_dataset, self.test_dataset = (
            self.dataset.train_dataset,
            self.dataset.test_dataset,
        )
        self.train_loader, self.test_loader = self.dataset.get_dataloaders(
            batch_size=batch_size, num_workers=num_workers
        )
        # Strip the WordNet prefix from Stanford-Dogs-style folder names.
        idx_to_class = dict((v, k) for k, v in self.dataset.class_to_idx.items())
        self.classnames = []
        for i in range(len(idx_to_class)):
            self.classnames.append(
                "_".join(idx_to_class[i].split("-")[1:]).replace("_", " ").lower()
            )

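The loop in Dogs strips the WordNet prefix from Stanford-Dogs-style folder names; an illustrative (hypothetical) example:

# Hypothetical folder name in the Stanford Dogs "nXXXXXXXX-Breed_Name" style:
name = "n02085782-Japanese_spaniel"
pretty = "_".join(name.split("-")[1:]).replace("_", " ").lower()
print(pretty)  # -> "japanese spaniel"
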
datasets/dtd.py ADDED
@@ -0,0 +1,34 @@
import os
import torch
import torchvision.datasets as datasets


class DTD:
    def __init__(self,
                 preprocess,
                 location=os.path.expanduser('~/data'),
                 batch_size=32,
                 num_workers=16):
        # Data loading code
        traindir = os.path.join(location, 'dtd', 'train')
        valdir = os.path.join(location, 'dtd', 'val')

        self.train_dataset = datasets.ImageFolder(
            traindir, transform=preprocess)
        self.train_loader = torch.utils.data.DataLoader(
            self.train_dataset,
            shuffle=True,
            batch_size=batch_size,
            num_workers=num_workers,
        )

        self.test_dataset = datasets.ImageFolder(valdir, transform=preprocess)
        self.test_loader = torch.utils.data.DataLoader(
            self.test_dataset,
            batch_size=batch_size,
            num_workers=num_workers
        )
        idx_to_class = dict((v, k)
                            for k, v in self.train_dataset.class_to_idx.items())
        self.classnames = [idx_to_class[i].replace('_', ' ')
                           for i in range(len(idx_to_class))]

datasets/eurosat.py ADDED
@@ -0,0 +1,75 @@
import os
import torch
import torchvision.datasets as datasets
import re

def pretify_classname(classname):
    l = re.findall(r'[A-Z](?:[a-z]+|[A-Z]*(?=[A-Z]|$))', classname)
    l = [i.lower() for i in l]
    out = ' '.join(l)
    if out.endswith('al'):
        return out + ' area'
    return out

class EuroSATBase:
    def __init__(self,
                 preprocess,
                 test_split,
                 location='~/datasets',
                 batch_size=32,
                 num_workers=16):
        # Data loading code. Expand '~' so the default location works verbatim.
        location = os.path.expanduser(location)
        traindir = os.path.join(location, 'EuroSAT_splits', 'train')
        testdir = os.path.join(location, 'EuroSAT_splits', test_split)

        self.train_dataset = datasets.ImageFolder(traindir, transform=preprocess)
        self.train_loader = torch.utils.data.DataLoader(
            self.train_dataset,
            shuffle=True,
            batch_size=batch_size,
            num_workers=num_workers,
        )

        self.test_dataset = datasets.ImageFolder(testdir, transform=preprocess)
        self.test_loader = torch.utils.data.DataLoader(
            self.test_dataset,
            batch_size=batch_size,
            num_workers=num_workers
        )
        idx_to_class = dict((v, k)
                            for k, v in self.train_dataset.class_to_idx.items())
        self.classnames = [idx_to_class[i].replace('_', ' ') for i in range(len(idx_to_class))]
        self.classnames = [pretify_classname(c) for c in self.classnames]
        ours_to_open_ai = {
            'annual crop': 'annual crop land',
            'forest': 'forest',
            'herbaceous vegetation': 'brushland or shrubland',
            'highway': 'highway or road',
            'industrial area': 'industrial buildings or commercial buildings',
            'pasture': 'pasture land',
            'permanent crop': 'permanent crop land',
            'residential area': 'residential buildings or homes or apartments',
            'river': 'river',
            'sea lake': 'lake or sea',
        }
        for i in range(len(self.classnames)):
            self.classnames[i] = ours_to_open_ai[self.classnames[i]]


class EuroSAT(EuroSATBase):
    def __init__(self,
                 preprocess,
                 location='~/datasets',
                 batch_size=32,
                 num_workers=16):
        super().__init__(preprocess, 'test', location, batch_size, num_workers)


class EuroSATVal(EuroSATBase):
    def __init__(self,
                 preprocess,
                 location='~/datasets',
                 batch_size=32,
                 num_workers=16):
        super().__init__(preprocess, 'val', location, batch_size, num_workers)

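pretify_classname splits CamelCase folder names and appends " area" to names ending in "al", so the results line up with the keys of the ours_to_open_ai mapping; a few illustrative calls:

# Illustrative only: how EuroSAT folder names are normalized.
print(pretify_classname("AnnualCrop"))            # -> "annual crop"
print(pretify_classname("HerbaceousVegetation"))  # -> "herbaceous vegetation"
print(pretify_classname("Industrial"))            # -> "industrial area"
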
datasets/fashionmnist.py ADDED
@@ -0,0 +1,64 @@
from src.datasets.common2 import GrayscaleDatasetLoader
import torchvision
from torch.utils.data import DataLoader
from typing import Tuple
import os


class FashionMNISTLoader(GrayscaleDatasetLoader):
    """Loader for Fashion-MNIST dataset"""

    def load_dataset(self):
        """Return (train_dataset, test_dataset, (train_loader, test_loader)).

        Note: this intentionally returns more than the base class's
        ``Tuple[DataLoader, DataLoader]`` contract, so the annotation is omitted.
        """
        train_dataset = torchvision.datasets.FashionMNIST(
            root=self.root_dir,
            train=True,
            transform=self.train_transform,
            download=True,
        )

        test_dataset = torchvision.datasets.FashionMNIST(
            root=self.root_dir,
            train=False,
            transform=self.test_transform,
            download=True,
        )

        return (
            train_dataset,
            test_dataset,
            self._create_dataloaders(train_dataset, test_dataset),
        )

    @property
    def classes(self) -> list:
        """Get Fashion-MNIST class labels"""
        return [
            "Top",
            "Trouser",
            "Pullover",
            "Dress",
            "Coat",
            "Sandal",
            "Shirt",
            "Sneaker",
            "Bag",
            "Ankle boot",
        ]


class FashionMNIST:
    def __init__(
        self,
        preprocess,
        location=os.path.expanduser("~/data"),
        batch_size=32,
        num_workers=16,
    ):
        self.loader = FashionMNISTLoader(preprocess, location, batch_size, num_workers)
        self.train_dataset, self.test_dataset, loaders = self.loader.load_dataset()
        self.train_loader, self.test_loader = loaders
        idx_to_class = dict((v, k) for k, v in self.train_dataset.class_to_idx.items())
        self.classnames = [
            idx_to_class[i].replace("_", " ").lower() for i in range(len(idx_to_class))
        ]

datasets/flowers.py ADDED
@@ -0,0 +1,125 @@
import copy
import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, random_split
import os


class FlowerDataset:
    def __init__(
        self,
        preprocess,
        data_dir="theRestDataset",
        image_size=224,
        val_split=0.2,
        seed=42,
    ):
        self.preprocess = preprocess
        # "flowers" is appended here, so data_dir should be the parent directory
        self.data_dir = os.path.join(data_dir, "flowers")
        self.image_size = image_size
        self.val_split = val_split
        self.seed = seed

        # Define transforms for training and validation
        if self.preprocess:
            self.train_transforms = transforms.Compose(
                [
                    transforms.RandomResizedCrop(self.image_size),
                    transforms.RandomHorizontalFlip(),
                    transforms.RandomRotation(15),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                    ),
                ]
            )

            self.test_transforms = transforms.Compose(
                [
                    transforms.Resize(self.image_size + 32),
                    transforms.CenterCrop(self.image_size),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                    ),
                ]
            )
        else:
            self.train_transforms = transforms.Compose(
                [
                    transforms.Resize((self.image_size, self.image_size)),
                    transforms.ToTensor(),
                ]
            )

            self.test_transforms = transforms.Compose(
                [
                    transforms.Resize((self.image_size, self.image_size)),
                    transforms.ToTensor(),
                ]
            )

        # Create datasets
        self.setup()

    def setup(self):
        # Create full dataset
        full_dataset = datasets.ImageFolder(root=self.data_dir)

        # Calculate lengths for train and validation
        total_length = len(full_dataset)
        val_length = int(total_length * self.val_split)
        train_length = total_length - val_length

        # Create train/validation splits
        generator = torch.Generator().manual_seed(self.seed)
        self.train_dataset, self.test_dataset = random_split(
            full_dataset, [train_length, val_length], generator=generator
        )

        # random_split returns two Subsets that share one underlying ImageFolder;
        # give the test subset its own copy so the two transform assignments
        # below do not overwrite each other.
        self.test_dataset.dataset = copy.deepcopy(full_dataset)

        # Apply transforms
        self.train_dataset.dataset.transform = self.train_transforms
        self.test_dataset.dataset.transform = self.test_transforms

        # Store class information
        self.classes = full_dataset.classes
        self.class_to_idx = full_dataset.class_to_idx

    def get_dataloaders(self, batch_size=32, num_workers=4):
        train_loader = DataLoader(
            self.train_dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
        )

        val_loader = DataLoader(
            self.test_dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=num_workers,
        )

        return train_loader, val_loader


class Flowers:
    def __init__(
        self,
        preprocess,
        location=os.path.expanduser("~/data"),
        batch_size=32,
        num_workers=16,
    ):
        self.dataset = FlowerDataset(preprocess, data_dir=location)
        self.train_dataset, self.test_dataset = (
            self.dataset.train_dataset,
            self.dataset.test_dataset,
        )
        self.train_loader, self.test_loader = self.dataset.get_dataloaders(
            batch_size=batch_size, num_workers=num_workers
        )
        idx_to_class = dict((v, k) for k, v in self.dataset.class_to_idx.items())
        self.classnames = [
            idx_to_class[i].replace("_", " ").lower() for i in range(len(idx_to_class))
        ]

datasets/food101.py ADDED
@@ -0,0 +1,25 @@
from src.datasets.common2 import RGBDatasetLoader
import torchvision
from torch.utils.data import DataLoader
from typing import Tuple


class Food101Loader(RGBDatasetLoader):
    """Loader for Food-101 dataset"""

    def load_dataset(self) -> Tuple[DataLoader, DataLoader]:
        train_dataset = torchvision.datasets.Food101(
            root=self.root_dir,
            split="train",
            transform=self.train_transform,
            download=True,
        )

        test_dataset = torchvision.datasets.Food101(
            root=self.root_dir,
            split="test",
            transform=self.test_transform,
            download=True,
        )

        return self._create_dataloaders(train_dataset, test_dataset)

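A minimal usage sketch (hypothetical; on first use torchvision downloads the multi-gigabyte Food-101 archive into root_dir):

# Hypothetical usage sketch, not part of the commit.
loader = Food101Loader(preprocess=True, root_dir="./data", batch_size=32, num_workers=4)
train_loader, test_loader = loader.load_dataset()
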
datasets/garbage.py ADDED
@@ -0,0 +1,177 @@
import os
from typing import Optional, Callable, Tuple
import torch
from torchvision import datasets, transforms
from torch.utils.data import random_split, DataLoader


class GarbageDataset:
    def __init__(
        self,
        preprocess,
        root_dir: str = "theRestDataset",
        image_size: int = 224,
        val_split: float = 0.2,
        seed: int = 42,
    ):
        """
        Initialize the Garbage Dataset.

        Args:
            preprocess: Whether to apply augmentation and ImageNet normalization
            root_dir (str): Root directory; the dataset is expected under
                <root_dir>/garbage/data
            image_size (int): Size of the input images
            val_split (float): Fraction of data to use for validation (0.0 to 1.0)
            seed (int): Random seed for reproducibility
        """
        # "garbage/data" is appended here, so root_dir should be the parent directory
        self.root_dir = os.path.join(root_dir, "garbage", "data")
        self.image_size = image_size
        self.val_split = val_split
        self.seed = seed

        # Define transforms for training and validation
        self.preprocess = preprocess
        if self.preprocess:
            self.train_transforms = transforms.Compose(
                [
                    transforms.RandomResizedCrop(self.image_size),
                    transforms.RandomHorizontalFlip(),
                    transforms.RandomRotation(15),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                    ),
                ]
            )

            self.test_transforms = transforms.Compose(
                [
                    transforms.Resize(self.image_size + 32),
                    transforms.CenterCrop(self.image_size),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                    ),
                ]
            )
        else:
            self.train_transforms = transforms.Compose(
                [
                    transforms.Resize((self.image_size, self.image_size)),
                    transforms.ToTensor(),
                ]
            )

            self.test_transforms = transforms.Compose(
                [
                    transforms.Resize((self.image_size, self.image_size)),
                    transforms.ToTensor(),
                ]
            )

        # Load the full dataset
        self.full_dataset = datasets.ImageFolder(
            root=self.root_dir, transform=None  # We'll apply transforms later
        )

        # Split into train and validation sets
        self._create_splits()

    def _create_splits(self):
        """Create train and validation splits."""
        # Calculate lengths
        val_size = int(len(self.full_dataset) * self.val_split)
        train_size = len(self.full_dataset) - val_size

        # Create splits
        generator = torch.Generator().manual_seed(self.seed)
        self.train_dataset, self.test_dataset = random_split(
            self.full_dataset, [train_size, val_size], generator=generator
        )

        # Wrap each split so it applies its own transform
        self.train_dataset = TransformDataset(self.train_dataset, self.train_transforms)
        self.test_dataset = TransformDataset(self.test_dataset, self.test_transforms)

    def get_dataloaders(
        self, batch_size: int = 32, num_workers: int = 4, pin_memory: bool = True
    ) -> Tuple[DataLoader, DataLoader]:
        """
        Create and return train and validation dataloaders.

        Args:
            batch_size (int): Batch size for both loaders
            num_workers (int): Number of workers for data loading
            pin_memory (bool): Whether to pin memory for GPU training

        Returns:
            tuple: (train_dataloader, val_dataloader)
        """
        train_loader = DataLoader(
            self.train_dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
            pin_memory=pin_memory,
        )

        test_loader = DataLoader(
            self.test_dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=num_workers,
            pin_memory=pin_memory,
        )

        return train_loader, test_loader

    @property
    def classes(self) -> list:
        """Get the list of class names."""
        return self.full_dataset.classes

    @property
    def class_to_idx(self) -> dict:
        """Get the class to index mapping."""
        return self.full_dataset.class_to_idx


class TransformDataset(torch.utils.data.Dataset):
    """Dataset wrapper that applies transforms to a subset of data."""

    def __init__(
        self, subset: torch.utils.data.Dataset, transform: Optional[Callable] = None
    ):
        self.subset = subset
        self.transform = transform

    def __getitem__(self, idx):
        x, y = self.subset[idx]
        if self.transform:
            x = self.transform(x)
        return x, y

    def __len__(self):
        return len(self.subset)


class Garbage:
    def __init__(
        self,
        preprocess,
        location=os.path.expanduser("~/data"),
        batch_size=32,
        num_workers=16,
    ):
        self.dataset = GarbageDataset(preprocess, root_dir=location)
        self.train_dataset, self.test_dataset = (
            self.dataset.train_dataset,
            self.dataset.test_dataset,
        )
        self.train_loader, self.test_loader = self.dataset.get_dataloaders(
            batch_size=batch_size, num_workers=num_workers
        )
        idx_to_class = dict((v, k) for k, v in self.dataset.class_to_idx.items())
        self.classnames = [
            idx_to_class[i].replace("_", " ").lower() for i in range(len(idx_to_class))
        ]

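garbage.py takes a different route from the deepcopy fix used in birds/dogs/flowers: each random_split subset is wrapped in TransformDataset, so the two splits can carry different transforms while sharing one underlying ImageFolder. A stripped-down illustration (the path and transforms are hypothetical):

# Illustrative only: each wrapped subset applies its own transform lazily.
from torchvision import datasets, transforms
from torch.utils.data import random_split

base = datasets.ImageFolder(root="path/to/images")  # hypothetical path
n_val = int(0.2 * len(base))
train_sub, test_sub = random_split(base, [len(base) - n_val, n_val])
train_ds = TransformDataset(train_sub, transform=transforms.Compose(
    [transforms.RandomHorizontalFlip(), transforms.ToTensor()]))
test_ds = TransformDataset(test_sub, transform=transforms.ToTensor())
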
datasets/gtsrb.py ADDED
@@ -0,0 +1,205 @@
import csv
import os
import pathlib
from typing import Any, Callable, Dict, List, Optional, Tuple

import numpy as np
import PIL
import torch
from torchvision.datasets.folder import make_dataset
from torchvision.datasets.utils import (download_and_extract_archive,
                                        verify_str_arg)
from torchvision.datasets.vision import VisionDataset

def find_classes(directory: str) -> Tuple[List[str], Dict[str, int]]:
    """Finds the class folders in a dataset.

    See :class:`DatasetFolder` for details.
    """
    classes = sorted(entry.name for entry in os.scandir(directory) if entry.is_dir())
    if not classes:
        raise FileNotFoundError(f"Couldn't find any class folder in {directory}.")

    class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
    return classes, class_to_idx

class PyTorchGTSRB(VisionDataset):
    """`German Traffic Sign Recognition Benchmark (GTSRB) <https://benchmark.ini.rub.de/>`_ Dataset.

    Modified from https://pytorch.org/vision/main/_modules/torchvision/datasets/gtsrb.html#GTSRB.

    Args:
        root (string): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), or ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    def __init__(
        self,
        root: str,
        split: str = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:

        super().__init__(root, transform=transform, target_transform=target_transform)

        self._split = verify_str_arg(split, "split", ("train", "test"))
        self._base_folder = pathlib.Path(root) / "gtsrb"
        self._target_folder = (
            self._base_folder / "GTSRB" / ("Training" if self._split == "train" else "Final_Test/Images")
        )

        if download:
            self.download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        if self._split == "train":
            _, class_to_idx = find_classes(str(self._target_folder))
            samples = make_dataset(str(self._target_folder), extensions=(".ppm",), class_to_idx=class_to_idx)
        else:
            with open(self._base_folder / "GT-final_test.csv") as csv_file:
                samples = [
                    (str(self._target_folder / row["Filename"]), int(row["ClassId"]))
                    for row in csv.DictReader(csv_file, delimiter=";", skipinitialspace=True)
                ]

        self._samples = samples
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self) -> int:
        return len(self._samples)

    def __getitem__(self, index: int) -> Tuple[Any, Any]:

        path, target = self._samples[index]
        sample = PIL.Image.open(path).convert("RGB")

        if self.transform is not None:
            sample = self.transform(sample)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return sample, target


    def _check_exists(self) -> bool:
        return self._target_folder.is_dir()

    def download(self) -> None:
        if self._check_exists():
            return

        base_url = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"

        if self._split == "train":
            download_and_extract_archive(
                f"{base_url}GTSRB-Training_fixed.zip",
                download_root=str(self._base_folder),
                md5="513f3c79a4c5141765e10e952eaa2478",
            )
        else:
            download_and_extract_archive(
                f"{base_url}GTSRB_Final_Test_Images.zip",
                download_root=str(self._base_folder),
                md5="c7e4e6327067d32654124b0fe9e82185",
            )
            download_and_extract_archive(
                f"{base_url}GTSRB_Final_Test_GT.zip",
                download_root=str(self._base_folder),
                md5="fe31e9c9270bbcd7b84b7f21a9d9d9e5",
            )


class GTSRB:
    def __init__(self,
                 preprocess,
                 location=os.path.expanduser('~/data'),
                 batch_size=128,
                 num_workers=16):

        # to fit with repo conventions for location
        self.train_dataset = PyTorchGTSRB(
            root=location,
            download=True,
            split='train',
            transform=preprocess
        )

        self.train_loader = torch.utils.data.DataLoader(
            self.train_dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers
        )

        self.test_dataset = PyTorchGTSRB(
            root=location,
            download=True,
            split='test',
            transform=preprocess
        )

        self.test_loader = torch.utils.data.DataLoader(
            self.test_dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=num_workers
        )

        # from https://github.com/openai/CLIP/blob/e184f608c5d5e58165682f7c332c3a8b4c1545f2/data/prompts.md
        self.classnames = [
            'red and white circle 20 kph speed limit',
            'red and white circle 30 kph speed limit',
            'red and white circle 50 kph speed limit',
            'red and white circle 60 kph speed limit',
            'red and white circle 70 kph speed limit',
            'red and white circle 80 kph speed limit',
            'end / de-restriction of 80 kph speed limit',
            'red and white circle 100 kph speed limit',
            'red and white circle 120 kph speed limit',
            'red and white circle red car and black car no passing',
            'red and white circle red truck and black car no passing',
            'red and white triangle road intersection warning',
            'white and yellow diamond priority road',
            'red and white upside down triangle yield right-of-way',
            'stop',
            'empty red and white circle',
            'red and white circle no truck entry',
            'red circle with white horizonal stripe no entry',
            'red and white triangle with exclamation mark warning',
            'red and white triangle with black left curve approaching warning',
            'red and white triangle with black right curve approaching warning',
            'red and white triangle with black double curve approaching warning',
            'red and white triangle rough / bumpy road warning',
            'red and white triangle car skidding / slipping warning',
            'red and white triangle with merging / narrow lanes warning',
            'red and white triangle with person digging / construction / road work warning',
            'red and white triangle with traffic light approaching warning',
            'red and white triangle with person walking warning',
            'red and white triangle with child and person walking warning',
            'red and white triangle with bicyle warning',
            'red and white triangle with snowflake / ice warning',
            'red and white triangle with deer warning',
            'white circle with gray strike bar no speed limit',
            'blue circle with white right turn arrow mandatory',
            'blue circle with white left turn arrow mandatory',
            'blue circle with white forward arrow mandatory',
            'blue circle with white forward or right turn arrow mandatory',
            'blue circle with white forward or left turn arrow mandatory',
            'blue circle with white keep right arrow mandatory',
            'blue circle with white keep left arrow mandatory',
            'blue circle with white arrows indicating a traffic circle',
            'white circle with gray strike bar indicating no passing for cars has ended',
            'white circle with gray strike bar indicating no passing for trucks has ended',
        ]

datasets/imagenet.py ADDED
@@ -0,0 +1,253 @@
+ import os
+ import torch
+
+ from .common import ImageFolderWithPaths, SubsetSampler
+ import numpy as np
+
+
+ imagenet_classnames = [
+     "tench", "goldfish", "great white shark", "tiger shark", "hammerhead shark", "electric ray",
+     "stingray", "rooster", "hen", "ostrich", "brambling", "goldfinch", "house finch", "junco",
+     "indigo bunting", "American robin", "bulbul", "jay", "magpie", "chickadee", "American dipper",
+     "kite (bird of prey)", "bald eagle", "vulture", "great grey owl", "fire salamander",
+     "smooth newt", "newt", "spotted salamander", "axolotl", "American bullfrog", "tree frog",
+     "tailed frog", "loggerhead sea turtle", "leatherback sea turtle", "mud turtle", "terrapin",
+     "box turtle", "banded gecko", "green iguana", "Carolina anole",
+     "desert grassland whiptail lizard", "agama", "frilled-necked lizard", "alligator lizard",
+     "Gila monster", "European green lizard", "chameleon", "Komodo dragon", "Nile crocodile",
+     "American alligator", "triceratops", "worm snake", "ring-necked snake",
+     "eastern hog-nosed snake", "smooth green snake", "kingsnake", "garter snake", "water snake",
+     "vine snake", "night snake", "boa constrictor", "African rock python", "Indian cobra",
+     "green mamba", "sea snake", "Saharan horned viper", "eastern diamondback rattlesnake",
+     "sidewinder rattlesnake", "trilobite", "harvestman", "scorpion", "yellow garden spider",
+     "barn spider", "European garden spider", "southern black widow", "tarantula", "wolf spider",
+     "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse", "prairie grouse", "peafowl",
+     "quail", "partridge", "african grey parrot", "macaw", "sulphur-crested cockatoo", "lorikeet",
+     "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "duck",
+     "red-breasted merganser", "goose", "black swan", "tusker", "echidna", "platypus", "wallaby",
+     "koala", "wombat", "jellyfish", "sea anemone", "brain coral", "flatworm", "nematode", "conch",
+     "snail", "slug", "sea slug", "chiton", "chambered nautilus", "Dungeness crab", "rock crab",
+     "fiddler crab", "red king crab", "American lobster", "spiny lobster", "crayfish", "hermit crab",
+     "isopod", "white stork", "black stork", "spoonbill", "flamingo", "little blue heron",
+     "great egret", "bittern bird", "crane bird", "limpkin", "common gallinule", "American coot",
+     "bustard", "ruddy turnstone", "dunlin", "common redshank", "dowitcher", "oystercatcher",
+     "pelican", "king penguin", "albatross", "grey whale", "killer whale", "dugong", "sea lion",
+     "Chihuahua", "Japanese Chin", "Maltese", "Pekingese", "Shih Tzu", "King Charles Spaniel",
+     "Papillon", "toy terrier", "Rhodesian Ridgeback", "Afghan Hound", "Basset Hound", "Beagle",
+     "Bloodhound", "Bluetick Coonhound", "Black and Tan Coonhound", "Treeing Walker Coonhound",
+     "English foxhound", "Redbone Coonhound", "borzoi", "Irish Wolfhound", "Italian Greyhound",
+     "Whippet", "Ibizan Hound", "Norwegian Elkhound", "Otterhound", "Saluki", "Scottish Deerhound",
+     "Weimaraner", "Staffordshire Bull Terrier", "American Staffordshire Terrier",
+     "Bedlington Terrier", "Border Terrier", "Kerry Blue Terrier", "Irish Terrier",
+     "Norfolk Terrier", "Norwich Terrier", "Yorkshire Terrier", "Wire Fox Terrier",
+     "Lakeland Terrier", "Sealyham Terrier", "Airedale Terrier", "Cairn Terrier",
+     "Australian Terrier", "Dandie Dinmont Terrier", "Boston Terrier", "Miniature Schnauzer",
+     "Giant Schnauzer", "Standard Schnauzer", "Scottish Terrier", "Tibetan Terrier",
+     "Australian Silky Terrier", "Soft-coated Wheaten Terrier", "West Highland White Terrier",
+     "Lhasa Apso", "Flat-Coated Retriever", "Curly-coated Retriever", "Golden Retriever",
+     "Labrador Retriever", "Chesapeake Bay Retriever", "German Shorthaired Pointer", "Vizsla",
+     "English Setter", "Irish Setter", "Gordon Setter", "Brittany dog", "Clumber Spaniel",
+     "English Springer Spaniel", "Welsh Springer Spaniel", "Cocker Spaniel", "Sussex Spaniel",
+     "Irish Water Spaniel", "Kuvasz", "Schipperke", "Groenendael dog", "Malinois", "Briard",
+     "Australian Kelpie", "Komondor", "Old English Sheepdog", "Shetland Sheepdog", "collie",
+     "Border Collie", "Bouvier des Flandres dog", "Rottweiler", "German Shepherd Dog", "Dobermann",
+     "Miniature Pinscher", "Greater Swiss Mountain Dog", "Bernese Mountain Dog",
+     "Appenzeller Sennenhund", "Entlebucher Sennenhund", "Boxer", "Bullmastiff", "Tibetan Mastiff",
+     "French Bulldog", "Great Dane", "St. Bernard", "husky", "Alaskan Malamute", "Siberian Husky",
+     "Dalmatian", "Affenpinscher", "Basenji", "pug", "Leonberger", "Newfoundland dog",
+     "Great Pyrenees dog", "Samoyed", "Pomeranian", "Chow Chow", "Keeshond", "brussels griffon",
+     "Pembroke Welsh Corgi", "Cardigan Welsh Corgi", "Toy Poodle", "Miniature Poodle",
+     "Standard Poodle", "Mexican hairless dog (xoloitzcuintli)", "grey wolf", "Alaskan tundra wolf",
+     "red wolf or maned wolf", "coyote", "dingo", "dhole", "African wild dog", "hyena", "red fox",
+     "kit fox", "Arctic fox", "grey fox", "tabby cat", "tiger cat", "Persian cat", "Siamese cat",
+     "Egyptian Mau", "cougar", "lynx", "leopard", "snow leopard", "jaguar", "lion", "tiger",
+     "cheetah", "brown bear", "American black bear", "polar bear", "sloth bear", "mongoose",
+     "meerkat", "tiger beetle", "ladybug", "ground beetle", "longhorn beetle", "leaf beetle",
+     "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant", "grasshopper",
+     "cricket insect", "stick insect", "cockroach", "praying mantis", "cicada", "leafhopper",
+     "lacewing", "dragonfly", "damselfly", "red admiral butterfly", "ringlet butterfly",
+     "monarch butterfly", "small white butterfly", "sulphur butterfly", "gossamer-winged butterfly",
+     "starfish", "sea urchin", "sea cucumber", "cottontail rabbit", "hare", "Angora rabbit",
+     "hamster", "porcupine", "fox squirrel", "marmot", "beaver", "guinea pig", "common sorrel horse",
+     "zebra", "pig", "wild boar", "warthog", "hippopotamus", "ox", "water buffalo", "bison",
+     "ram (adult male sheep)", "bighorn sheep", "Alpine ibex", "hartebeest", "impala (antelope)",
+     "gazelle", "arabian camel", "llama", "weasel", "mink", "European polecat",
+     "black-footed ferret", "otter", "skunk", "badger", "armadillo", "three-toed sloth", "orangutan",
+     "gorilla", "chimpanzee", "gibbon", "siamang", "guenon", "patas monkey", "baboon", "macaque",
+     "langur", "black-and-white colobus", "proboscis monkey", "marmoset", "white-headed capuchin",
+     "howler monkey", "titi monkey", "Geoffroy's spider monkey", "common squirrel monkey",
+     "ring-tailed lemur", "indri", "Asian elephant", "African bush elephant", "red panda",
+     "giant panda", "snoek fish", "eel", "silver salmon", "rock beauty fish", "clownfish",
+     "sturgeon", "gar fish", "lionfish", "pufferfish", "abacus", "abaya", "academic gown",
+     "accordion", "acoustic guitar", "aircraft carrier", "airliner", "airship", "altar", "ambulance",
+     "amphibious vehicle", "analog clock", "apiary", "apron", "trash can", "assault rifle",
+     "backpack", "bakery", "balance beam", "balloon", "ballpoint pen", "Band-Aid", "banjo",
+     "baluster / handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel",
+     "wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "swimming cap", "bath towel",
+     "bathtub", "station wagon", "lighthouse", "beaker", "military hat (bearskin or shako)",
+     "beer bottle", "beer glass", "bell tower", "baby bib", "tandem bicycle", "bikini",
+     "ring binder", "binoculars", "birdhouse", "boathouse", "bobsleigh", "bolo tie", "poke bonnet",
+     "bookcase", "bookstore", "bottle cap", "hunting bow", "bow tie", "brass memorial plaque", "bra",
+     "breakwater", "breastplate", "broom", "bucket", "buckle", "bulletproof vest",
+     "high-speed train", "butcher shop", "taxicab", "cauldron", "candle", "cannon", "canoe",
+     "can opener", "cardigan", "car mirror", "carousel", "tool kit", "cardboard box / carton",
+     "car wheel", "automated teller machine", "cassette", "cassette player", "castle", "catamaran",
+     "CD player", "cello", "mobile phone", "chain", "chain-link fence", "chain mail", "chainsaw",
+     "storage chest", "chiffonier", "bell or wind chime", "china cabinet", "Christmas stocking",
+     "church", "movie theater", "cleaver", "cliff dwelling", "cloak", "clogs", "cocktail shaker",
+     "coffee mug", "coffeemaker", "spiral or coil", "combination lock", "computer keyboard",
+     "candy store", "container ship", "convertible", "corkscrew", "cornet", "cowboy boot",
+     "cowboy hat", "cradle", "construction crane", "crash helmet", "crate", "infant bed",
+     "Crock Pot", "croquet ball", "crutch", "cuirass", "dam", "desk", "desktop computer",
+     "rotary dial telephone", "diaper", "digital clock", "digital watch", "dining table",
+     "dishcloth", "dishwasher", "disc brake", "dock", "dog sled", "dome", "doormat", "drilling rig",
+     "drum", "drumstick", "dumbbell", "Dutch oven", "electric fan", "electric guitar",
+     "electric locomotive", "entertainment center", "envelope", "espresso machine", "face powder",
+     "feather boa", "filing cabinet", "fireboat", "fire truck", "fire screen", "flagpole", "flute",
+     "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster bed",
+     "freight car", "French horn", "frying pan", "fur coat", "garbage truck",
+     "gas mask or respirator", "gas pump", "goblet", "go-kart", "golf ball", "golf cart", "gondola",
+     "gong", "gown", "grand piano", "greenhouse", "radiator grille", "grocery store", "guillotine",
+     "hair clip", "hair spray", "half-track", "hammer", "hamper", "hair dryer", "hand-held computer",
+     "handkerchief", "hard disk drive", "harmonica", "harp", "combine harvester", "hatchet",
+     "holster", "home theater", "honeycomb", "hook", "hoop skirt", "gymnastic horizontal bar",
+     "horse-drawn vehicle", "hourglass", "iPod", "clothes iron", "carved pumpkin", "jeans", "jeep",
+     "T-shirt", "jigsaw puzzle", "rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat",
+     "ladle", "lampshade", "laptop computer", "lawn mower", "lens cap", "letter opener", "library",
+     "lifeboat", "lighter", "limousine", "ocean liner", "lipstick", "slip-on shoe", "lotion",
+     "music speaker", "loupe magnifying glass", "sawmill", "magnetic compass", "messenger bag",
+     "mailbox", "tights", "one-piece bathing suit", "manhole cover", "maraca", "marimba", "mask",
+     "matchstick", "maypole", "maze", "measuring cup", "medicine cabinet", "megalith", "microphone",
+     "microwave oven", "military uniform", "milk can", "minibus", "miniskirt", "minivan", "missile",
+     "mitten", "mixing bowl", "mobile home", "ford model t", "modem", "monastery", "monitor",
+     "moped", "mortar and pestle", "graduation cap", "mosque", "mosquito net", "vespa",
+     "mountain bike", "tent", "computer mouse", "mousetrap", "moving van", "muzzle", "metal nail",
+     "neck brace", "necklace", "baby pacifier", "notebook computer", "obelisk", "oboe", "ocarina",
+     "odometer", "oil filter", "pipe organ", "oscilloscope", "overskirt", "bullock cart",
+     "oxygen mask", "product packet / packaging", "paddle", "paddle wheel", "padlock", "paintbrush",
+     "pajamas", "palace", "pan flute", "paper towel", "parachute", "parallel bars", "park bench",
+     "parking meter", "railroad car", "patio", "payphone", "pedestal", "pencil case",
+     "pencil sharpener", "perfume", "Petri dish", "photocopier", "plectrum", "Pickelhaube",
+     "picket fence", "pickup truck", "pier", "piggy bank", "pill bottle", "pillow", "ping-pong ball",
+     "pinwheel", "pirate ship", "drink pitcher", "block plane", "planetarium", "plastic bag",
+     "plate rack", "farm plow", "plunger", "Polaroid camera", "pole", "police van", "poncho",
+     "pool table", "soda bottle", "plant pot", "potter's wheel", "power drill", "prayer rug",
+     "printer", "prison", "missile", "projector", "hockey puck", "punching bag", "purse", "quill",
+     "quilt", "race car", "racket", "radiator", "radio", "radio telescope", "rain barrel",
+     "recreational vehicle", "fishing casting reel", "reflex camera", "refrigerator",
+     "remote control", "restaurant", "revolver", "rifle", "rocking chair", "rotisserie", "eraser",
+     "rugby ball", "ruler measuring stick", "sneaker", "safe", "safety pin", "salt shaker", "sandal",
+     "sarong", "saxophone", "scabbard", "weighing scale", "school bus", "schooner", "scoreboard",
+     "CRT monitor", "screw", "screwdriver", "seat belt", "sewing machine", "shield", "shoe store",
+     "shoji screen / room divider", "shopping basket", "shopping cart", "shovel", "shower cap",
+     "shower curtain", "ski", "balaclava ski mask", "sleeping bag", "slide rule", "sliding door",
+     "slot machine", "snorkel", "snowmobile", "snowplow", "soap dispenser", "soccer ball", "sock",
+     "solar thermal collector", "sombrero", "soup bowl", "keyboard space bar", "space heater",
+     "space shuttle", "spatula", "motorboat", "spider web", "spindle", "sports car", "spotlight",
+     "stage", "steam locomotive", "through arch bridge", "steel drum", "stethoscope", "scarf",
+     "stone wall", "stopwatch", "stove", "strainer", "tram", "stretcher", "couch", "stupa",
+     "submarine", "suit", "sundial", "sunglasses", "sunglasses", "sunscreen", "suspension bridge",
+     "mop", "sweatshirt", "swim trunks / shorts", "swing", "electrical switch", "syringe",
+     "table lamp", "tank", "tape player", "teapot", "teddy bear", "television", "tennis ball",
+     "thatched roof", "front curtain", "thimble", "threshing machine", "throne", "tile roof",
+     "toaster", "tobacco shop", "toilet seat", "torch", "totem pole", "tow truck", "toy store",
+     "tractor", "semi-trailer truck", "tray", "trench coat", "tricycle", "trimaran", "tripod",
+     "triumphal arch", "trolleybus", "trombone", "hot tub", "turnstile", "typewriter keyboard",
+     "umbrella", "unicycle", "upright piano", "vacuum cleaner", "vase", "vaulted or arched ceiling",
+     "velvet fabric", "vending machine", "vestment", "viaduct", "violin", "volleyball",
+     "waffle iron", "wall clock", "wallet", "wardrobe", "military aircraft", "sink",
+     "washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle",
+     "hair wig", "window screen", "window shade", "Windsor tie", "wine bottle", "airplane wing",
+     "wok", "wooden spoon", "wool", "split-rail fence", "shipwreck", "sailboat", "yurt", "website",
+     "comic book", "crossword", "traffic or street sign", "traffic light", "dust jacket", "menu",
+     "plate", "guacamole", "consomme", "hot pot", "trifle", "ice cream", "popsicle", "baguette",
+     "bagel", "pretzel", "cheeseburger", "hot dog", "mashed potatoes", "cabbage", "broccoli",
+     "cauliflower", "zucchini", "spaghetti squash", "acorn squash", "butternut squash", "cucumber",
+     "artichoke", "bell pepper", "cardoon", "mushroom", "Granny Smith apple", "strawberry", "orange",
+     "lemon", "fig", "pineapple", "banana", "jackfruit", "cherimoya (custard apple)", "pomegranate",
+     "hay", "carbonara", "chocolate syrup", "dough", "meatloaf", "pizza", "pot pie", "burrito",
+     "red wine", "espresso", "tea cup", "eggnog", "mountain", "bubble", "cliff", "coral reef",
+     "geyser", "lakeshore", "promontory", "sandbar", "beach", "valley", "volcano", "baseball player",
+     "bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper", "corn", "acorn",
+     "rose hip", "horse chestnut seed", "coral fungus", "agaric", "gyromitra", "stinkhorn mushroom",
+     "earth star fungus", "hen of the woods mushroom", "bolete", "corn cob", "toilet paper"
+ ]
+
+
+ class ImageNet:
+     def __init__(self,
+                  preprocess,
+                  location=os.path.expanduser('~/data'),
+                  batch_size=32,
+                  num_workers=32):
+         self.preprocess = preprocess
+         self.location = location
+         self.batch_size = batch_size
+         self.num_workers = num_workers
+         self.classnames = imagenet_classnames
+
+         self.populate_train()
+         self.populate_test()
+
+     def populate_train(self):
+         traindir = os.path.join(self.location, self.name(), 'train')
+         self.train_dataset = ImageFolderWithPaths(
+             traindir,
+             transform=self.preprocess)
+         sampler = self.get_train_sampler()
+         kwargs = {'shuffle': True} if sampler is None else {}
+         self.train_loader = torch.utils.data.DataLoader(
+             self.train_dataset,
+             sampler=sampler,
+             batch_size=self.batch_size,
+             num_workers=self.num_workers,
+             **kwargs,
+         )
+
+     def populate_test(self):
+         self.test_dataset = self.get_test_dataset()
+         self.test_loader = torch.utils.data.DataLoader(
+             self.test_dataset,
+             batch_size=self.batch_size,
+             num_workers=self.num_workers,
+             sampler=self.get_test_sampler()
+         )
+
+     def get_test_path(self):
+         test_path = os.path.join(self.location, self.name(), 'val_in_folder')
+         if not os.path.exists(test_path):
+             test_path = os.path.join(self.location, self.name(), 'val')
+         return test_path
+
+     def get_train_sampler(self):
+         return None
+
+     def get_test_sampler(self):
+         return None
+
+     def get_test_dataset(self):
+         return ImageFolderWithPaths(self.get_test_path(), transform=self.preprocess)
+
+     def name(self):
+         return 'imagenet'
+
+
+ class ImageNetTrain(ImageNet):
+
+     def get_test_dataset(self):
+         pass
+
+
+ class ImageNetK(ImageNet):
+     # Concrete subclasses are expected to provide k(), the number of
+     # training examples to keep per class.
+
+     def get_train_sampler(self):
+         idxs = np.zeros(len(self.train_dataset.targets))
+         target_array = np.array(self.train_dataset.targets)
+         for c in range(1000):
+             m = target_array == c
+             n = len(idxs[m])
+             arr = np.zeros(n)
+             arr[:self.k()] = 1
+             np.random.shuffle(arr)
+             idxs[m] = arr
+
+         idxs = idxs.astype('int')
+         sampler = SubsetSampler(np.where(idxs)[0])
+         return sampler
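
The masking loop above keeps k randomly chosen examples per class. A self-contained sketch of the same idea on a toy label array, with k fixed at 2 (the real class gets k from a subclass's k()):

    import numpy as np

    targets = np.array([0, 0, 0, 1, 1, 1])   # toy labels, two classes
    k = 2
    idxs = np.zeros(len(targets))
    for c in range(2):
        m = targets == c
        arr = np.zeros(m.sum())
        arr[:k] = 1                # mark k examples of class c ...
        np.random.shuffle(arr)     # ... chosen uniformly at random within the class
        idxs[m] = arr
    print(np.where(idxs.astype(int))[0])  # e.g. [0 2 3 5]: two indices per class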
datasets/kvasir.py ADDED
@@ -0,0 +1,165 @@
+ import os
+ import torch
+ from torch.utils.data import Dataset, DataLoader, random_split
+ from torchvision import datasets, transforms
+ from typing import Tuple, Optional
+
+
+ class KvasirDataset:
+     def __init__(
+         self,
+         preprocess,
+         data_dir: str = "theRestDataset/kvasir",
+         image_size: int = 224,
+         val_split: float = 0.2,
+         seed: int = 42,
+         batch_size: int = 32,
+         num_workers: int = 4,
+     ):
+         """
+         Initialize the Kvasir dataset with train and validation splits.
+
+         Args:
+             preprocess (bool): Whether to apply the augmentation/normalization pipeline
+             data_dir (str): Path to the Kvasir dataset directory
+             image_size (int): Size of the input images (default: 224)
+             val_split (float): Validation split ratio (default: 0.2)
+             seed (int): Random seed for reproducibility (default: 42)
+             batch_size (int): Batch size for dataloaders (default: 32)
+             num_workers (int): Number of workers for dataloaders (default: 4)
+         """
+         self.data_dir = os.path.join(data_dir, "kvasir")
+         self.image_size = image_size
+         self.val_split = val_split
+         self.seed = seed
+         self.batch_size = batch_size
+         self.num_workers = num_workers
+
+         # Set up transforms
+         self.preprocess = preprocess
+         if self.preprocess:
+             self.train_transforms = transforms.Compose(
+                 [
+                     transforms.RandomResizedCrop(self.image_size),
+                     transforms.RandomHorizontalFlip(),
+                     transforms.RandomRotation(15),
+                     transforms.ColorJitter(
+                         brightness=0.2, contrast=0.2, saturation=0.2
+                     ),
+                     transforms.ToTensor(),
+                     transforms.Normalize(
+                         mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
+                     ),
+                 ]
+             )
+
+             self.test_transforms = transforms.Compose(
+                 [
+                     transforms.Resize(self.image_size + 32),
+                     transforms.CenterCrop(self.image_size),
+                     transforms.ToTensor(),
+                     transforms.Normalize(
+                         mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
+                     ),
+                 ]
+             )
+         else:
+             self.train_transforms = transforms.Compose(
+                 [
+                     transforms.Resize((self.image_size, self.image_size)),
+                     transforms.ToTensor(),
+                 ]
+             )
+
+             self.test_transforms = transforms.Compose(
+                 [
+                     transforms.Resize((self.image_size, self.image_size)),
+                     transforms.ToTensor(),
+                 ]
+             )
+
+         # Load and split dataset
+         self.setup()
+
+     def setup(self):
+         """Set up the dataset splits and create dataloaders."""
+         # Load the dataset twice so each split keeps its own transform.
+         # Overwriting the transform of the single ImageFolder underlying both
+         # random_split subsets would silently apply the test transforms to the
+         # training split as well.
+         train_full = datasets.ImageFolder(
+             root=self.data_dir, transform=self.train_transforms
+         )
+         test_full = datasets.ImageFolder(
+             root=self.data_dir, transform=self.test_transforms
+         )
+
+         # Calculate split sizes
+         val_size = int(len(train_full) * self.val_split)
+         train_size = len(train_full) - val_size
+
+         # The same seed yields identical index partitions, so the two splits
+         # stay disjoint and consistent across the two dataset copies.
+         self.train_dataset, _ = random_split(
+             train_full,
+             [train_size, val_size],
+             generator=torch.Generator().manual_seed(self.seed),
+         )
+         _, self.test_dataset = random_split(
+             test_full,
+             [train_size, val_size],
+             generator=torch.Generator().manual_seed(self.seed),
+         )
+
+         # Create class names mapping
+         self.classes = train_full.classes
+         self.class_to_idx = train_full.class_to_idx
+
+     def get_dataloaders(self) -> Tuple[DataLoader, DataLoader]:
+         """
+         Create and return train and validation dataloaders.
+
+         Returns:
+             tuple: (train_dataloader, val_dataloader)
+         """
+         train_loader = DataLoader(
+             self.train_dataset,
+             batch_size=self.batch_size,
+             shuffle=True,
+             num_workers=self.num_workers,
+             pin_memory=True,
+         )
+
+         val_loader = DataLoader(
+             self.test_dataset,
+             batch_size=self.batch_size,
+             shuffle=False,
+             num_workers=self.num_workers,
+             pin_memory=True,
+         )
+
+         return train_loader, val_loader
+
+     def get_class_names(self) -> list:
+         """Return the list of class names."""
+         return self.classes
+
+     def get_class_to_idx(self) -> dict:
+         """Return the class to index mapping."""
+         return self.class_to_idx
+
+
+ class Kvasir:
+     def __init__(
+         self,
+         preprocess,
+         location=os.path.expanduser("~/data"),
+         batch_size=32,
+         num_workers=16,
+     ):
+         # Data loading code
+         self.dataset = KvasirDataset(
+             preprocess,
+             data_dir=location,
+             batch_size=batch_size,
+             num_workers=num_workers,
+         )
+         self.train_dataset = self.dataset.train_dataset
+         self.test_dataset = self.dataset.test_dataset
+         self.train_loader, self.test_loader = self.dataset.get_dataloaders()
+         idx_to_class = dict((v, k) for k, v in self.dataset.get_class_to_idx().items())
+         self.classnames = [
+             idx_to_class[i].replace("_", " ").lower() for i in range(len(idx_to_class))
+         ]
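
A quick sketch verifying the assumption behind setup(): random_split with the same manual seed partitions indices identically, which is what lets two independently loaded copies of the dataset carry different transforms while sharing one split:

    import torch
    from torch.utils.data import random_split

    # range(10) stands in for any dataset with a length
    a1, b1 = random_split(range(10), [8, 2], generator=torch.Generator().manual_seed(42))
    a2, b2 = random_split(range(10), [8, 2], generator=torch.Generator().manual_seed(42))
    assert a1.indices == a2.indices and b1.indices == b2.indices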
datasets/landscape.py ADDED
@@ -0,0 +1,157 @@
+ import os
+ from typing import Optional, Callable, Tuple
+
+ import torch
+ from torchvision import datasets, transforms
+ from torch.utils.data import Dataset, DataLoader
+
+
+ class LandscapeDataset:
+     def __init__(
+         self,
+         preprocess,
+         root_dir: str,
+         image_size: int = 224,
+     ):
+         """
+         Initialize the Landscape dataset.
+
+         Args:
+             preprocess (bool): Whether to apply the augmentation/normalization pipeline
+             root_dir (str): Root directory of the dataset
+             image_size (int): Size of the output images (default: 224)
+         """
+         self.root_dir = root_dir
+         self.image_size = image_size
+         self.preprocess = preprocess
+
+         # Define default transforms
+         if self.preprocess:
+             self.train_transforms = transforms.Compose(
+                 [
+                     transforms.RandomResizedCrop(self.image_size),
+                     transforms.RandomHorizontalFlip(),
+                     transforms.RandomRotation(15),
+                     transforms.ToTensor(),
+                     transforms.Normalize(
+                         mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
+                     ),
+                 ]
+             )
+
+             self.test_transforms = transforms.Compose(
+                 [
+                     transforms.Resize(self.image_size + 32),
+                     transforms.CenterCrop(self.image_size),
+                     transforms.ToTensor(),
+                     transforms.Normalize(
+                         mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
+                     ),
+                 ]
+             )
+         else:
+             self.train_transforms = transforms.Compose(
+                 [
+                     transforms.Resize((self.image_size, self.image_size)),
+                     transforms.ToTensor(),
+                 ]
+             )
+
+             self.test_transforms = transforms.Compose(
+                 [
+                     transforms.Resize((self.image_size, self.image_size)),
+                     transforms.ToTensor(),
+                 ]
+             )
+
+         # Create datasets using ImageFolder
+         train_dir = os.path.join(root_dir, "train")
+         test_dir = os.path.join(root_dir, "test")
+         self.train_dataset = datasets.ImageFolder(
+             root=train_dir, transform=self.train_transforms
+         )
+         self.test_dataset = datasets.ImageFolder(
+             root=test_dir, transform=self.test_transforms
+         )
+
+         # Store class information
+         self.classes = self.train_dataset.classes
+         self.class_to_idx = self.train_dataset.class_to_idx
+         self.num_classes = len(self.classes)
+
+     def get_dataloader(
+         self,
+         dataset: Dataset,
+         batch_size: int = 32,
+         shuffle: Optional[bool] = None,
+         num_workers: int = 4,
+         pin_memory: bool = True,
+     ) -> DataLoader:
+         """
+         Create a DataLoader for the dataset.
+
+         Args:
+             dataset (Dataset): Dataset to wrap
+             batch_size (int): Batch size for the dataloader
+             shuffle (bool, optional): Whether to shuffle the data; defaults to
+                 shuffling only the training dataset
+             num_workers (int): Number of worker processes
+             pin_memory (bool): Whether to pin memory for faster GPU transfer
+
+         Returns:
+             DataLoader: PyTorch DataLoader object
+         """
+         if shuffle is None:
+             # By default, shuffle the training split and nothing else.
+             shuffle = dataset is self.train_dataset
+
+         return DataLoader(
+             dataset,
+             batch_size=batch_size,
+             shuffle=shuffle,
+             num_workers=num_workers,
+             pin_memory=pin_memory,
+         )
+
+
+ class Landscape:
+     def __init__(
+         self,
+         preprocess,
+         location: str = os.path.expanduser("~/data"),
+         batch_size: int = 32,
+         num_workers: int = 16,
+     ):
+         self.location = os.path.join(location, "landscape")
+         self.batch_size = batch_size
+         self.num_workers = num_workers
+
+         self.dataset = LandscapeDataset(preprocess, root_dir=self.location)
+         self.train_loader = self.dataset.get_dataloader(
+             self.dataset.train_dataset,
+             batch_size=batch_size,
+             num_workers=num_workers,
+             shuffle=True,
+         )
+         self.test_loader = self.dataset.get_dataloader(
+             self.dataset.test_dataset,
+             batch_size=batch_size,
+             num_workers=num_workers,
+             shuffle=False,
+         )
+         self.train_dataset = self.dataset.train_dataset
+         self.test_dataset = self.dataset.test_dataset
+         self.classes = self.dataset.classes
+
+         idx_to_class = dict((v, k) for k, v in self.dataset.class_to_idx.items())
+         self.classnames = [
+             idx_to_class[i].replace("_", " ").lower() for i in range(len(idx_to_class))
+         ]
datasets/mangoleaf.py ADDED
@@ -0,0 +1,152 @@
+ import os
+ import torch
+ from torchvision import transforms, datasets
+ from torch.utils.data import random_split
+ import numpy as np
+
+
+ class MangoLeafDataset:
+     def __init__(
+         self,
+         preprocess,
+         data_dir="theRestDataset/mangoLeaf",
+         image_size=224,
+         val_split=0.2,
+         seed=42,
+     ):
+         """
+         Initialize the MangoLeaf dataset.
+
+         Args:
+             preprocess (bool): Whether to apply the augmentation/normalization pipeline
+             data_dir (str): Path to the mangoLeaf directory
+             image_size (int): Size of the input images (default: 224)
+             val_split (float): Validation split ratio (default: 0.2)
+             seed (int): Random seed for reproducibility (default: 42)
+         """
+         self.data_dir = os.path.join(data_dir, "mangoLeaf")
+         self.image_size = image_size
+         self.val_split = val_split
+         self.seed = seed
+         self.preprocess = preprocess
+
+         # Define transforms for training and validation
+         if self.preprocess:
+             self.train_transforms = transforms.Compose(
+                 [
+                     transforms.RandomResizedCrop(self.image_size),
+                     transforms.RandomHorizontalFlip(),
+                     transforms.RandomRotation(15),
+                     transforms.ToTensor(),
+                     transforms.Normalize(
+                         mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
+                     ),
+                 ]
+             )
+
+             self.test_transforms = transforms.Compose(
+                 [
+                     transforms.Resize(self.image_size + 32),
+                     transforms.CenterCrop(self.image_size),
+                     transforms.ToTensor(),
+                     transforms.Normalize(
+                         mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
+                     ),
+                 ]
+             )
+         else:
+             self.train_transforms = transforms.Compose(
+                 [
+                     transforms.Resize((self.image_size, self.image_size)),
+                     transforms.ToTensor(),
+                 ]
+             )
+
+             self.test_transforms = transforms.Compose(
+                 [
+                     transforms.Resize((self.image_size, self.image_size)),
+                     transforms.ToTensor(),
+                 ]
+             )
+
+         # Load the full dataset; transforms are applied per split below
+         self.full_dataset = datasets.ImageFolder(root=self.data_dir, transform=None)
+
+         # Calculate lengths for train and validation splits
+         val_size = int(len(self.full_dataset) * self.val_split)
+         train_size = len(self.full_dataset) - val_size
+
+         # Create train and validation splits
+         generator = torch.Generator().manual_seed(self.seed)
+         self.train_dataset, self.test_dataset = random_split(
+             self.full_dataset, [train_size, val_size], generator=generator
+         )
+
+         # Wrap each split so it carries its own transform
+         self.train_dataset = TransformDataset(self.train_dataset, self.train_transforms)
+         self.test_dataset = TransformDataset(self.test_dataset, self.test_transforms)
+
+         # Store class information
+         self.classes = self.full_dataset.classes
+         self.class_to_idx = self.full_dataset.class_to_idx
+
+     def get_train_dataset(self):
+         """Return the training dataset."""
+         return self.train_dataset
+
+     def get_val_dataset(self):
+         """Return the validation dataset."""
+         return self.test_dataset
+
+     def get_class_info(self):
+         """Return class names and mapping."""
+         return self.classes, self.class_to_idx
+
+
+ class TransformDataset(torch.utils.data.Dataset):
+     """Custom Dataset class to apply transforms to a subset of data."""
+
+     def __init__(self, dataset, transform=None):
+         self.dataset = dataset
+         self.transform = transform
+
+     def __len__(self):
+         return len(self.dataset)
+
+     def __getitem__(self, idx):
+         image, label = self.dataset[idx]
+         if self.transform:
+             image = self.transform(image)
+         return image, label
+
+
+ class MangoLeaf:
+     def __init__(
+         self,
+         preprocess,
+         location=os.path.expanduser("~/data"),
+         batch_size=32,
+         num_workers=16,
+     ):
+         self.location = location
+         self.batch_size = batch_size
+         self.num_workers = num_workers
+
+         # Initialize the MangoLeaf dataset
+         self.dataset = MangoLeafDataset(preprocess, data_dir=self.location)
+         self.train_dataset = self.dataset.get_train_dataset()
+         self.test_dataset = self.dataset.get_val_dataset()
+         self.classes, self.class_to_idx = self.dataset.get_class_info()
+         self.train_loader = torch.utils.data.DataLoader(
+             self.train_dataset,
+             shuffle=True,
+             batch_size=batch_size,
+             num_workers=num_workers,
+         )
+         self.test_loader = torch.utils.data.DataLoader(
+             self.test_dataset,
+             shuffle=False,
+             batch_size=batch_size,
+             num_workers=num_workers,
+         )
+         self.classnames = self.classes
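
A runnable sketch of the TransformDataset pattern above, with a toy dataset standing in for an ImageFolder subset: the wrapper applies a split-specific transform lazily, without mutating whatever dataset it points at (assumes TransformDataset from this file is in scope):

    import torch

    class ToyDataset(torch.utils.data.Dataset):
        def __len__(self):
            return 4

        def __getitem__(self, idx):
            return float(idx), idx % 2   # (raw "image", label)

    doubled = TransformDataset(ToyDataset(), transform=lambda x: 2 * x)
    print([doubled[i] for i in range(4)])  # [(0.0, 0), (2.0, 1), (4.0, 0), (6.0, 1)]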
datasets/mnist.py ADDED
@@ -0,0 +1,41 @@
+ import os
+ import torch
+ import torchvision.datasets as datasets
+
+
+ class MNIST:
+     def __init__(self,
+                  preprocess,
+                  location=os.path.expanduser('~/data'),
+                  batch_size=128,
+                  num_workers=16):
+         self.train_dataset = datasets.MNIST(
+             root=location,
+             download=True,
+             train=True,
+             transform=preprocess
+         )
+
+         self.train_loader = torch.utils.data.DataLoader(
+             self.train_dataset,
+             batch_size=batch_size,
+             shuffle=True,
+             num_workers=num_workers
+         )
+
+         self.test_dataset = datasets.MNIST(
+             root=location,
+             download=True,
+             train=False,
+             transform=preprocess
+         )
+
+         self.test_loader = torch.utils.data.DataLoader(
+             self.test_dataset,
+             batch_size=batch_size,
+             shuffle=False,
+             num_workers=num_workers
+         )
+
+         self.classnames = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
datasets/oxfordpets.py ADDED
@@ -0,0 +1,50 @@
+ from src.datasets.common2 import RGBDatasetLoader
+ import torchvision
+ from torch.utils.data import DataLoader
+ from typing import Tuple
+ import os
+
+
+ class OxfordPetsLoader(RGBDatasetLoader):
+     """Loader for the Oxford-IIIT Pet dataset."""
+
+     def load_dataset(self) -> Tuple[DataLoader, DataLoader]:
+         self.train_dataset = torchvision.datasets.OxfordIIITPet(
+             root=self.root_dir,
+             split="trainval",
+             transform=self.train_transform,
+             download=True,
+         )
+
+         self.test_dataset = torchvision.datasets.OxfordIIITPet(
+             root=self.root_dir,
+             split="test",
+             transform=self.test_transform,
+             download=True,
+         )
+
+         return self._create_dataloaders(self.train_dataset, self.test_dataset)
+
+
+ class OxfordPets:
+     def __init__(
+         self,
+         preprocess,
+         location=os.path.expanduser("~/data"),
+         batch_size=32,
+         num_workers=16,
+     ):
+         location = os.path.join(location, "oxfordpets")
+         self.loader = OxfordPetsLoader(preprocess, location, batch_size, num_workers)
+         # load_dataset() builds the datasets and returns the loaders,
+         # so a single call is enough.
+         self.train_loader, self.test_loader = self.loader.load_dataset()
+         self.train_dataset = self.loader.train_dataset
+         self.test_dataset = self.loader.test_dataset
+
+         idx_to_class = dict(
+             (v, k) for k, v in self.train_dataset.class_to_idx.items()
+         )
+         self.classnames = [
+             idx_to_class[i].replace("_", " ").lower() for i in range(len(idx_to_class))
+         ]
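
The inversion idiom above (also used by Kvasir and Landscape) turns class_to_idx into label-ordered, human-readable classnames. A tiny sketch with made-up entries:

    class_to_idx = {'Abyssinian': 0, 'american_bulldog': 1}   # hypothetical mapping
    idx_to_class = {v: k for k, v in class_to_idx.items()}
    print([idx_to_class[i].replace('_', ' ').lower() for i in range(len(idx_to_class))])
    # ['abyssinian', 'american bulldog']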
datasets/registry.py ADDED
@@ -0,0 +1,133 @@
+ import sys
+ import inspect
+ import random
+ import torch
+ import copy
+
+ from torch.utils.data.dataset import random_split
+
+ from src.datasets.cars import Cars
+ from src.datasets.cifar10 import CIFAR10
+ from src.datasets.cifar100 import CIFAR100
+ from src.datasets.dtd import DTD
+ from src.datasets.eurosat import EuroSAT, EuroSATVal
+ from src.datasets.gtsrb import GTSRB
+ from src.datasets.imagenet import ImageNet
+ from src.datasets.mnist import MNIST
+ from src.datasets.resisc45 import RESISC45
+ from src.datasets.stl10 import STL10
+ from src.datasets.svhn import SVHN
+ from src.datasets.sun397 import SUN397
+ from src.datasets.kvasir import Kvasir
+ from src.datasets.landscape import Landscape
+ from src.datasets.mangoleaf import MangoLeaf
+ from src.datasets.oxfordpets import OxfordPets
+ from src.datasets.weather import Weather
+ from src.datasets.beans import Beans
+ from src.datasets.fashionmnist import FashionMNIST
+ from src.datasets.flowers import Flowers
+ from src.datasets.garbage import Garbage
+ from src.datasets.birds import Birds
+ from src.datasets.dogs import Dogs
+
+ # Map every dataset class imported above by name.
+ registry = {
+     name: obj
+     for name, obj in inspect.getmembers(sys.modules[__name__], inspect.isclass)
+ }
+
+
+ class GenericDataset(object):
+     def __init__(self):
+         self.train_dataset = None
+         self.train_loader = None
+         self.test_dataset = None
+         self.test_loader = None
+         self.classnames = None
+
+
+ def split_train_into_train_val(
+     dataset,
+     new_dataset_class_name,
+     batch_size,
+     num_workers,
+     val_fraction,
+     max_val_samples=None,
+     seed=0,
+ ):
+     assert val_fraction > 0.0 and val_fraction < 1.0
+     total_size = len(dataset.train_dataset)
+     val_size = int(total_size * val_fraction)
+     if max_val_samples is not None:
+         val_size = min(val_size, max_val_samples)
+     train_size = total_size - val_size
+
+     assert val_size > 0
+     assert train_size > 0
+
+     lengths = [train_size, val_size]
+
+     trainset, valset = random_split(
+         dataset.train_dataset, lengths, generator=torch.Generator().manual_seed(seed)
+     )
+     if new_dataset_class_name == "MNISTVal":
+         # Sanity check that the fixed seed reproduces the expected split.
+         assert trainset.indices[0] == 36044
+
+     new_dataset_class = type(new_dataset_class_name, (GenericDataset,), {})
+     new_dataset = new_dataset_class()
+
+     new_dataset.train_dataset = trainset
+     new_dataset.train_loader = torch.utils.data.DataLoader(
+         new_dataset.train_dataset,
+         shuffle=True,
+         batch_size=batch_size,
+         num_workers=num_workers,
+     )
+
+     new_dataset.test_dataset = valset
+     new_dataset.test_loader = torch.utils.data.DataLoader(
+         new_dataset.test_dataset, batch_size=batch_size, num_workers=num_workers
+     )
+
+     new_dataset.classnames = copy.copy(dataset.classnames)
+
+     return new_dataset
+
+
+ def get_dataset(
+     dataset_name,
+     preprocess,
+     location,
+     batch_size=128,
+     num_workers=16,
+     val_fraction=0.1,
+     max_val_samples=5000,
+ ):
+     if dataset_name.endswith("Val"):
+         # Handle val splits: fall through to the generic instantiation below
+         # when a dedicated *Val class is registered, otherwise carve a
+         # validation split out of the base dataset's training set.
+         if dataset_name in registry:
+             dataset_class = registry[dataset_name]
+         else:
+             base_dataset_name = dataset_name.split("Val")[0]
+             base_dataset = get_dataset(
+                 base_dataset_name, preprocess, location, batch_size, num_workers
+             )
+             dataset = split_train_into_train_val(
+                 base_dataset,
+                 dataset_name,
+                 batch_size,
+                 num_workers,
+                 val_fraction,
+                 max_val_samples,
+             )
+             return dataset
+     else:
+         assert (
+             dataset_name in registry
+         ), f"Unsupported dataset: {dataset_name}. Supported datasets: {list(registry.keys())}"
+     dataset_class = registry[dataset_name]
+     dataset = dataset_class(
+         preprocess, location=location, batch_size=batch_size, num_workers=num_workers
+     )
+     return dataset
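
A hedged usage sketch for the registry: a plain name instantiates the registered class directly, while a "<Name>Val" name with no registered class carves a validation split out of <Name>'s training set (capped at max_val_samples). The data root and preprocess here are placeholders:

    import os
    from src.datasets.registry import get_dataset

    location = os.path.expanduser('~/data')
    mnist = get_dataset('MNIST', preprocess=None, location=location, batch_size=64)
    mnist_val = get_dataset('MNISTVal', preprocess=None, location=location, batch_size=64)
    print(len(mnist.train_dataset))     # full training set
    print(len(mnist_val.test_dataset))  # held-out split taken from the training set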
datasets/resisc45.py ADDED
@@ -0,0 +1,304 @@
+ import abc
+ import os
+ from typing import Any, Callable, Dict, Optional, Tuple
+
+ import numpy as np
+ import torch
+ from torch import Tensor
+ from torch.utils.data import Dataset
+ from torchvision.datasets import ImageFolder
+ from torchvision.datasets.folder import default_loader as pil_loader
+
+
+ # modified from: https://github.com/microsoft/torchgeo
+ class VisionDataset(Dataset[Dict[str, Any]], abc.ABC):
+     """Abstract base class for datasets lacking geospatial information.
+
+     This base class is designed for datasets with pre-defined image chips.
+     """
+
+     @abc.abstractmethod
+     def __getitem__(self, index: int) -> Dict[str, Any]:
+         """Return an index within the dataset.
+
+         Args:
+             index: index to return
+         Returns:
+             data and labels at that index
+         Raises:
+             IndexError: if index is out of range of the dataset
+         """
+
+     @abc.abstractmethod
+     def __len__(self) -> int:
+         """Return the length of the dataset.
+
+         Returns:
+             length of the dataset
+         """
+
+     def __str__(self) -> str:
+         """Return the informal string representation of the object.
+
+         Returns:
+             informal string representation
+         """
+         return f"""\
+ {self.__class__.__name__} Dataset
+     type: VisionDataset
+     size: {len(self)}"""
+
+
+ class VisionClassificationDataset(VisionDataset, ImageFolder):
+     """Abstract base class for classification datasets lacking geospatial information.
+
+     This base class is designed for datasets with pre-defined image chips which
+     are separated into separate folders per class.
+     """
+
+     def __init__(
+         self,
+         root: str,
+         transforms: Optional[Callable[[Dict[str, Tensor]], Dict[str, Tensor]]] = None,
+         loader: Optional[Callable[[str], Any]] = pil_loader,
+         is_valid_file: Optional[Callable[[str], bool]] = None,
+     ) -> None:
+         """Initialize a new VisionClassificationDataset instance.
+
+         Args:
+             root: root directory where dataset can be found
+             transforms: a function/transform that takes input sample and its target as
+                 entry and returns a transformed version
+             loader: a callable function which takes as input a path to an image and
+                 returns a PIL Image or numpy array
+             is_valid_file: A function that takes the path of an Image file and checks if
+                 the file is a valid file
+         """
+         # When transform & target_transform are None, ImageFolder.__getitem__(index)
+         # returns a PIL.Image and int for image and label, respectively
+         super().__init__(
+             root=root,
+             transform=None,
+             target_transform=None,
+             loader=loader,
+             is_valid_file=is_valid_file,
+         )
+
+         # Must be set after calling super().__init__()
+         self.transforms = transforms
+
+     def __getitem__(self, index: int) -> Dict[str, Tensor]:
+         """Return an index within the dataset.
+
+         Args:
+             index: index to return
+         Returns:
+             data and label at that index
+         """
+         image, label = self._load_image(index)
+
+         if self.transforms is not None:
+             return self.transforms(image), label
+
+         return image, label
+
+     def __len__(self) -> int:
+         """Return the number of data points in the dataset.
+
+         Returns:
+             length of the dataset
+         """
+         return len(self.imgs)
+
+     def _load_image(self, index: int) -> Tuple[Tensor, Tensor]:
+         """Load a single image and its class label.
+
+         Args:
+             index: index to return
+         Returns:
+             the image
+             the image class label
+         """
+         img, label = ImageFolder.__getitem__(self, index)
+         label = torch.tensor(label)
+         return img, label
+
+
+ class RESISC45Dataset(VisionClassificationDataset):
+     """RESISC45 dataset.
+
+     The `RESISC45 <http://www.escience.cn/people/JunweiHan/NWPU-RESISC45.html>`_
+     dataset is a dataset for remote sensing image scene classification.
+
+     Dataset features:
+     * 31,500 images with 0.2-30 m per pixel resolution (256x256 px)
+     * three spectral bands - RGB
+     * 45 scene classes, 700 images per class
+     * images extracted from Google Earth from over 100 countries
+     * image conditions with high variability (resolution, weather, illumination)
+
+     Dataset format:
+     * images are three-channel jpgs
+
+     Dataset classes:
+     0. airplane
+     1. airport
+     2. baseball_diamond
+     3. basketball_court
+     4. beach
+     5. bridge
+     6. chaparral
+     7. church
+     8. circular_farmland
+     9. cloud
+     10. commercial_area
+     11. dense_residential
+     12. desert
+     13. forest
+     14. freeway
+     15. golf_course
+     16. ground_track_field
+     17. harbor
+     18. industrial_area
+     19. intersection
+     20. island
+     21. lake
+     22. meadow
+     23. medium_residential
+     24. mobile_home_park
+     25. mountain
+     26. overpass
+     27. palace
+     28. parking_lot
+     29. railway
+     30. railway_station
+     31. rectangular_farmland
+     32. river
+     33. roundabout
+     34. runway
+     35. sea_ice
+     36. ship
+     37. snowberg
+     38. sparse_residential
+     39. stadium
+     40. storage_tank
+     41. tennis_court
+     42. terrace
+     43. thermal_power_station
+     44. wetland
+
+     This dataset uses the train/val/test splits defined in the "In-domain representation
+     learning for remote sensing" paper:
+     * https://arxiv.org/abs/1911.06721
+
+     If you use this dataset in your research, please cite the following paper:
+     * https://doi.org/10.1109/jproc.2017.2675998
+     """
+
+     # url = "https://drive.google.com/file/d/1DnPSU5nVSN7xv95bpZ3XQ0JhKXZOKgIv"
+     # md5 = "d824acb73957502b00efd559fc6cfbbb"
+     # filename = "NWPU-RESISC45.rar"
+     directory = "resisc45/NWPU-RESISC45"
+
+     splits = ["train", "val", "test"]
+     split_urls = {
+         "train": "https://storage.googleapis.com/remote_sensing_representations/resisc45-train.txt",  # noqa: E501
+         "val": "https://storage.googleapis.com/remote_sensing_representations/resisc45-val.txt",  # noqa: E501
+         "test": "https://storage.googleapis.com/remote_sensing_representations/resisc45-test.txt",  # noqa: E501
+     }
+     split_md5s = {
+         "train": "b5a4c05a37de15e4ca886696a85c403e",
+         "val": "a0770cee4c5ca20b8c32bbd61e114805",
+         "test": "3dda9e4988b47eb1de9f07993653eb08",
+     }
+     classes = [
+         "airplane",
+         "airport",
+         "baseball_diamond",
+         "basketball_court",
+         "beach",
+         "bridge",
+         "chaparral",
+         "church",
+         "circular_farmland",
+         "cloud",
+         "commercial_area",
+         "dense_residential",
+         "desert",
+         "forest",
+         "freeway",
+         "golf_course",
+         "ground_track_field",
+         "harbor",
+         "industrial_area",
+         "intersection",
+         "island",
+         "lake",
+         "meadow",
+         "medium_residential",
+         "mobile_home_park",
+         "mountain",
+         "overpass",
+         "palace",
+         "parking_lot",
+         "railway",
+         "railway_station",
+         "rectangular_farmland",
+         "river",
+         "roundabout",
+         "runway",
+         "sea_ice",
+         "ship",
+         "snowberg",
+         "sparse_residential",
+         "stadium",
+         "storage_tank",
+         "tennis_court",
+         "terrace",
+         "thermal_power_station",
+         "wetland",
+     ]
+
+     def __init__(
+         self,
+         root: str = "data",
+         split: str = "train",
+         transforms: Optional[Callable[[Dict[str, Tensor]], Dict[str, Tensor]]] = None,
+     ) -> None:
+         """Initialize a new RESISC45 dataset instance.
+
+         Args:
+             root: root directory where dataset can be found
+             split: one of "train", "val", or "test"
+             transforms: a function/transform that takes input sample and its target as
+                 entry and returns a transformed version
+         """
+         assert split in self.splits
+         self.root = root
+
+         valid_fns = set()
+         with open(os.path.join(self.root, "resisc45", f"resisc45-{split}.txt")) as f:
+             for fn in f:
+                 valid_fns.add(fn.strip())
+
+         def is_in_split(x: str) -> bool:
+             return os.path.basename(x) in valid_fns
+
+         super().__init__(
+             root=os.path.join(root, self.directory),
+             transforms=transforms,
+             is_valid_file=is_in_split,
+         )
+
+
+ class RESISC45:
+     def __init__(self,
+                  preprocess,
+                  location=os.path.expanduser('~/data'),
+                  batch_size=32,
+                  num_workers=16):
+         self.train_dataset = RESISC45Dataset(root=location, split='train', transforms=preprocess)
+         self.train_loader = torch.utils.data.DataLoader(
+             self.train_dataset,
+             shuffle=True,
+             batch_size=batch_size,
+             num_workers=num_workers,
+         )
+
+         self.test_dataset = RESISC45Dataset(root=location, split='test', transforms=preprocess)
+         self.test_loader = torch.utils.data.DataLoader(
+             self.test_dataset,
+             batch_size=batch_size,
+             num_workers=num_workers
+         )
+
+         # class names have _ so split on this for a better zero-shot head
+         self.classnames = [' '.join(c.split('_')) for c in RESISC45Dataset.classes]
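
A small sketch of the is_valid_file filtering above: ImageFolder keeps only files whose basename appears in the downloaded split list. The filenames here are stand-ins, not real entries of resisc45-train.txt:

    import os

    valid_fns = {'airplane_001.jpg', 'beach_002.jpg'}  # stand-in split list

    def is_in_split(path: str) -> bool:
        return os.path.basename(path) in valid_fns

    print(is_in_split('/data/resisc45/NWPU-RESISC45/airplane/airplane_001.jpg'))  # True
    print(is_in_split('/data/resisc45/NWPU-RESISC45/airplane/airplane_700.jpg'))  # False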
datasets/stl10.py ADDED
@@ -0,0 +1,41 @@
+ import os
+ import torch
+ import torchvision.datasets as datasets
+
+
+ class STL10:
+     def __init__(self,
+                  preprocess,
+                  location=os.path.expanduser('~/data'),
+                  batch_size=128,
+                  num_workers=16):
+         location = os.path.join(location, 'stl10')
+         self.train_dataset = datasets.STL10(
+             root=location,
+             download=True,
+             split='train',
+             transform=preprocess
+         )
+
+         self.train_loader = torch.utils.data.DataLoader(
+             self.train_dataset,
+             batch_size=batch_size,
+             shuffle=True,
+             num_workers=num_workers
+         )
+
+         self.test_dataset = datasets.STL10(
+             root=location,
+             download=True,
+             split='test',
+             transform=preprocess
+         )
+
+         self.test_loader = torch.utils.data.DataLoader(
+             self.test_dataset,
+             batch_size=batch_size,
+             shuffle=False,
+             num_workers=num_workers
+         )
+
+         self.classnames = self.train_dataset.classes
datasets/sun397.py ADDED
@@ -0,0 +1,38 @@
+ import os
+ import torch
+ import torchvision.datasets as datasets
+
+
+ class SUN397:
+     def __init__(self,
+                  preprocess,
+                  location=os.path.expanduser('~/data'),
+                  batch_size=32,
+                  num_workers=0):
+         # Data loading code
+         traindir = os.path.join(location, 'sun397', 'train')
+         valdir = os.path.join(location, 'sun397', 'test')
+
+         self.train_dataset = datasets.ImageFolder(traindir, transform=preprocess)
+         self.train_loader = torch.utils.data.DataLoader(
+             self.train_dataset,
+             shuffle=True,
+             batch_size=batch_size,
+             num_workers=num_workers,
+         )
+
+         self.test_dataset = datasets.ImageFolder(valdir, transform=preprocess)
+         self.test_loader = torch.utils.data.DataLoader(
+             self.test_dataset,
+             batch_size=batch_size,
+             num_workers=num_workers
+         )
+         self.test_loader_shuffle = torch.utils.data.DataLoader(
+             self.test_dataset,
+             shuffle=True,
+             batch_size=batch_size,
+             num_workers=num_workers
+         )
+
+         idx_to_class = dict((v, k)
+                             for k, v in self.train_dataset.class_to_idx.items())
+         self.classnames = [idx_to_class[i][2:].replace('_', ' ') for i in range(len(idx_to_class))]
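
The [2:] slice in the classname cleanup assumes each SUN397 folder name carries a two-character prefix (for example 'a_abbey' for an alphabetical bucket); that layout is an assumption about this particular on-disk copy, not something fixed by torchvision. A one-liner sketch:

    name = 'a_abbey'  # hypothetical folder name with an alphabetical prefix
    print(name[2:].replace('_', ' '))  # 'abbey'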
datasets/svhn.py ADDED
@@ -0,0 +1,45 @@
+ import os
+ import torch
+ from torchvision.datasets import SVHN as PyTorchSVHN
+ import numpy as np
+
+
+ class SVHN:
+     def __init__(self,
+                  preprocess,
+                  location=os.path.expanduser('~/data'),
+                  batch_size=128,
+                  num_workers=16):
+         # to fit with repo conventions for location
+         modified_location = os.path.join(location, 'svhn')
+
+         self.train_dataset = PyTorchSVHN(
+             root=modified_location,
+             download=True,
+             split='train',
+             transform=preprocess
+         )
+
+         self.train_loader = torch.utils.data.DataLoader(
+             self.train_dataset,
+             batch_size=batch_size,
+             shuffle=True,
+             num_workers=num_workers
+         )
+
+         self.test_dataset = PyTorchSVHN(
+             root=modified_location,
+             download=True,
+             split='test',
+             transform=preprocess
+         )
+
+         self.test_loader = torch.utils.data.DataLoader(
+             self.test_dataset,
+             batch_size=batch_size,
+             shuffle=False,
+             num_workers=num_workers
+         )
+
+         self.classnames = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
datasets/templates.py ADDED
@@ -0,0 +1,319 @@
+ cars_template = [
+     lambda c: f"a photo of a {c}.",
+     lambda c: f"a photo of the {c}.",
+     lambda c: f"a photo of my {c}.",
+     lambda c: f"i love my {c}!",
+     lambda c: f"a photo of my dirty {c}.",
+     lambda c: f"a photo of my clean {c}.",
+     lambda c: f"a photo of my new {c}.",
+     lambda c: f"a photo of my old {c}.",
+ ]
+
+ cifar10_template = [
+     lambda c: f"a photo of a {c}.",
+     lambda c: f"a blurry photo of a {c}.",
+     lambda c: f"a black and white photo of a {c}.",
+     lambda c: f"a low contrast photo of a {c}.",
+     lambda c: f"a high contrast photo of a {c}.",
+     lambda c: f"a bad photo of a {c}.",
+     lambda c: f"a good photo of a {c}.",
+     lambda c: f"a photo of a small {c}.",
+     lambda c: f"a photo of a big {c}.",
+     lambda c: f"a photo of the {c}.",
+     lambda c: f"a blurry photo of the {c}.",
+     lambda c: f"a black and white photo of the {c}.",
+     lambda c: f"a low contrast photo of the {c}.",
+     lambda c: f"a high contrast photo of the {c}.",
+     lambda c: f"a bad photo of the {c}.",
+     lambda c: f"a good photo of the {c}.",
+     lambda c: f"a photo of the small {c}.",
+     lambda c: f"a photo of the big {c}.",
+ ]
+
+ cifar100_template = [
+     lambda c: f"a photo of a {c}.",
+     lambda c: f"a blurry photo of a {c}.",
+     lambda c: f"a black and white photo of a {c}.",
+     lambda c: f"a low contrast photo of a {c}.",
+     lambda c: f"a high contrast photo of a {c}.",
+     lambda c: f"a bad photo of a {c}.",
+     lambda c: f"a good photo of a {c}.",
+     lambda c: f"a photo of a small {c}.",
+     lambda c: f"a photo of a big {c}.",
+     lambda c: f"a photo of the {c}.",
+     lambda c: f"a blurry photo of the {c}.",
+     lambda c: f"a black and white photo of the {c}.",
+     lambda c: f"a low contrast photo of the {c}.",
+     lambda c: f"a high contrast photo of the {c}.",
+     lambda c: f"a bad photo of the {c}.",
+     lambda c: f"a good photo of the {c}.",
+     lambda c: f"a photo of the small {c}.",
+     lambda c: f"a photo of the big {c}.",
+ ]
+
+ dtd_template = [
+     lambda c: f"a photo of a {c} texture.",
+     lambda c: f"a photo of a {c} pattern.",
+     lambda c: f"a photo of a {c} thing.",
+     lambda c: f"a photo of a {c} object.",
+     lambda c: f"a photo of the {c} texture.",
+     lambda c: f"a photo of the {c} pattern.",
+     lambda c: f"a photo of the {c} thing.",
+     lambda c: f"a photo of the {c} object.",
+ ]
+
+ eurosat_template = [
+     lambda c: f"a centered satellite photo of {c}.",
+     lambda c: f"a centered satellite photo of a {c}.",
+     lambda c: f"a centered satellite photo of the {c}.",
+ ]
+
+ food101_template = [
+     lambda c: f"a photo of {c}, a type of food.",
+ ]
+
+ oxfordpets_template = [
+     lambda c: f"a photo of a pet {c}.",
+     lambda c: f"a photo of the pet {c}.",
+ ]
+
+ gtsrb_template = [
+     lambda c: f'a zoomed in photo of a "{c}" traffic sign.',
+     lambda c: f'a centered photo of a "{c}" traffic sign.',
+     lambda c: f'a close up photo of a "{c}" traffic sign.',
+ ]
+
+ mnist_template = [
+     lambda c: f'a photo of the number: "{c}".',
+ ]
+
+ imagenet_template = [
+     lambda c: f"a bad photo of a {c}.",
+     lambda c: f"a photo of many {c}.",
+     lambda c: f"a sculpture of a {c}.",
+     lambda c: f"a photo of the hard to see {c}.",
+     lambda c: f"a low resolution photo of the {c}.",
+     lambda c: f"a rendering of a {c}.",
+     lambda c: f"graffiti of a {c}.",
+     lambda c: f"a bad photo of the {c}.",
+     lambda c: f"a cropped photo of the {c}.",
+     lambda c: f"a tattoo of a {c}.",
+     lambda c: f"the embroidered {c}.",
+     lambda c: f"a photo of a hard to see {c}.",
+     lambda c: f"a bright photo of a {c}.",
+     lambda c: f"a photo of a clean {c}.",
+     lambda c: f"a photo of a dirty {c}.",
+     lambda c: f"a dark photo of the {c}.",
+     lambda c: f"a drawing of a {c}.",
+     lambda c: f"a photo of my {c}.",
+     lambda c: f"the plastic {c}.",
+     lambda c: f"a photo of the cool {c}.",
+     lambda c: f"a close-up photo of a {c}.",
+     lambda c: f"a black and white photo of the {c}.",
+     lambda c: f"a painting of the {c}.",
+     lambda c: f"a painting of a {c}.",
+     lambda c: f"a pixelated photo of the {c}.",
+     lambda c: f"a sculpture of the {c}.",
+     lambda c: f"a bright photo of the {c}.",
+     lambda c: f"a cropped photo of a {c}.",
+     lambda c: f"a plastic {c}.",
+     lambda c: f"a photo of the dirty {c}.",
+     lambda c: f"a jpeg corrupted photo of a {c}.",
+     lambda c: f"a blurry photo of the {c}.",
+     lambda c: f"a photo of the {c}.",
+     lambda c: f"a good photo of the {c}.",
+     lambda c: f"a rendering of the {c}.",
+     lambda c: f"a {c} in a video game.",
+     lambda c: f"a photo of one {c}.",
+     lambda c: f"a doodle of a {c}.",
+     lambda c: f"a close-up photo of the {c}.",
+     lambda c: f"a photo of a {c}.",
+     lambda c: f"the origami {c}.",
+     lambda c: f"the {c} in a video game.",
+     lambda c: f"a sketch of a {c}.",
+     lambda c: f"a doodle of the {c}.",
+     lambda c: f"a origami {c}.",
+     lambda c: f"a low resolution photo of a {c}.",
+     lambda c: f"the toy {c}.",
+     lambda c: f"a rendition of the {c}.",
+     lambda c: f"a photo of the clean {c}.",
+     lambda c: f"a photo of a large {c}.",
+     lambda c: f"a rendition of a {c}.",
+     lambda c: f"a photo of a nice {c}.",
+     lambda c: f"a photo of a weird {c}.",
+     lambda c: f"a blurry photo of a {c}.",
+     lambda c: f"a cartoon {c}.",
+     lambda c: f"art of a {c}.",
+     lambda c: f"a sketch of the {c}.",
+     lambda c: f"a embroidered {c}.",
+     lambda c: f"a pixelated photo of a {c}.",
+     lambda c: f"itap of the {c}.",
152
+ lambda c: f"a jpeg corrupted photo of the {c}.",
153
+ lambda c: f"a good photo of a {c}.",
154
+ lambda c: f"a plushie {c}.",
155
+ lambda c: f"a photo of the nice {c}.",
156
+ lambda c: f"a photo of the small {c}.",
157
+ lambda c: f"a photo of the weird {c}.",
158
+ lambda c: f"the cartoon {c}.",
159
+ lambda c: f"art of the {c}.",
160
+ lambda c: f"a drawing of the {c}.",
161
+ lambda c: f"a photo of the large {c}.",
162
+ lambda c: f"a black and white photo of a {c}.",
163
+ lambda c: f"the plushie {c}.",
164
+ lambda c: f"a dark photo of a {c}.",
165
+ lambda c: f"itap of a {c}.",
166
+ lambda c: f"graffiti of the {c}.",
167
+ lambda c: f"a toy {c}.",
168
+ lambda c: f"itap of my {c}.",
169
+ lambda c: f"a photo of a cool {c}.",
170
+ lambda c: f"a photo of a small {c}.",
171
+ lambda c: f"a tattoo of the {c}.",
172
+ ]
173
+
174
+ resisc45_template = [
175
+ lambda c: f"satellite imagery of {c}.",
176
+ lambda c: f"aerial imagery of {c}.",
177
+ lambda c: f"satellite photo of {c}.",
178
+ lambda c: f"aerial photo of {c}.",
179
+ lambda c: f"satellite view of {c}.",
180
+ lambda c: f"aerial view of {c}.",
181
+ lambda c: f"satellite imagery of a {c}.",
182
+ lambda c: f"aerial imagery of a {c}.",
183
+ lambda c: f"satellite photo of a {c}.",
184
+ lambda c: f"aerial photo of a {c}.",
185
+ lambda c: f"satellite view of a {c}.",
186
+ lambda c: f"aerial view of a {c}.",
187
+ lambda c: f"satellite imagery of the {c}.",
188
+ lambda c: f"aerial imagery of the {c}.",
189
+ lambda c: f"satellite photo of the {c}.",
190
+ lambda c: f"aerial photo of the {c}.",
191
+ lambda c: f"satellite view of the {c}.",
192
+ lambda c: f"aerial view of the {c}.",
193
+ ]
194
+
195
+ stl10_template = [
196
+ lambda c: f"a photo of a {c}.",
197
+ lambda c: f"a photo of the {c}.",
198
+ ]
199
+
200
+ sun397_template = [
201
+ lambda c: f"a photo of a {c}.",
202
+ lambda c: f"a photo of the {c}.",
203
+ ]
204
+
205
+ svhn_template = [
206
+ lambda c: f'a photo of the number: "{c}".',
207
+ ]
208
+
209
+ fashionmnist_template = [
210
+ lambda c: f'a photo of an apparel: "{c}".',
211
+ lambda c: f'a photo of the apparel: "{c}".',
212
+ lambda c: f'a photo of a piece of fashion item: "{c}".',
213
+ lambda c: f'a photo of the fashion item: "{c}".',
214
+ ]
215
+
216
+ beans_template = [
217
+ lambda c: f'a photo of a bean leaf showing signs of "{c}".',
218
+ lambda c: f'a close-up photo of a bean plant with "{c}".',
219
+ lambda c: f'an image of a bean leaf affected by "{c}".',
220
+ lambda c: f'a photo of a bean leaf condition classified as "{c}".',
221
+ ]
222
+
223
+ flowers_template = [
224
+ lambda c: f'a photo of a flower: "{c}".',
225
+ lambda c: f'an image of a "{c}" flower.',
226
+ lambda c: f'a close-up picture of a "{c}".',
227
+ lambda c: f'a beautiful photo of a "{c}" in bloom.',
228
+ ]
229
+
230
+ garbage_template = [
231
+ lambda c: f'a photo of a piece of "{c}" waste.',
232
+ lambda c: f'an image showing a piece of "{c}".',
233
+ lambda c: f'a close-up photo of a "{c}" item for recycling.',
234
+ lambda c: f'a photo of a type of garbage: "{c}".',
235
+ ]
236
+
237
+ kvasir_template = [
238
+ lambda c: f'an image showing the condition: "{c}".',
239
+ lambda c: f'a photo of a medical image showing the presence of: "{c}".',
240
+ lambda c: f'an image depicting the condition of: "{c}".',
241
+ lambda c: f'an endoscopic view showing: "{c}".',
242
+ lambda c: f"a photo of the {c} in a medical setting.",
243
+ lambda c: f'an image illustrating the symptoms of: "{c}".',
244
+ lambda c: f'an image captured during an endoscopy showing: "{c}".',
245
+ lambda c: f'an endoscopic image showing the characteristic signs of: "{c}".',
246
+ ]
247
+
248
+ landscape_template = [
249
+ lambda c: f'an image of a landscape: "{c}".',
250
+ lambda c: f'a photo showing a beautiful landscape: "{c}".',
251
+ lambda c: f"a scenic view of the {c} landscape.",
252
+ lambda c: f"a photo capturing the beauty of a {c}.",
253
+ lambda c: f"an aerial view of the {c} landscape.",
254
+ lambda c: f"a stunning image of the {c} in nature.",
255
+ lambda c: f"an image of the {c} terrain.",
256
+ lambda c: f"an outdoor scene featuring the {c}.",
257
+ ]
258
+
259
+ weather_template = [
260
+ lambda c: f'a photo of the weather condition: "{c}".',
261
+ lambda c: f'a photo showing the weather: "{c}".',
262
+ lambda c: f'an image of a weather phenomenon: "{c}".',
263
+ lambda c: f'a photo capturing the weather event: "{c}".',
264
+ lambda c: f'a photo illustrating the weather condition: "{c}".',
265
+ ]
266
+
267
+ mangoLeaf_template = [
268
+ lambda c: f'a close-up photo of a mango leaf showing signs of "{c}".',
269
+ lambda c: f'a photo of a mango leaf affected by "{c}".',
270
+ lambda c: f'a photo of a mango leaf with the disease or condition "{c}".',
271
+ lambda c: f'a close-up of a mango leaf exhibiting symptoms of "{c}".',
272
+ lambda c: f'a photo showing a mango leaf with visible signs of "{c}".',
273
+ ]
274
+
275
+ dogs_template = [
276
+ lambda c: f'a photo of a dog: "{c}".',
277
+ lambda c: f'a photo of a kind of dog: "{c}".',
278
+ lambda c: f'a photo of the dog: "{c}".',
279
+ ]
280
+
281
+ birds_template = [
282
+ lambda c: f'a photo of a bird: "{c}".',
283
+ lambda c: f'a photo of a kind of bird: "{c}".',
284
+ lambda c: f'a photo of the bird: "{c}".',
285
+ ]
286
+
287
+ dataset_to_template = {
288
+ "Cars": cars_template,
289
+ "CIFAR10": cifar10_template,
290
+ "CIFAR100": cifar100_template,
291
+ "DTD": dtd_template,
292
+ "EuroSAT": eurosat_template,
293
+ "Food101": food101_template,
294
+ "GTSRB": gtsrb_template,
295
+ "MNIST": mnist_template,
296
+ "ImageNet": imagenet_template,
297
+ "RESISC45": resisc45_template,
298
+ "STL10": stl10_template,
299
+ "SUN397": sun397_template,
300
+ "SVHN": svhn_template,
301
+ "FashionMNIST": fashionmnist_template,
302
+ "OxfordPets": oxfordpets_template,
303
+ "Beans": beans_template,
304
+ "Flowers": flowers_template,
305
+ "Garbage": garbage_template,
306
+ "Kvasir": kvasir_template,
307
+ "Landscape": landscape_template,
308
+ "Weather": weather_template,
309
+ "MangoLeaf": mangoLeaf_template,
310
+ "Dogs": dogs_template,
311
+ "Birds": birds_template,
312
+ }
313
+
314
+
315
+ def get_templates(dataset_name):
316
+ if dataset_name.endswith("Val"):
317
+ return get_templates(dataset_name.replace("Val", ""))
318
+ assert dataset_name in dataset_to_template, f"Unsupported dataset: {dataset_name}"
319
+ return dataset_to_template[dataset_name]
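A short usage sketch for get_templates. Only get_templates and the template lists come from this file; the zero-shot weight construction in the trailing comment is a sketch that assumes some encode_text helper exists:

    from datasets.templates import get_templates

    templates = get_templates("SVHNVal")   # a trailing "Val" resolves to the base name
    prompts = [t("3") for t in templates]
    print(prompts)  # ['a photo of the number: "3".']

    # Typical CLIP-style zero-shot use (sketch): embed every filled-in template
    # for each class and average, assuming encode_text(list_of_str) -> Tensor.
    # weights = torch.stack(
    #     [encode_text([t(c) for t in templates]).mean(0) for c in classnames]
    # )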
datasets/weather.py ADDED
@@ -0,0 +1,158 @@
+ import os
+ import torch
+ from torchvision import transforms, datasets
+ from torch.utils.data import random_split
+ import numpy as np
+
+
+ class WeatherDataset:
+     def __init__(
+         self,
+         preprocess,
+         data_dir="theRestDataset/weather",
+         image_size=224,
+         val_split=0.2,
+         seed=42,
+     ):
+         """
+         Initialize the Weather dataset with a train/validation split and augmentations.
+
+         Args:
+             preprocess (bool): If truthy, use augmented training transforms with
+                 ImageNet normalization; otherwise a plain resize + ToTensor pipeline
+             data_dir (str): Path to the dataset directory
+             image_size (int): Size to resize images to
+             val_split (float): Fraction of data to use for validation
+             seed (int): Random seed for reproducibility
+         """
+         self.data_dir = data_dir
+         self.image_size = image_size
+         self.val_split = val_split
+         self.seed = seed
+         self.preprocess = preprocess
+         # Define transforms for training
+         if self.preprocess:
+             self.train_transforms = transforms.Compose(
+                 [
+                     transforms.RandomResizedCrop(self.image_size),
+                     transforms.RandomHorizontalFlip(),
+                     transforms.RandomRotation(15),
+                     transforms.ColorJitter(
+                         brightness=0.2, contrast=0.2, saturation=0.2
+                     ),
+                     transforms.ToTensor(),
+                     transforms.Normalize(
+                         mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
+                     ),
+                 ]
+             )
+
+             # Define transforms for validation
+             self.test_transforms = transforms.Compose(
+                 [
+                     transforms.Resize(self.image_size + 32),
+                     transforms.CenterCrop(self.image_size),
+                     transforms.ToTensor(),
+                     transforms.Normalize(
+                         mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
+                     ),
+                 ]
+             )
+         else:
+             self.train_transforms = transforms.Compose(
+                 [
+                     transforms.Resize((self.image_size, self.image_size)),
+                     transforms.ToTensor(),
+                 ]
+             )
+
+             self.test_transforms = transforms.Compose(
+                 [
+                     transforms.Resize((self.image_size, self.image_size)),
+                     transforms.ToTensor(),
+                 ]
+             )
+         # Create train/val splits
+         self._create_splits()
+
+     def _create_splits(self):
+         """Create train and validation splits"""
+         # Load the full dataset without transforms; they are applied later
+         # by the TransformDataset wrappers below
+         full_dataset = datasets.ImageFolder(root=self.data_dir, transform=None)
+
+         # Calculate lengths for train and validation
+         val_size = int(len(full_dataset) * self.val_split)
+         train_size = len(full_dataset) - val_size
+
+         # Set random seed for reproducibility
+         torch.manual_seed(self.seed)
+         np.random.seed(self.seed)
+
+         # Split the dataset
+         self.train_dataset, self.test_dataset = random_split(
+             full_dataset, [train_size, val_size]
+         )
+
+         # Create custom datasets with appropriate transforms
+         self.train_dataset = TransformDataset(self.train_dataset, self.train_transforms)
+         self.test_dataset = TransformDataset(self.test_dataset, self.test_transforms)
+
+         # Store class names
+         self.classes = full_dataset.classes
+         self.class_to_idx = full_dataset.class_to_idx
+
+     def get_train_dataset(self):
+         """Return the training dataset"""
+         return self.train_dataset
+
+     def get_val_dataset(self):
+         """Return the validation dataset"""
+         return self.test_dataset
+
+     def get_classes(self):
+         """Return the list of classes"""
+         return self.classes
+
+
+ class TransformDataset(torch.utils.data.Dataset):
+     """Custom Dataset that applies a transform to a subset of another dataset"""
+
+     def __init__(self, subset, transform=None):
+         self.subset = subset
+         self.transform = transform
+
+     def __getitem__(self, idx):
+         x, y = self.subset[idx]
+         if self.transform:
+             x = self.transform(x)
+         return x, y
+
+     def __len__(self):
+         return len(self.subset)
+
+
+ class Weather:
+     def __init__(
+         self,
+         preprocess,
+         location=os.path.expanduser("~/data"),
+         batch_size=32,
+         num_workers=16,
+     ):
+         location = os.path.join(location, "weather")
+         self.dataset = WeatherDataset(preprocess, data_dir=location)
+         self.train_dataset = self.dataset.get_train_dataset()
+         self.test_dataset = self.dataset.get_val_dataset()
+         self.train_loader = torch.utils.data.DataLoader(
+             self.train_dataset,
+             batch_size=batch_size,
+             num_workers=num_workers,
+             shuffle=True,
+         )
+         self.test_loader = torch.utils.data.DataLoader(
+             self.test_dataset, batch_size=batch_size, num_workers=num_workers
+         )
+         self.classnames = self.dataset.get_classes()
+         self.class_to_idx = self.dataset.class_to_idx
+         self.num_classes = len(self.classnames)
+         self.input_shape = (3, 224, 224)
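Finally, a minimal usage sketch for the Weather wrapper above. The only assumption is the directory layout: ~/data/weather must contain one subfolder per class, as torchvision's ImageFolder requires, and the class names shown are illustrative:

    import os

    data = Weather(preprocess=True, location=os.path.expanduser("~/data"))
    print(data.classnames)      # e.g. ['cloudy', 'rain', 'shine', 'sunrise']
    print(data.num_classes, data.input_shape)

    images, labels = next(iter(data.train_loader))
    print(images.shape)         # torch.Size([32, 3, 224, 224])

Note that preprocess here is used as a boolean flag (WeatherDataset truth-tests it to choose between the augmented and the plain resize pipelines), unlike wrappers such as the SVHN one above, which pass preprocess straight through as a transform callable.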