SahithiR committed
Commit 345e2fa · Parent: aa27bf1

Update S10.py

Files changed (1)
  1. S10.py +123 -125
S10.py CHANGED
@@ -2,16 +2,67 @@
 import torch
 import torch.nn as nn
 import pytorch_lightning as pl
 from torchvision.transforms import ToTensor
 import torch
 import albumentations as A
 from albumentations.pytorch import ToTensorV2
-
 from torchvision import transforms
 import numpy as np
 from torchvision import datasets
 from torch.utils.data import Dataset, DataLoader
 from torchvision.transforms import ToTensor


 torch.manual_seed(1)
@@ -71,6 +122,7 @@ class MyModel(nn.Module):
         self.fc = nn.Linear(512, 10, bias=True)

     def forward(self, x):
         x = self.conv1(x)

         x = self.conv11(x)
@@ -89,148 +141,94 @@ class MyModel(nn.Module):
         #x = x.randn(512, 1)

         # squeeze the tensor to size 512
-        x = x.squeeze(dim=[2, 3])

         #x = x.view(512, 10)

         x = self.fc(x)

         x = x.view(-1, 10)
         return x

-class MyLightningModule(pl.LightningModule):
-    def __init__(self):
         super().__init__()
         self.model = MyModel()

     def training_step(self, batch, batch_idx):
-        x, y = batch
-        y_hat = self.model(x)
-        loss = nn.functional.cross_entropy(y_hat, y)
         return loss

     def validation_step(self, batch, batch_idx):
-        x, y = batch
-        y_hat = self.model(x)
-        loss = nn.functional.cross_entropy(y_hat, y)
-        acc = (y_hat.argmax(dim=1) == y).float().mean()
-        self.log('val_loss', loss)
-        self.log('val_acc', acc)

     def configure_optimizers(self):
         optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
         return optimizer

-def get_a_train_transform():
-    """Get transform for training data
-
-    Returns:
-        Compose: Composed transformations
-    """
-    return A.Compose([
-        A.Normalize(
-            mean=(0.4914, 0.4822, 0.4465),
-            std=(0.2470, 0.2435, 0.2616), always_apply=True
-        ),
-        A.HorizontalFlip(),
-        A.ShiftScaleRotate(shift_limit=(-0.2, 0.2), scale_limit=(-0.2, 0.2), rotate_limit=(-15, 15), p=0.5),
-        A.PadIfNeeded(min_height=36, min_width=36, p=1.0),
-        A.RandomCrop(32, 32, always_apply=False, p=1.0),
-        A.CenterCrop(32, 32, always_apply=False, p=1.0),
-        A.CoarseDropout(max_holes=1, max_height=8, max_width=8, min_holes=1, min_height=8, min_width=8,
-                        fill_value=(0.4914, 0.4822, 0.4465), always_apply=False, p=0.5),
-
-        ToTensorV2()
-    ])
-
-
-def get_a_test_transform():
-    """Get transform for test data
-
-    Returns:
-        Compose: Composed transformations
-    """
-    return A.Compose([
-        A.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), always_apply=True),
-        ToTensorV2()
-    ])
-
-class Cifar10SearchDataset(Dataset):
-    """
-    Custom Dataset Class
-    """
-
-    def __init__(self, dataset, transforms=None):
-        """Initialize Dataset
-
-        Args:
-            dataset (Dataset): PyTorch Dataset instance
-            transforms (Compose, optional): Transform function instance. Defaults to None.
-        """
-        self.transforms = transforms
-        self.dataset = dataset
-
-    def __len__(self):
-        """Get dataset length
-
-        Returns:
-            int: Length of dataset
-        """
-        return len(self.dataset)
-
-    def __getitem__(self, idx):
-        """Get an item from the dataset
-
-        Args:
-            idx (int): id of item in dataset
-
-        Returns:
-            (tensor, int): tensor of the transformed image, label
-        """
-        # Read image and label
-        image, label = self.dataset[idx]
-
-        image = np.array(image)
-
-        # Apply transforms
-        if self.transforms is not None:
-            image = (self.transforms(image=image))["image"]
-
-        return (image, label)
-
-
-def get_loader(train_transform, test_transform, batch_size=128, use_cuda=True):
-    """Get instances of train and test loaders
-
-    Args:
-        train_transform (Transform): Instance of transform function for training
-        test_transform (Transform): Instance of transform function for validation
-        batch_size (int, optional): batch size to be used in training. Defaults to 128.
-        use_cuda (bool, optional): Enable/disable CUDA GPU. Defaults to True.
-
-    Returns:
-        (DataLoader, DataLoader): Instances of train and test data loaders
-    """
-    kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}
-
-    train_loader = DataLoader(
-        Cifar10SearchDataset(datasets.CIFAR10('../data', train=True,
-                             download=True), transforms=train_transform),
-        batch_size=batch_size, shuffle=True, **kwargs)
-
-    test_loader = DataLoader(
-        Cifar10SearchDataset(datasets.CIFAR10('../data', train=False,
-                             download=True), transforms=test_transform),
-        batch_size=batch_size, shuffle=True, **kwargs)
-
-    return train_loader, test_loader
-
-use_cuda = torch.cuda.is_available()
-device = torch.device("cuda" if use_cuda else "cpu")
-train_loader, test_loader = get_loader(get_a_train_transform(), get_a_test_transform(), batch_size=512, use_cuda=use_cuda)

 import torch
 import torch.nn as nn
 import pytorch_lightning as pl
+from torchvision.datasets import MNIST
 from torchvision.transforms import ToTensor
+from torch.utils.data import DataLoader, random_split
 import torch
 import albumentations as A
 from albumentations.pytorch import ToTensorV2
 from torchvision import transforms
 import numpy as np
+import torch
 from torchvision import datasets
 from torch.utils.data import Dataset, DataLoader
 from torchvision.transforms import ToTensor
+from torchmetrics import Accuracy
+import matplotlib.pyplot as plt
+from torch.nn import functional as F
+
+
+class DataAugmentationTrain():
+    """Module to perform data augmentation using Albumentations on numpy images."""
+
+    def __init__(self):
+        self.train_transform = A.Compose([
+            A.Normalize(
+                mean=(0.4914, 0.4822, 0.4465),
+                std=(0.2470, 0.2435, 0.2616), always_apply=True
+            ),
+            A.HorizontalFlip(p=1.0),
+            A.ShiftScaleRotate(shift_limit=(-0.2, 0.2), scale_limit=(-0.2, 0.2), rotate_limit=(-15, 15), p=0.5),
+            A.PadIfNeeded(min_height=36, min_width=36, p=1.0),
+            A.RandomCrop(32, 32, always_apply=False, p=1.0),
+            A.CenterCrop(32, 32, always_apply=False, p=1.0),
+            A.CoarseDropout(max_holes=1, max_height=8, max_width=8, min_holes=1,
+                            min_height=8, min_width=8,
+                            fill_value=(0.4914, 0.4822, 0.4465), always_apply=False, p=0.5),
+
+            ToTensorV2()
+        ])
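+        # Normalize operates on the HWC numpy image; ToTensorV2 runs last,
+        # converting the HWC numpy array into a CHW torch tensor.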
+
+    def __call__(self, image):
+        image = np.array(image)
+        image = self.train_transform(image=image)['image']
+        return image
+
+class DataAugmentationTest():
+    """Module to perform data augmentation using Albumentations on numpy images."""
+
+    def __init__(self):
+        self.test_transform = A.Compose([
+            A.Normalize(
+                mean=(0.4914, 0.4822, 0.4465),
+                std=(0.2470, 0.2435, 0.2616), always_apply=True
+            ),
+            ToTensorV2()
+        ])
+
+    def __call__(self, image):
+        image = np.array(image)
+        image = self.test_transform(image=image)['image']
+        return image


 torch.manual_seed(1)

         self.fc = nn.Linear(512, 10, bias=True)

     def forward(self, x):
+        #x = x.unsqueeze(0)
         x = self.conv1(x)

         x = self.conv11(x)

         #x = x.randn(512, 1)

         # squeeze the tensor to size 512
+        #x = x.squeeze(dim=[2, 3])
+        x = x.squeeze(dim=2)
+        x = x.squeeze(dim=2)
+        #x = x.squeeze(dim=2).squeeze(dim=3)
+        #x = x.squeeze(dim=2)
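+        # x arrives as (N, 512, 1, 1); squeezing dim 2 twice removes both
+        # trailing singleton dims, leaving (N, 512) for the linear layer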

         #x = x.view(512, 10)

         x = self.fc(x)

         x = x.view(-1, 10)
+        x = F.log_softmax(x, dim=-1)
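+        # log-probabilities: paired with F.nll_loss in the training/validation steps below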
         return x

+class Cifar10SearchDataset(pl.LightningModule):
+    def __init__(self, train_transform=None, test_transform=None):
         super().__init__()
+        # not the best model: experiment yourself
         self.model = MyModel()
+        #self.preprocess = Preprocess() # per sample transforms
+        self.train_transform = DataAugmentationTrain()
+        self.test_transform = DataAugmentationTest()
+        # per-batch augmentation
+        self.train_accuracy = Accuracy(task="multiclass", num_classes=10)
+        self.val_accuracy = Accuracy(task="multiclass", num_classes=10)
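+        # despite its name, this class is a LightningModule that bundles the
+        # model, metrics, and the whole CIFAR-10 data pipeline in one place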
+
+    def forward(self, x):
+        return self.model(x)

     def training_step(self, batch, batch_idx):
+        x, y = batch
+        logits = self.model(x)
+        loss = F.nll_loss(logits, y)
+        preds = torch.argmax(logits, dim=1)
+        self.train_accuracy(preds, y)
+        self.log("train_loss", loss, prog_bar=True)
+        self.log("train_acc", self.train_accuracy, prog_bar=True)
         return loss

     def validation_step(self, batch, batch_idx):
+        x, y = batch
+
+        logits = self.model(x)
+        loss = F.nll_loss(logits, y)
+        preds = torch.argmax(logits, dim=1)
+        self.val_accuracy(preds, y)
+        self.log("val_loss", loss, prog_bar=True)
+        self.log("val_acc", self.val_accuracy, prog_bar=True)
+        return loss
+
+    def test_step(self, batch, batch_idx):
+        # Here we just reuse the validation_step for testing
+        return self.validation_step(batch, batch_idx)

     def configure_optimizers(self):
         optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
         return optimizer

+    def prepare_data(self):
+        # download data once if needed (no state is assigned here)
+        datasets.CIFAR10(root='./data', train=True, download=True)
+        datasets.CIFAR10(root='./data', train=False, download=True)
+
+    def setup(self, stage=None):
+        # split the 50k training images into train and val sets
+        if stage == "fit" or stage is None:
+            cifar10_full = datasets.CIFAR10(root='./data', train=True, download=True, transform=self.train_transform)
+            self.cifar10_train, self.cifar10_val = random_split(cifar10_full, [45000, 5000])
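+            # note: both subsets wrap the same dataset object, so the val split
+            # also receives the train-time augmentations defined above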
+
+        if stage == "test" or stage is None:
+            self.cifar10_test = datasets.CIFAR10(root='./data', train=False, download=True, transform=self.test_transform)
+
+    def train_dataloader(self):
+        return DataLoader(self.cifar10_train, batch_size=64)
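+        # shuffle is left at the DataLoader default (False) here; shuffle=True
+        # is the usual choice for a training loader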
+
+    def val_dataloader(self):
+        return DataLoader(self.cifar10_val, batch_size=64)
+
+    def test_dataloader(self):
+        return DataLoader(self.cifar10_test, batch_size=64)
+
+
+model = Cifar10SearchDataset()
+trainer = pl.Trainer(accelerator="auto", max_epochs=1)
+#datasetcifar=Cifar10SearchDataset()
+#trainer = pl.Trainer()
+trainer.fit(model)
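
A follow-up sketch (not part of this commit): since setup("test") also prepares the held-out CIFAR-10 test split, it can be scored right after fitting via test_step, which delegates to validation_step:

    # evaluate on the test split prepared in setup("test")
    trainer.test(model)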