SahithiR committed on
Commit 14e953b · 1 Parent(s): d690f9b

Create S10.py

Files changed (1)
  1. S10.py +161 -0
S10.py ADDED
@@ -0,0 +1,161 @@
+ from __future__ import print_function
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torch.optim as optim
+ from torchvision import datasets, transforms
+ import os
+ os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
+ import pytorch_lightning as pl
+
+ # MODEL
+ class LtAutoEncoder(pl.LightningModule):
+     def __init__(self):
+         super().__init__()
+         self.conv1 = nn.Sequential(
+             nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
+             nn.BatchNorm2d(64),
+             nn.ReLU(inplace=True)
+         )  # Number of parameters = 3*3*3*64 = 1728
+         # Layer 1
+         self.conv11 = nn.Sequential(
+             nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False),
+             nn.MaxPool2d(kernel_size=2, stride=2),
+             nn.BatchNorm2d(128),
+             nn.ReLU(inplace=True)
+         )  # Number of parameters = 3*3*64*128 = 73728
+         self.conv12 = nn.Sequential(
+             nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False),  # Number of parameters = 3*3*128*128 = 147456
+             nn.BatchNorm2d(128),
+             nn.ReLU(inplace=True),
+             nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False),  # Number of parameters = 3*3*128*128 = 147456
+             nn.BatchNorm2d(128),
+             nn.ReLU(inplace=True)
+         )
+
+         # Layer 2
+         self.conv2 = nn.Sequential(
+             nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=False),
+             nn.MaxPool2d(kernel_size=2, stride=2),
+             nn.BatchNorm2d(256),
+             nn.ReLU(inplace=True)
+         )
+
+         # Layer 3
+         self.conv31 = nn.Sequential(
+             nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=False),
+             nn.MaxPool2d(kernel_size=2, stride=2),
+             nn.BatchNorm2d(512),
+             nn.ReLU(inplace=True)
+         )
+         self.conv32 = nn.Sequential(
+             nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False),
+             nn.BatchNorm2d(512),
+             nn.ReLU(inplace=True),
+             nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False),
+             nn.BatchNorm2d(512),
+             nn.ReLU(inplace=True)
+         )
+
+         self.maxpool = nn.MaxPool2d(kernel_size=4, stride=2)
+
+         # Fully connected
+         self.fc = nn.Linear(512, 10, bias=True)
+
+     def forward(self, x):
+         x = self.conv1(x)
+
+         # Residual block 1
+         x = self.conv11(x)
+         R1 = x
+         x = self.conv12(x)
+         x = x + R1
+
+         x = self.conv2(x)
+
+         # Residual block 2
+         x = self.conv31(x)
+         R2 = x
+         x = self.conv32(x)
+         x = x + R2
+
+         x = self.maxpool(x)
+
+         # Flatten (batch, 512, 1, 1) -> (batch, 512); view avoids the
+         # dim-list form of squeeze, which requires PyTorch >= 2.0
+         x = x.view(x.size(0), -1)
+
+         x = self.fc(x)
+         return x
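+
+     # Shape trace for one 3x32x32 CIFAR-10 input (illustrative note,
+     # assuming the layers defined above): conv1 -> 64x32x32,
+     # conv11 -> 128x16x16, conv12 + skip -> 128x16x16, conv2 -> 256x8x8,
+     # conv31 -> 512x4x4, conv32 + skip -> 512x4x4, maxpool(4,2) -> 512x1x1,
+     # fc -> 10 logits.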
+
+     def training_step(self, batch, batch_idx):
+         # training_step defines the train loop for a single batch; it is
+         # independent of forward. Lightning calls zero_grad(), backward()
+         # and optimizer.step() automatically, so no manual loop is needed.
+         data, target = batch
+         y_pred = self(data)
+         loss = F.cross_entropy(y_pred, target)
+
+         # get the index of the max log-probability
+         pred = y_pred.argmax(dim=1, keepdim=True)
+         acc = 100.0 * pred.eq(target.view_as(pred)).float().mean()
+         self.log('train_loss', loss, prog_bar=True)
+         self.log('train_acc', acc, prog_bar=True)
+         return loss
+
+     def test_step(self, batch, batch_idx):
+         # Lightning's test hook: one batch at a time, run without gradients.
+         data, target = batch
+         output = self(data)
+         loss = F.cross_entropy(output, target)
+
+         # get the index of the max log-probability
+         pred = output.argmax(dim=1, keepdim=True)
+         acc = 100.0 * pred.eq(target.view_as(pred)).float().mean()
+         self.log('test_loss', loss)
+         self.log('test_acc', acc)
+
+     # OPTIMIZER
+     def configure_optimizers(self):
+         optimizer = torch.optim.Adam(self.parameters(), lr=1e-3, weight_decay=1e-4)
+         return optimizer
+
+ model = LtAutoEncoder()
+
+
+ # setup data: CIFAR-10 train split (3-channel 32x32 images, matching conv1)
+ dataset = datasets.CIFAR10(os.getcwd(), train=True, download=True,
+                            transform=transforms.ToTensor())
+ # batch size is an illustrative choice; the default of 1 trains very slowly
+ train_loader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=True)
+
+ # train the model (hint: here are some helpful Trainer arguments for rapid idea iteration)
+ trainer = pl.Trainer(limit_train_batches=100, max_epochs=1)
+ trainer.fit(model=model, train_dataloaders=train_loader)
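A quick sanity check of the module as rewritten above (a minimal sketch: the dummy batch, batch size, and test-split loader are illustrative assumptions, not part of the commit):

# Illustrative only: the forward pass should map a CIFAR-10-shaped batch to 10 logits.
x = torch.randn(4, 3, 32, 32)   # hypothetical batch of 4 RGB 32x32 images
assert model(x).shape == (4, 10)

# Illustrative only: run the test_step above on the CIFAR-10 test split.
test_dataset = datasets.CIFAR10(os.getcwd(), train=False, download=True,
                                transform=transforms.ToTensor())
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=64)
trainer.test(model, dataloaders=test_loader)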