Update S10.py
S10.py CHANGED
@@ -9,7 +9,7 @@ os.environ['KMP_DUPLICATE_LIB_OK']='True'
 import pytorch_lightening as pl
 
 #MODEL
-class
+class LitAutoEncoder(L.LighteningModule):
     def __init__(self):
         super(Net,self).__init__()
         self.conv1 = nn.Sequential (
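The added line fixes the empty `class` statement, but several names around it still look off: the import is spelled `pytorch_lightening` (the module is `pytorch_lightning`, so this line will fail with ModuleNotFoundError), the base class is referenced as `L.LighteningModule` (and the class appears as `LtAutoEncoder` in the next hunk header), and `super(Net, self)` names a class the diff never defines. A minimal sketch of the conventional spelling, assuming the intent is a standard LightningModule; the layer shown is a placeholder, not the layers in S10.py:

import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl


class LitAutoEncoder(pl.LightningModule):
    def __init__(self):
        super().__init__()  # no need to name another class explicitly
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1),  # placeholder layer for the sketch
            nn.ReLU(),
        )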
@@ -86,72 +86,73 @@ class LtAutoEncoder(L.LighteningModule):
         x = x.view(-1, 10)
         return x
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        # Backpropagation
-        loss.backward()
-        optimizer.step()
-
-        pred = y_pred.argmax(dim=1, keepdim=True) # get the index of the max log-probability
-        correct += pred.eq(target.view_as(pred)).sum().item()
-        processed += len(data)
-
-        pbar.set_description(desc= f'Loss={loss.item()} Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}')
-        train_acc.append(100*correct/processed)
-
-
-    def testing_step(self, batch, batch_idx):
-        test_losses = []
-        test_acc = []
-        model.eval()
-        test_loss = 0
-        correct = 0
-        with torch.no_grad():
-            for data, target in test_loader:
-                data, target = data.to(device), target.to(device)
-                output = model(data)
-                pred =output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
-                correct += pred.eq(target.view_as(pred)).sum().item()
-
-        test_loss /= len(test_loader.dataset)
-        test_losses.append(test_loss)
-
-        print('\nTest set: Accuracy: {}/{} ({:.2f}%)\n'.format(
-            correct, len(test_loader.dataset),
-            100. * correct / len(test_loader.dataset)))
-
-        test_acc.append(100. * correct / len(test_loader.dataset))
-
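Everything removed above is the hand-rolled train/test loop: manual `loss.backward()`, `optimizer.step()`, running-accuracy bookkeeping, and a `testing_step` method, which is not a hook Lightning ever calls (the evaluation hooks are `validation_step` / `test_step`). Under Lightning's default automatic optimization the Trainer owns that loop, so `training_step` only has to compute and return the loss. A rough sketch of the loop the Trainer runs for you (names `model`, `optimizer`, `train_loader` are illustrative, not from the commit, and this is not an exact transcript of Lightning internals):

# Per batch, automatic optimization effectively does:
for batch_idx, batch in enumerate(train_loader):
    loss = model.training_step(batch, batch_idx)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()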
+
+    def cross_entropy_loss(self, logits, labels):
+        return F.nll_loss(logits, labels)
+
+    def training_step(self, train_batch, batch_idx):
+        x, y = train_batch
+        logits = self.forward(x)
+        loss = self.cross_entropy_loss(logits, y)
+        self.log('train_loss', loss)
+        return loss
+
+
+    def validation_step(self, val_batch, batch_idx):
+        x, y = val_batch
+        logits = self.forward(x)
+        loss = self.cross_entropy_loss(logits, y)
+        self.log('val_loss', loss)
 
     #OPTIMIZER
     def configure_optimizers(self):
         optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-4)
         return optimizer
 
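As added, these hooks have a few rough edges: the helper is named `cross_entropy_loss` but calls `F.nll_loss`, which expects log-probabilities, so it only behaves like cross-entropy if `forward` ends in `log_softmax`; `validation_step` logs only the loss; and `configure_optimizers` reaches for a global `model` instead of `self`. A hedged rewrite of the same hooks, under the assumption that `forward` returns raw logits (the class name below is hypothetical, not the committed code):

import torch
import torch.nn.functional as F
import pytorch_lightning as pl

class LitCifarModule(pl.LightningModule):  # hypothetical name, for illustration only
    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = F.cross_entropy(self(x), y)  # cross_entropy works on raw logits
        self.log('train_loss', loss)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        logits = self(x)
        loss = F.cross_entropy(logits, y)
        acc = (logits.argmax(dim=1) == y).float().mean()
        self.log('val_loss', loss, prog_bar=True)
        self.log('val_acc', acc, prog_bar=True)

    def configure_optimizers(self):
        # inside the module, use self.parameters(), not a global `model`
        return torch.optim.Adam(self.parameters(), lr=1e-3, weight_decay=1e-4)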
-
-dataset = CIFAR10(os.getcwd(), download=True, transform=ToTensor())
-train_loader = utils.data.DataLoader(dataset)
+
+
+class DataModule(pl.LightningDataModule):
+
+    def setup(self, stage):
+        # transforms for images
+        train_transform=A.Compose([
+            A.Normalize(
+                mean = (0.4914, 0.4822, 0.4465),
+                std = (0.2470, 0.2435, 0.2616), always_apply = True
+            ),
+            A.HorizontalFlip(),
+            A.ShiftScaleRotate(shift_limit=(-0.2, 0.2), scale_limit=(-0.2, 0.2), rotate_limit=(-15, 15), p=0.5),
+            A.PadIfNeeded(min_height=36, min_width=36, p=1.0),
+            A.RandomCrop (32, 32, always_apply=False, p=1.0),
+            A.CenterCrop(32, 32, always_apply=False, p=1.0),
+            A.CoarseDropout(max_holes = 1, max_height=8, max_width=8, min_holes = 1, min_height=8, min_width=8,
+                            fill_value=(0.4914, 0.4822, 0.4465), always_apply=False, p=0.5),
+
+            ToTensorV2()
+        ])
+
+        valid_transform=A.Compose([
+            A.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), always_apply = True),
+            ToTensorV2()
+        ])
+
+
+        # prepare transforms standard to MNIST
+        self.mnist_train = CIFAR10(os.getcwd(), train=True, download=True, transform=train_transform)
+        self.mnist_test = CIFAR10(os.getcwd(), train=False, download=True, transform=valid_transform)
+
+    def train_dataloader(self):
+        return DataLoader(self.mnist_train, batch_size=512)
+
+    def val_dataloader(self):
+        return DataLoader(self.mnist_test, batch_size=512)
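Two things are worth flagging in the new DataModule. The datasets are stored as `mnist_train` / `mnist_test` and the comment mentions MNIST even though the data is CIFAR-10. More importantly, an Albumentations pipeline cannot usually be passed directly as `transform=` to torchvision's `CIFAR10`: torchvision calls `transform(img)` with a PIL image, while Albumentations expects `transform(image=ndarray)` and returns a dict. A thin adapter is the usual fix; the wrapper below is an assumption for illustration, not part of the commit:

import numpy as np

class AlbumentationsTransform:
    def __init__(self, aug):
        self.aug = aug

    def __call__(self, img):
        img = np.array(img)                   # PIL.Image -> HWC uint8 ndarray
        return self.aug(image=img)['image']   # Albumentations returns a dict

# usage inside setup(), with the transforms defined above:
#   self.mnist_train = CIFAR10(os.getcwd(), train=True, download=True,
#                              transform=AlbumentationsTransform(train_transform))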
+
+data_module = DataModule()
+
+# train
+model=LitAutoEncoder()
+trainer = pl.Trainer()
+
+trainer.fit(model, data_module)
 
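Finally, `pl.Trainer()` with no arguments picks default settings (in recent releases that means training for up to 1000 epochs unless `max_epochs` is set), and passing the datamodule by keyword makes the `fit` call unambiguous. An illustrative invocation; the argument values are assumptions, not from the commit:

# Illustrative settings only; the commit constructs pl.Trainer() with defaults.
trainer = pl.Trainer(max_epochs=20, accelerator='auto', devices='auto')
trainer.fit(model, datamodule=data_module)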