Spaces:
Runtime error
Runtime error
File size: 3,380 Bytes
ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 ff522d1 e9321a8 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 |
import torch
from torch import nn
import torch.nn.functional as F
class NeuralNetwork(nn.Module):
    """1D ResNet-style classifier for single-channel sequences, 7 output classes.

    Three residual blocks (each: three Conv1d+BatchNorm layers plus a shortcut
    added before the final ReLU), followed by global average pooling over the
    time axis and a linear classifier.  Returns log-probabilities.

    Input:  (batch, 1, length) tensor (cast to float32 internally).
    Output: (batch, 7) log-softmax scores.
    """

    def __init__(self):
        super().__init__()
        n_filters = 64
        # --- Block 1: 1 -> 64 channels ---
        self.conv_1 = nn.Conv1d(1, n_filters, 8, stride=1, padding='same')
        self.norm_1 = nn.BatchNorm1d(n_filters)
        self.conv_2 = nn.Conv1d(n_filters, n_filters, 5, stride=1, padding='same')
        self.norm_2 = nn.BatchNorm1d(n_filters)
        self.conv_3 = nn.Conv1d(n_filters, n_filters, 3, stride=1, padding='same')
        self.norm_3 = nn.BatchNorm1d(n_filters)
        # 1x1 conv expands the raw input to 64 channels so the residual
        # addition in block 1 is shape-compatible.
        self.conv_4 = nn.Conv1d(1, n_filters, 1, stride=1, padding='same')
        self.norm_4 = nn.BatchNorm1d(n_filters)
        # --- Block 2: 64 -> 128 channels ---
        self.conv_5 = nn.Conv1d(n_filters, n_filters * 2, 8, stride=1, padding='same')
        self.norm_5 = nn.BatchNorm1d(n_filters * 2)
        self.conv_6 = nn.Conv1d(n_filters * 2, n_filters * 2, 5, stride=1, padding='same')
        self.norm_6 = nn.BatchNorm1d(n_filters * 2)
        self.conv_7 = nn.Conv1d(n_filters * 2, n_filters * 2, 3, stride=1, padding='same')
        self.norm_7 = nn.BatchNorm1d(n_filters * 2)
        # 1x1 conv expands block-1 output (64ch) to 128ch for the skip path.
        self.conv_8 = nn.Conv1d(n_filters, n_filters * 2, 1, stride=1, padding='same')
        self.norm_8 = nn.BatchNorm1d(n_filters * 2)
        # --- Block 3: 128 -> 128 channels ---
        self.conv_9 = nn.Conv1d(n_filters * 2, n_filters * 2, 8, stride=1, padding='same')
        self.norm_9 = nn.BatchNorm1d(n_filters * 2)
        self.conv_10 = nn.Conv1d(n_filters * 2, n_filters * 2, 5, stride=1, padding='same')
        self.norm_10 = nn.BatchNorm1d(n_filters * 2)
        self.conv_11 = nn.Conv1d(n_filters * 2, n_filters * 2, 3, stride=1, padding='same')
        self.norm_11 = nn.BatchNorm1d(n_filters * 2)
        # Channel counts already match in block 3, so the shortcut needs no
        # 1x1 conv (conv_12 intentionally omitted) -- only a BatchNorm.
        self.norm_12 = nn.BatchNorm1d(n_filters * 2)
        self.classifier = nn.Linear(128, 7)
        self.log_softmax = nn.LogSoftmax(dim=1)

    def forward(self, x):
        """Run the network.  x: (batch, 1, length) -> (batch, 7) log-probs."""
        x = x.float()
        # Block 1: residual over a 1x1-projected copy of the raw input.
        a = F.relu(self.norm_1(self.conv_1(x)))
        b = F.relu(self.norm_2(self.conv_2(a)))
        c = self.norm_3(self.conv_3(b))
        shortcut = self.norm_4(self.conv_4(x))
        output_1 = F.relu(c + shortcut)
        # Block 2: residual over a 1x1-projected copy of output_1.
        a = F.relu(self.norm_5(self.conv_5(output_1)))
        b = F.relu(self.norm_6(self.conv_6(a)))
        c = self.norm_7(self.conv_7(b))
        shortcut = self.norm_8(self.conv_8(output_1))
        output_2 = F.relu(c + shortcut)
        # Block 3: channels match, so the skip path is just BatchNorm.
        a = F.relu(self.norm_9(self.conv_9(output_2)))
        b = F.relu(self.norm_10(self.conv_10(a)))
        c = self.norm_11(self.conv_11(b))
        # BUG FIX: the original re-normalized the *stale* shortcut tensor left
        # over from block 2 (conv_12 was commented out but the assignment was
        # not updated).  The block-3 skip connection must come from output_2.
        shortcut = self.norm_12(output_2)
        output_3 = F.relu(c + shortcut)
        # Global average pool over the time axis, then classify.
        logits = self.classifier(output_3.mean(dim=2))
        return self.log_softmax(logits)