Delete tcn.py
tcn.py
DELETED
@@ -1,83 +0,0 @@
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm


class Chomp1d(nn.Module):
    """Trims the trailing elements added by padding so the convolution stays causal."""

    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x) -> torch.Tensor:
        return x[:, :, :-self.chomp_size].contiguous()


class TemporalBlock(nn.Module):
    """Residual block of two weight-normalized dilated causal convolutions with dropout."""

    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
        super(TemporalBlock, self).__init__()
        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU(inplace=False)
        self.dropout1 = nn.Dropout(dropout)

        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU(inplace=False)
        self.dropout2 = nn.Dropout(dropout)

        self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
                                 self.conv2, self.chomp2, self.relu2, self.dropout2)
        # A 1x1 convolution matches channel counts on the residual path when they differ.
        self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU(inplace=False)
        self.init_weights()

    def init_weights(self):
        self.conv1.weight.data.normal_(0, 0.01)
        self.conv2.weight.data.normal_(0, 0.01)
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.01)

    def forward(self, x) -> torch.Tensor:
        out = self.net(x)
        res = x if self.downsample is None else self.downsample(x)
        return self.relu(out + res)


class TemporalConvNet(nn.Module):
    """Stack of TemporalBlocks with exponentially growing dilation (1, 2, 4, ...)."""

    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvNet, self).__init__()
        layers = []
        num_levels = len(num_channels)
        for i in range(num_levels):
            dilation_size = 2 ** i
            in_channels = num_inputs if i == 0 else num_channels[i - 1]
            out_channels = num_channels[i]
            # padding = (kernel_size - 1) * dilation keeps the output length equal
            # to the input length once Chomp1d removes the trailing padding.
            layers += [TemporalBlock(in_channels, out_channels, kernel_size, stride=1,
                                     dilation=dilation_size,
                                     padding=(kernel_size - 1) * dilation_size,
                                     dropout=dropout)]

        self.network = nn.Sequential(*layers)

    def forward(self, x) -> torch.Tensor:
        return self.network(x)


class TCN(nn.Module):
    """TemporalConvNet followed by a linear head applied to the last time step."""

    def __init__(self, input_size, output_size, num_channels, kernel_size=2, dropout=0):
        super(TCN, self).__init__()
        self.tcn = TemporalConvNet(num_inputs=input_size,
                                   num_channels=num_channels,
                                   kernel_size=kernel_size,
                                   dropout=dropout)
        self.linear = nn.Linear(num_channels[-1], output_size)
        self.init_weights()

    def init_weights(self):
        self.linear.weight.data.normal_(0, 0.01)
        self.linear.bias.data.fill_(0)

    def forward(self, inputs) -> torch.Tensor:
        y = self.tcn(inputs)
        output = self.linear(y[:, :, -1])
        return output
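For reference, a minimal usage sketch of the module this commit removes. The hyperparameters and shapes below (batch of 32, one input channel, 8 levels of 25 channels, kernel size 7, sequence length 100) are illustrative assumptions, not values taken from the repository.

import torch

# Illustrative configuration (assumption): 8 temporal blocks of 25 channels each.
# With kernel_size=7, the receptive field is 1 + 2*(7-1)*(2**8 - 1) = 3061 steps,
# since each block adds 2*(kernel_size-1)*dilation and dilations double per level.
model = TCN(input_size=1, output_size=10,
            num_channels=[25] * 8, kernel_size=7, dropout=0.05)

x = torch.randn(32, 1, 100)   # (batch, input channels, sequence length)
logits = model(x)             # (32, 10): prediction from the last time step
print(logits.shape)

Because the linear head reads only the final time step, TCN as written targets sequence-to-one prediction; the full TemporalConvNet output could be used instead for sequence-to-sequence tasks.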