Spaces: Stepp1 (status: Runtime error)
Commit 756da91 (parent: c0f8ae8)
[app] model and predict update
app.py
CHANGED
@@ -1,42 +1,49 @@
 import gradio as gr
 import torch
-import torch.nn as nn
 from torchvision import transforms
-from torchvision.models import resnet18
 from transferwee import download
 
-
-model.fc = nn.Sequential(
-    nn.Linear(512, 16),
-    nn.ReLU(),
-    nn.Linear(16,1)
-)
+from models import Final_CNN_Model
 
 # download latest model
-
-# download("https://we.tl/t-25s74dahjU", "best.pt") # 4 --> 0.92
+download("https://we.tl/t-uc4MWbAzIJ", "best.pt")
 
-
-
+model = Final_CNN_Model()
+checkpoint = torch.load("best.pt")
+model.load_state_dict(checkpoint['model_state_dict'])
 model.eval()
 
 labels_to_class = {
     0: "normal",
     1: "risk"
 }
+
+
 def predict(inp):
-
+    tranforms_pipe = transforms.Compose([
+        transforms.ToTensor(),
+        transforms.Resize((224,224))
+    ])
+    inp = tranforms_pipe(inp) # [C, H, W]
+    shape = inp.shape
+
+    # [1, C, H, W]
+    serie = torch.Tensor(1, shape[0], shape[1], shape[2])
+
     with torch.no_grad():
+        inp = serie.unsqueeze(0) # [B, 1, C, H, W]
         prediction = torch.sigmoid(model(inp)[0])
-
+    print(prediction)
+
+    if prediction > 0.5:
         confidences = {
-            "
+            "Riesgo": float(prediction[0])
         }
-
     else:
         confidences = {
-            "
+            "Normal": float(prediction[0])
         }
+
 
     print(confidences)
     return confidences
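Note: as committed, predict() never scores the input image. torch.Tensor(1, shape[0], shape[1], shape[2]) allocates an uninitialized buffer, and serie.unsqueeze(0) then overwrites inp with it, so the model sees random memory instead of the transformed frame ("tranforms_pipe" is also a typo for transforms_pipe). A minimal corrected sketch, assuming the same Final_CNN_Model interface, whose forward pass reads x[:, 0] and therefore accepts a single frame shaped (B, L, C, H, W) with B = L = 1:

def predict(inp):
    transforms_pipe = transforms.Compose([
        transforms.ToTensor(),
        transforms.Resize((224, 224)),
    ])
    inp = transforms_pipe(inp)             # [C, H, W]
    serie = inp.unsqueeze(0).unsqueeze(0)  # [1, 1, C, H, W] == [B, L, C, H, W]

    with torch.no_grad():
        prediction = torch.sigmoid(model(serie)[0])

    # prediction > 0.5 maps to class 1 ("risk"), per labels_to_class
    if prediction > 0.5:
        confidences = {"Riesgo": float(prediction[0])}
    else:
        confidences = {"Normal": float(prediction[0])}
    return confidences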
models.py
ADDED
@@ -0,0 +1,458 @@
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torchvision import models
import torchvision
import torch
import copy


class ForkliftFrameClassifier_V0(nn.Module):
    def __init__(self, n_classes=2, dropout=0.15):
        super(ForkliftFrameClassifier_V0, self).__init__()
        self.dropout = dropout
        # input: N x 3 x 480 x 640

        self.Conv1 = nn.Conv2d(3, 32, kernel_size=(8, 8), stride=(3, 5), padding=(3, 1))
        self.Bn1 = nn.BatchNorm2d(32)
        # N x 32 x 160 x 127

        self.Conv2 = nn.Conv2d(32, 64, kernel_size=(8, 8), stride=(5, 5), padding=(0, 0))
        self.Bn2 = nn.BatchNorm2d(64)
        # N x 64 x 31 x 24
        self.Maxpool1 = nn.MaxPool2d(kernel_size=(5, 5), stride=(3, 3), padding=(0, 2))
        # N x 64 x 9 x 8

        self.Conv3 = nn.Conv2d(64, 64, kernel_size=(5, 5), stride=(3, 3), padding=(1, 2))
        self.Bn3 = nn.BatchNorm2d(64)
        # N x 64 x 3 x 3
        self.Maxpool2 = nn.MaxPool2d(kernel_size=(3, 3), stride=(1, 1), padding=(0, 0))
        # N x 64 x 1 x 1

        self.Linear1 = nn.Linear(64, 16)
        self.FC_out = nn.Linear(16, 1) if n_classes == 2 else nn.Linear(64, n_classes)

    def forward(self, x):
        x = self.Conv1(x)
        x = self.Bn1(x)
        x = F.relu(x)
        # training=self.training keeps dropout inactive once model.eval() is called
        x = F.dropout(x, self.dropout, training=self.training)

        x = self.Conv2(x)
        x = self.Bn2(x)
        x = F.relu(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.Maxpool1(x)

        x = self.Conv3(x)
        x = self.Bn3(x)
        x = F.relu(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.Maxpool2(x)

        x = x.reshape(x.shape[0], -1)
        x = self.Linear1(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.FC_out(x)
        return x


class ForkliftFrameClassifier_V1(nn.Module):
    def __init__(self, n_classes=2, dropout=0.15):
        super(ForkliftFrameClassifier_V1, self).__init__()
        self.dropout = dropout
        # input: N x 3 x 240 x 240

        self.Conv1 = nn.Conv2d(3, 32, kernel_size=(5, 5), stride=(3, 3), padding=(1, 1))
        self.Bn1 = nn.BatchNorm2d(32)
        # N x 32 x 80 x 80

        self.Conv2 = nn.Conv2d(32, 64, kernel_size=(5, 5), stride=(3, 3), padding=(1, 1))
        self.Bn2 = nn.BatchNorm2d(64)
        # N x 64 x 26 x 26
        self.Maxpool1 = nn.MaxPool2d(kernel_size=(5, 5), stride=(3, 3), padding=(1, 1))
        # N x 64 x 8 x 8

        self.Conv3 = nn.Conv2d(64, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        self.Bn3 = nn.BatchNorm2d(32)
        # N x 32 x 4 x 4
        self.Maxpool2 = nn.MaxPool2d(kernel_size=(4, 4), stride=(1, 1), padding=(0, 0))
        # N x 32 x 1 x 1

        self.FC_out = nn.Linear(32, 1) if n_classes == 2 else nn.Linear(64, n_classes)

    def forward(self, x):
        x = self.Conv1(x)
        x = self.Bn1(x)
        x = F.relu(x)
        x = F.dropout(x, self.dropout, training=self.training)

        x = self.Conv2(x)
        x = self.Bn2(x)
        x = F.relu(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.Maxpool1(x)

        x = self.Conv3(x)
        x = self.Bn3(x)
        x = F.relu(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.Maxpool2(x)

        x = x.reshape(x.shape[0], -1)
        x = self.FC_out(x)
        return x


class ForkliftFrameClassifier_V2(nn.Module):
    def __init__(self, n_classes=2, dropout=0.15):
        super(ForkliftFrameClassifier_V2, self).__init__()
        self.dropout = dropout
        # input: N x 3 x 150 x 150

        self.Conv1 = nn.Conv2d(3, 32, kernel_size=(5, 5), stride=(3, 3), padding=(1, 1))
        self.Bn1 = nn.BatchNorm2d(32)
        # N x 32 x 50 x 50

        self.Conv2 = nn.Conv2d(32, 64, kernel_size=(5, 5), stride=(3, 3), padding=(1, 1))
        self.Bn2 = nn.BatchNorm2d(64)
        # N x 64 x 16 x 16
        self.Maxpool1 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        # N x 64 x 8 x 8

        self.Conv3 = nn.Conv2d(64, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        self.Bn3 = nn.BatchNorm2d(32)
        # N x 32 x 4 x 4
        self.Maxpool2 = nn.MaxPool2d(kernel_size=(4, 4), stride=(1, 1), padding=(0, 0))
        # N x 32 x 1 x 1

        self.FC_out = nn.Linear(32, 1) if n_classes == 2 else nn.Linear(64, n_classes)

    def forward(self, x):
        x = self.Conv1(x)
        x = self.Bn1(x)
        x = F.relu(x)
        x = F.dropout(x, self.dropout, training=self.training)

        x = self.Conv2(x)
        x = self.Bn2(x)
        x = F.relu(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.Maxpool1(x)

        x = self.Conv3(x)
        x = self.Bn3(x)
        x = F.relu(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.Maxpool2(x)

        x = x.reshape(x.shape[0], -1)
        x = self.FC_out(x)
        return x


class ForkliftFrameClassifier_PT1(nn.Module):
    def __init__(self, pretrained_model, n_out_last_layer, n_classes=2):
        super(ForkliftFrameClassifier_PT1, self).__init__()
        self.pt_model = pretrained_model
        self.pt_model.classifier = nn.Linear(25088, 1) if n_classes == 2 else nn.Linear(n_out_last_layer, n_classes)
        for param in self.pt_model.classifier.parameters():
            param.requires_grad = False

    def forward(self, x):
        x = self.pt_model(x)
        return x


class CNN_Feature_Extractor(nn.Module):
    def __init__(self):
        super(CNN_Feature_Extractor, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=(5, 5), stride=(3, 3))
        self.conv2 = nn.Conv2d(10, 20, kernel_size=(5, 5), stride=(2, 2))
        self.conv3 = nn.Conv2d(20, 30, kernel_size=(5, 5), stride=(2, 2))

    def forward(self, i):
        # fold the sequence dimension into the batch: (B, L, C, H, W) -> (B*L, C, H, W)
        x = i.view(-1, i.shape[2], i.shape[3], i.shape[4])
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = nn.AvgPool2d(3)(x)
        # unfold back to (B, L, features); 30 channels x 5 x 5 = 750 for 224x224 frames
        x = x.view(i.shape[0], i.shape[1], -1)
        return x


class LSTM(nn.Module):
    def __init__(self, seq_len, hidden_size, out_size):
        super(LSTM, self).__init__()
        self.lstm = nn.LSTM(750, hidden_size)
        self.fc = nn.Linear(seq_len * hidden_size, out_size)

    def forward(self, x):
        x, _ = self.lstm(x)
        x = x.view(x.shape[0], -1)
        x = self.fc(x)
        return x


class Full_LSTM(nn.Module):
    def __init__(self, seq_len=15, hidden_size=100, out_size=512):
        super(Full_LSTM, self).__init__()
        self.net_cnn = CNN_Feature_Extractor()
        self.net_lstm = LSTM(seq_len, hidden_size, out_size)
        self.classifier = nn.Sequential(nn.Linear(out_size, 16),
                                        nn.Dropout(0.3),
                                        nn.ReLU(),
                                        nn.Linear(16, 1))

    def forward(self, x):
        # x.size() == (B, L, C, H, W):
        # B: batch size, L: sequence length = 15, C: channels = 3, H: height = 224, W: width = 224
        x = self.net_cnn(x)
        x = self.net_lstm(x)
        x = self.classifier(x)
        return x


class Full_CNN(nn.Module):
    def __init__(self):
        super(Full_CNN, self).__init__()
        self.model = torchvision.models.resnet18(pretrained=True)
        # for param in self.model.parameters():
        #     param.requires_grad = False
        self.model.fc = nn.Sequential(nn.Linear(512, 16),
                                      nn.Dropout(0.3),
                                      nn.ReLU(),
                                      nn.Linear(16, 1))

    def forward(self, x):
        # x.size() == (B, L, C, H, W); only the first frame of each sequence is scored
        x = self.model(x[:, 0, :])
        return x


class Full_Model(nn.Module):
    def __init__(self, seq_len=15, hidden_size=100, classifier_size=512, dropout=0.4,
                 cnn_model_path=None, lstm_model_path=None):
        super(Full_Model, self).__init__()
        self.CNN_Part = Full_CNN()

        if cnn_model_path is not None:
            self.CNN_Part.model.load_state_dict(torch.load(cnn_model_path)['model_state_dict'])
            self.CNN_classifier = copy.deepcopy(self.CNN_Part.model.fc)
        else:
            self.CNN_classifier = nn.Sequential(nn.Linear(classifier_size, 16),
                                                nn.Dropout(dropout),
                                                nn.ReLU(),
                                                nn.Linear(16, 1))

        # replace the ResNet head so CNN_Part yields a 512-d feature vector
        self.CNN_Part.model.fc = nn.Sequential(nn.Linear(classifier_size, classifier_size),
                                               nn.Dropout(dropout),
                                               nn.ReLU())

        self.LSTM_Part = Full_LSTM(seq_len, hidden_size, classifier_size)

        if lstm_model_path is not None:
            self.LSTM_Part.load_state_dict(torch.load(lstm_model_path)['model_state_dict'])
            self.LSTM_classifier = copy.deepcopy(self.LSTM_Part.classifier)
        else:
            self.LSTM_classifier = nn.Sequential(nn.Linear(classifier_size, 16),
                                                 nn.Dropout(dropout),
                                                 nn.ReLU(),
                                                 nn.Linear(16, 1))

        self.Finalclassifier = nn.Sequential(nn.Linear(classifier_size, 16),
                                             nn.Dropout(dropout),
                                             nn.ReLU(),
                                             nn.Linear(16, 1))

    def forward(self, x):
        # x.size() == (B, L, C, H, W) with L = 15, C = 3, H = W = 224
        cnn_out = self.CNN_Part(x)
        # cnn_out: (B, 512)

        lstm_out = self.LSTM_Part.net_cnn(x)
        lstm_out = self.LSTM_Part.net_lstm(lstm_out)
        # lstm_out: (B, 512)

        out = cnn_out + lstm_out

        cnn_out = self.CNN_classifier(cnn_out)
        lstm_out = self.LSTM_classifier(lstm_out)
        out = self.Finalclassifier(out)

        return (cnn_out, lstm_out, out)


# The final model loads a Full model but uses only the output and forward pass of
# the CNN component. It is built this way (the LSTM weights are loaded as well) in
# case those weights turn out to be useful in the future.
class Final_CNN_Model(nn.Module):
    def __init__(self, seq_len=15, hidden_size=100, classifier_size=512, dropout=0.4,
                 cnn_model_path=None, lstm_model_path=None):
        super(Final_CNN_Model, self).__init__()
        self.CNN_Part = Full_CNN()

        if cnn_model_path is not None:
            self.CNN_Part.model.load_state_dict(torch.load(cnn_model_path)['model_state_dict'])
            self.CNN_classifier = copy.deepcopy(self.CNN_Part.model.fc)
        else:
            self.CNN_classifier = nn.Sequential(nn.Linear(classifier_size, 16),
                                                nn.Dropout(dropout),
                                                nn.ReLU(),
                                                nn.Linear(16, 1))

        self.CNN_Part.model.fc = nn.Sequential(nn.Linear(classifier_size, classifier_size),
                                               nn.Dropout(dropout),
                                               nn.ReLU())

        self.LSTM_Part = Full_LSTM(seq_len, hidden_size, classifier_size)

        if lstm_model_path is not None:
            self.LSTM_Part.load_state_dict(torch.load(lstm_model_path)['model_state_dict'])
            self.LSTM_classifier = copy.deepcopy(self.LSTM_Part.classifier)
        else:
            self.LSTM_classifier = nn.Sequential(nn.Linear(classifier_size, 16),
                                                 nn.Dropout(dropout),
                                                 nn.ReLU(),
                                                 nn.Linear(16, 1))

        self.Finalclassifier = nn.Sequential(nn.Linear(classifier_size, 16),
                                             nn.Dropout(dropout),
                                             nn.ReLU(),
                                             nn.Linear(16, 1))

    def forward(self, x):
        # x.size() == (B, L, C, H, W) with L = 15, C = 3, H = W = 224
        cnn_out = self.CNN_Part(x)
        cnn_out = self.CNN_classifier(cnn_out)
        return cnn_out


# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# torch.cuda.empty_cache()
# x1 = torch.rand((64, 15, 3, 224, 224))
# model = Full_Model(cnn_model_path='Best_model_4.pt', lstm_model_path='Best_model_10.pt')

# model.to(device)
# x1 = x1.to(device)
# out1 = model(x1)
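For reference, a smoke test in the spirit of the commented-out example above, wired to the checkpoint format app.py uses in this commit ("best.pt" with a 'model_state_dict' key); map_location="cpu" is an addition, not in the commit, so the load also works on CPU-only hosts:

import torch
from models import Final_CNN_Model

model = Final_CNN_Model()
checkpoint = torch.load("best.pt", map_location="cpu")  # map_location: assumption for CPU-only hosts
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()

x = torch.rand(2, 15, 3, 224, 224)  # (B, L, C, H, W), as documented in forward()
with torch.no_grad():
    out = torch.sigmoid(model(x))   # (B, 1) risk probabilities
print(out.shape)                    # torch.Size([2, 1])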