making pretrained class too
- app.py +10 -2
- model_utils/efficientnet_config.py +39 -128
app.py CHANGED
@@ -7,6 +7,7 @@ import numpy as np
 from PIL import Image
 from scipy import special
 import sys
+import timm
 from types import SimpleNamespace
 from transformers import AutoModel, pipeline
 import torch
@@ -14,7 +15,7 @@ import torch
 sys.path.insert(1, "../")
 # from utils import model_utils, train_utils, data_utils, run_utils
 # from model_utils import jason_regnet_maker, jason_efficientnet_maker
-from model_utils.efficientnet_config import EfficientNetConfig
+from model_utils.efficientnet_config import EfficientNetConfig, EfficientNetPreTrained
 
 model_path = 'chlab/'
 # model_path = './models/'
@@ -221,10 +222,17 @@ def predict_and_analyze(model_name, num_channels, dim, input_channel, image):
 
     config.save_pretrained(model_loading_name)
 
+    config = EfficientNetConfig.from_pretrained(model_loading_name)
+
+    model = EfficientNetPreTrained(config)
+
+    pretrained_model = timm.create_model(model_loading_name, pretrained=True)
+    model.model.load_state_dict(pretrained_model.state_dict())
+
     # pipeline = pipeline(task="image-classification", model=model_loading_name)
 
     # model = load_model(model_name, activation=True)
-    model = AutoModel.from_pretrained(model_loading_name)
+    # model = AutoModel.from_pretrained(model_loading_name)
     print("Model loaded")
 
     print("Looking at activations")
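Note on the weight-transfer step above: timm.create_model(model_loading_name, pretrained=True) only succeeds when model_loading_name is also a valid timm architecture name, and model.model.load_state_dict(...) assumes the custom EfficientNet uses the same parameter names as the timm implementation. A minimal sketch of a more forgiving variant — the helper load_timm_weights is hypothetical, not part of the repo; strict=False reports mismatched keys instead of raising:

import timm
import torch.nn as nn


def load_timm_weights(wrapper: nn.Module, timm_name: str) -> None:
    # Build the timm reference model and copy its weights into the wrapped
    # EfficientNet (wrapper.model, as defined by EfficientNetPreTrained).
    # strict=False returns the lists of keys that could not be matched.
    pretrained = timm.create_model(timm_name, pretrained=True)
    missing, unexpected = wrapper.model.load_state_dict(
        pretrained.state_dict(), strict=False
    )
    if missing or unexpected:
        print("Missing keys:", missing)
        print("Unexpected keys:", unexpected)

In app.py this would stand in for the two timm lines as load_timm_weights(model, model_loading_name).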
model_utils/efficientnet_config.py CHANGED
@@ -1,4 +1,4 @@
-from transformers import PretrainedConfig
+from transformers import PretrainedConfig, PreTrainedModel
 from typing import List
 
 import copy
@@ -271,140 +271,51 @@ class EfficientNetConfig(PretrainedConfig):
         """
 
 
-        self.model = EfficientNet(
-            dropout=dropout,
-            num_channels=num_channels,
-            num_classes=num_classes,
-            size=size,
-            stochastic_depth_prob=stochastic_depth_prob,
-            width_mult=width_mult,
-            depth_mult=depth_mult,
-        )
+        # self.model = EfficientNet(
+        #     dropout=dropout,
+        #     num_channels=num_channels,
+        #     num_classes=num_classes,
+        #     size=size,
+        #     stochastic_depth_prob=stochastic_depth_prob,
+        #     width_mult=width_mult,
+        #     depth_mult=depth_mult,
+        # )
 
+        #
+        self.dropout = dropout
+        self.num_channels = num_channels
+        self.num_classes = num_classes
+        self.size = size
+        self.stochastic_depth_prob = stochastic_depth_prob
+        self.width_mult = width_mult
+        self.depth_mult = depth_mult
+
         super().__init__(**kwargs)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        # )
-        # if kwargs["block"] is not None:
-        #     for s in inverted_residual_setting:
-        #         if isinstance(s, MBConvConfig):
-        #             s.block = kwargs["block"]
-
-        # if norm_layer is None:
-        #     norm_layer = nn.BatchNorm2d
-
-        # layers: List[nn.Module] = []
-
-        # # building first layer
-        # firstconv_output_channels = inverted_residual_setting[0].input_channels
-        # layers.append(
-        #     Conv2dNormActivation(
-        #         num_channels,
-        #         firstconv_output_channels,
-        #         kernel_size=3,
-        #         stride=2,
-        #         norm_layer=norm_layer,
-        #         activation_layer=nn.SiLU,
-        #     )
-        # )
-
-        # # building inverted residual blocks
-        # total_stage_blocks = sum(cnf.num_layers for cnf in inverted_residual_setting)
-        # stage_block_id = 0
-        # for cnf in inverted_residual_setting:
-        #     stage: List[nn.Module] = []
-        #     for _ in range(cnf.num_layers):
-        #         # copy to avoid modifications. shallow copy is enough
-        #         block_cnf = copy.copy(cnf)
-
-        #         # overwrite info if not the first conv in the stage
-        #         if stage:
-        #             block_cnf.input_channels = block_cnf.out_channels
-        #             block_cnf.stride = 1
-
-        #         # adjust stochastic depth probability based on the depth of the stage block
-        #         sd_prob = (
-        #             stochastic_depth_prob * float(stage_block_id) / total_stage_blocks
-        #         )
-
-        #         stage.append(block_cnf.block(block_cnf, sd_prob, norm_layer))
-        #         stage_block_id += 1
-
-        #     layers.append(nn.Sequential(*stage))
-
-        # # building last several layers
-        # lastconv_input_channels = inverted_residual_setting[-1].out_channels
-        # lastconv_output_channels = (
-        #     last_channel if last_channel is not None else 4 * lastconv_input_channels
-        # )
-        # layers.append(
-        #     Conv2dNormActivation(
-        #         lastconv_input_channels,
-        #         lastconv_output_channels,
-        #         kernel_size=1,
-        #         norm_layer=norm_layer,
-        #         activation_layer=nn.SiLU,
-        #     )
-        # )
-
-        # self.features = nn.Sequential(*layers)
-        # self.avgpool = nn.AdaptiveAvgPool2d(1)
-        # self.classifier = nn.Sequential(
-        #     nn.Dropout(p=dropout, inplace=True),
-        #     nn.Linear(lastconv_output_channels, num_classes),
-        # )
-
-        # for m in self.modules():
-        #     if isinstance(m, nn.Conv2d):
-        #         nn.init.kaiming_normal_(m.weight, mode="fan_out")
-        #         if m.bias is not None:
-        #             nn.init.zeros_(m.bias)
-        #     elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
-        #         nn.init.ones_(m.weight)
-        #         nn.init.zeros_(m.bias)
-        #     elif isinstance(m, nn.Linear):
-        #         init_range = 1.0 / math.sqrt(m.out_features)
-        #         nn.init.uniform_(m.weight, -init_range, init_range)
-        #         nn.init.zeros_(m.bias)
-
-        # super().__init__(**kwargs)
-
-    # def _forward_impl(self, x: Tensor) -> Tensor:
-    #     x = self.features(x)
-
-    #     x = self.avgpool(x)
-    #     x = torch.flatten(x, 1)
-
-    #     x = self.classifier(x)
-
-    #     return x
-
-    # def forward(self, x: Tensor) -> Tensor:
-    #     return self._forward_impl(x)
+
+class EfficientNetPreTrained(PreTrainedModel):
+
+    config_class = EfficientNetConfig
+
+    def __init__(
+        self,
+        config,
+    ):
+        super().__init__(config)
+        self.model = EfficientNet(dropout=config.dropout,
+            num_channels=config.num_channels,
+            num_classes=config.num_classes,
+            size=config.size,
+            stochastic_depth_prob=config.stochastic_depth_prob,
+            width_mult=config.width_mult,
+            depth_mult=config.depth_mult)
+
+    def forward(self, tensor):
+        return self.model.forward(tensor)
 
 
 class EfficientNet(nn.Module):
 
-    model_type = "efficientnet"
 
     def __init__(
         self,
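The EfficientNetConfig / EfficientNetPreTrained pair added here follows the standard transformers custom-model pattern: the config stores the hyperparameters, and the model subclass points at it through config_class so that save_pretrained / from_pretrained work end to end. A minimal usage sketch, not taken from the repo — the hyperparameter values and the ./efficientnet-custom directory are placeholders for illustration:

from model_utils.efficientnet_config import EfficientNetConfig, EfficientNetPreTrained

# Placeholder hyperparameters; app.py instead loads the real values with
# EfficientNetConfig.from_pretrained(model_loading_name).
config = EfficientNetConfig(
    dropout=0.2,
    num_channels=1,
    num_classes=2,
    size=512,
    stochastic_depth_prob=0.2,
    width_mult=1.0,
    depth_mult=1.0,
)

model = EfficientNetPreTrained(config)          # builds the wrapped EfficientNet from the config

model.save_pretrained("./efficientnet-custom")  # writes config.json plus the model weights
reloaded = EfficientNetPreTrained.from_pretrained("./efficientnet-custom")

Because config_class = EfficientNetConfig is set on the wrapper, from_pretrained knows how to rebuild the config before instantiating the model.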