import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import DropPath as TimmDropPath


class Conv2d_BN(torch.nn.Sequential):
    """Conv2d followed by BatchNorm2d, fusable into a single Conv2d."""

    def __init__(self, a, b, ks=1, stride=1, pad=0, dilation=1,
                 groups=1, bn_weight_init=1):
        super().__init__()
        # Bias is disabled on the conv because BatchNorm supplies the shift.
        self.add_module('c', torch.nn.Conv2d(
            a, b, ks, stride, pad, dilation, groups, bias=False))
        bn = torch.nn.BatchNorm2d(b)
        torch.nn.init.constant_(bn.weight, bn_weight_init)
        torch.nn.init.constant_(bn.bias, 0)
        self.add_module('bn', bn)

    @torch.no_grad()
    def fuse(self):
        """Fold the BatchNorm statistics into a single fused Conv2d."""
        c, bn = self._modules.values()
        # Scale each output channel of the conv weights by gamma / sqrt(var + eps).
        w = bn.weight / (bn.running_var + bn.eps) ** 0.5
        w = c.weight * w[:, None, None, None]
        # The fused bias absorbs the BatchNorm shift:
        # beta - mean * gamma / sqrt(var + eps).
        b = bn.bias - bn.running_mean * bn.weight / \
            (bn.running_var + bn.eps) ** 0.5
        m = torch.nn.Conv2d(
            w.size(1) * self.c.groups, w.size(0), w.shape[2:],
            stride=self.c.stride, padding=self.c.padding,
            dilation=self.c.dilation, groups=self.c.groups)
        m.weight.data.copy_(w)
        m.bias.data.copy_(b)
        return m
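

# Illustrative sanity check (not part of the original file; names here are
# hypothetical): in eval mode, where BatchNorm uses its running statistics,
# the fused Conv2d returned by fuse() should reproduce the Conv2d_BN output.
def _check_conv_bn_fusion():
    conv_bn = Conv2d_BN(16, 32, ks=3, pad=1).eval()
    fused = conv_bn.fuse()
    x = torch.randn(2, 16, 8, 8)
    assert torch.allclose(conv_bn(x), fused(x), atol=1e-5)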


class Mlp(nn.Module):
    """Pre-norm MLP block: LayerNorm -> Linear -> GELU -> Dropout -> Linear."""

    def __init__(self, in_features, hidden_features=None,
                 out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.norm = nn.LayerNorm(in_features)
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.act = act_layer()
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.norm(x)
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x
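

# Illustrative usage (not part of the original file; the helper name is
# hypothetical): the block operates on token tensors of shape
# (batch, tokens, channels) and preserves that shape.
def _mlp_shape_example():
    mlp = Mlp(in_features=64, hidden_features=256)
    x = torch.randn(2, 49, 64)
    assert mlp(x).shape == (2, 49, 64)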


class DropPath(TimmDropPath):
    """timm DropPath with the drop probability surfaced in repr()."""

    def __init__(self, drop_prob=None):
        super().__init__(drop_prob=drop_prob)
        self.drop_prob = drop_prob

    def __repr__(self):
        msg = super().__repr__()
        msg += f'(drop_prob={self.drop_prob})'
        return msg
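

# Illustrative usage (not part of the original file; the helper name is
# hypothetical): DropPath stochastically skips the residual branch per
# sample during training and is the identity at inference time; the
# __repr__ override above surfaces the drop probability.
def _drop_path_example():
    dp = DropPath(drop_prob=0.1).eval()
    x = torch.randn(2, 49, 64)
    assert torch.equal(dp(x), x)  # identity in eval mode
    print(dp)  # repr includes "(drop_prob=0.1)"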