Dataset schema (one record per function below): code (string, 26–870k chars), docstring (string, 1–65.6k chars), func_name (string, 1–194 chars), language (1 class), repo (string, 8–68 chars), path (string, 5–194 chars), url (string, 46–254 chars), license (4 classes).
def make_temporal_shift(self):
    """Make temporal shift for some layers."""
    for m in self.modules():
        if isinstance(m, InvertedResidual) and \
                len(m.conv) == 3 and m.use_res_connect:
            m.conv[0] = TemporalShift(
                m.conv[0],
                num_segments=self.num_segments,
                shift_div=self.shift_div,
            )
Make temporal shift for some layers.
make_temporal_shift
python
open-mmlab/mmaction2
mmaction/models/backbones/mobilenet_v2_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobilenet_v2_tsm.py
Apache-2.0
def init_structure(self):
    """Initiate the parameters either from existing checkpoint or from
    scratch."""
    if self.is_shift:
        self.make_temporal_shift()
Initiate the parameters either from existing checkpoint or from scratch.
init_structure
python
open-mmlab/mmaction2
mmaction/models/backbones/mobilenet_v2_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobilenet_v2_tsm.py
Apache-2.0
def init_weights(self):
    """Initiate the parameters either from existing checkpoint or from
    scratch."""
    if self.pretrained2d:
        logger = MMLogger.get_current_instance()
        self.load_original_weights(logger)
    else:
        if self.pretrained:
            self.init_cfg = dict(
                type='Pretrained', checkpoint=self.pretrained)
        super().init_weights()
Initiate the parameters either from existing checkpoint or from scratch.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/mobilenet_v2_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobilenet_v2_tsm.py
Apache-2.0
def window_partition(x: torch.Tensor,
                     window_size: Sequence[int]) -> torch.Tensor:
    """
    Args:
        x (torch.Tensor): The input features of shape
            :math:`(B, D, H, W, C)`.
        window_size (Sequence[int]): The window size, :math:`(w_d, w_h, w_w)`.

    Returns:
        torch.Tensor: The partitioned windows of shape
            :math:`(B*num_windows, w_d*w_h*w_w, C)`.
    """
    B, D, H, W, C = x.shape
    x = x.view(B, D // window_size[0], window_size[0], H // window_size[1],
               window_size[1], W // window_size[2], window_size[2], C)
    windows = x.permute(0, 1, 3, 5, 2, 4, 6,
                        7).contiguous().view(-1, reduce(mul, window_size), C)
    return windows
Args: x (torch.Tensor): The input features of shape :math:`(B, D, H, W, C)`. window_size (Sequence[int]): The window size, :math:`(w_d, w_h, w_w)`. Returns: torch.Tensor: The partitioned windows of shape :math:`(B*num_windows, w_d*w_h*w_w, C)`.
window_partition
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def window_reverse(windows: torch.Tensor, window_size: Sequence[int], B: int,
                   D: int, H: int, W: int) -> torch.Tensor:
    """
    Args:
        windows (torch.Tensor): Input windows of shape
            :math:`(B*num_windows, w_d, w_h, w_w, C)`.
        window_size (Sequence[int]): The window size, :math:`(w_d, w_h, w_w)`.
        B (int): Batch size of feature maps.
        D (int): Temporal length of feature maps.
        H (int): Height of feature maps.
        W (int): Width of feature maps.

    Returns:
        torch.Tensor: The feature maps reversed from windows of shape
            :math:`(B, D, H, W, C)`.
    """
    x = windows.view(B, D // window_size[0], H // window_size[1],
                     W // window_size[2], window_size[0], window_size[1],
                     window_size[2], -1)
    x = x.permute(0, 1, 4, 2, 5, 3, 6, 7).contiguous().view(B, D, H, W, -1)
    return x
Args: windows (torch.Tensor): Input windows of shape :math:`(B*num_windows, w_d, w_h, w_w, C)`. window_size (Sequence[int]): The window size, :math:`(w_d, w_h, w_w)`. B (int): Batch size of feature maps. D (int): Temporal length of feature maps. H (int): Height of feature maps. W (int): Width of feature maps. Returns: torch.Tensor: The feature maps reversed from windows of shape :math:`(B, D, H, W, C)`.
window_reverse
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
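A quick round-trip sketch, not part of the dataset: assuming window_partition and window_reverse above are in scope (together with torch and the functools.reduce/operator.mul imports they rely on), partitioning and then reversing should reproduce the input exactly, since both are pure view/permute operations.

import torch

B, D, H, W, C = 2, 8, 8, 8, 96
window_size = (2, 4, 4)  # (w_d, w_h, w_w)

x = torch.randn(B, D, H, W, C)
windows = window_partition(x, window_size)
# num_windows per video = (8/2) * (8/4) * (8/4) = 16
assert windows.shape == (B * 16, 2 * 4 * 4, C)

# window_reverse expects (B*num_windows, w_d, w_h, w_w, C)
x_back = window_reverse(
    windows.view(-1, *window_size, C), window_size, B, D, H, W)
assert torch.equal(x, x_back)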
def get_window_size(
    x_size: Sequence[int],
    window_size: Sequence[int],
    shift_size: Optional[Sequence[int]] = None
) -> Union[Tuple[int], Tuple[Tuple[int]]]:
    """Calculate window size and shift size according to the input size.

    Args:
        x_size (Sequence[int]): The input size.
        window_size (Sequence[int]): The expected window size.
        shift_size (Sequence[int], optional): The expected shift size.
            Defaults to None.

    Returns:
        tuple: The calculated window size and shift size.
    """
    use_window_size = list(window_size)
    if shift_size is not None:
        use_shift_size = list(shift_size)
    for i in range(len(x_size)):
        if x_size[i] <= window_size[i]:
            use_window_size[i] = x_size[i]
            if shift_size is not None:
                use_shift_size[i] = 0

    if shift_size is None:
        return tuple(use_window_size)
    else:
        return tuple(use_window_size), tuple(use_shift_size)
Calculate window size and shift size according to the input size. Args: x_size (Sequence[int]): The input size. window_size (Sequence[int]): The expected window size. shift_size (Sequence[int], optional): The expected shift size. Defaults to None. Returns: tuple: The calculated window size and shift size.
get_window_size
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
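A small illustrative call, assuming get_window_size above is in scope: when the input is no larger than the window along an axis, the window is clamped to the input size and the shift along that axis is disabled.

ws, ss = get_window_size(
    x_size=(4, 56, 56), window_size=(8, 7, 7), shift_size=(4, 3, 3))
assert ws == (4, 7, 7)  # temporal window clamped to the input length
assert ss == (0, 3, 3)  # no temporal shift once the axis fits one window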
def compute_mask(D: int, H: int, W: int, window_size: Sequence[int],
                 shift_size: Sequence[int],
                 device: Union[str, torch.device]) -> torch.Tensor:
    """Compute attention mask.

    Args:
        D (int): Temporal length of feature maps.
        H (int): Height of feature maps.
        W (int): Width of feature maps.
        window_size (Sequence[int]): The window size.
        shift_size (Sequence[int]): The shift size.
        device (str or :obj:`torch.device`): The device of the mask.

    Returns:
        torch.Tensor: The attention mask used for shifted window attention.
    """
    img_mask = torch.zeros((1, D, H, W, 1), device=device)  # 1 Dp Hp Wp 1
    cnt = 0
    for d in slice(-window_size[0]), slice(-window_size[0],
                                           -shift_size[0]), slice(
                                               -shift_size[0], None):
        for h in slice(-window_size[1]), slice(-window_size[1],
                                               -shift_size[1]), slice(
                                                   -shift_size[1], None):
            for w in slice(-window_size[2]), slice(-window_size[2],
                                                   -shift_size[2]), slice(
                                                       -shift_size[2], None):
                img_mask[:, d, h, w, :] = cnt
                cnt += 1
    mask_windows = window_partition(img_mask,
                                    window_size)  # nW, ws[0]*ws[1]*ws[2], 1
    mask_windows = mask_windows.squeeze(-1)  # nW, ws[0]*ws[1]*ws[2]
    attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
    attn_mask = attn_mask.masked_fill(attn_mask != 0,
                                      float(-100.0)).masked_fill(
                                          attn_mask == 0, float(0.0))
    return attn_mask
Compute attention mask. Args: D (int): Temporal length of feature maps. H (int): Height of feature maps. W (int): Width of feature maps. window_size (Sequence[int]): The window size. shift_size (Sequence[int]): The shift size. device (str or :obj:`torch.device`): The device of the mask. Returns: torch.Tensor: The attention mask used for shifted window attention.
compute_mask
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
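A hedged shape check, assuming compute_mask and window_partition above are in scope: for an 8x8x8 padded feature map with (2, 4, 4) windows there are 16 windows of 32 tokens each, so the mask pairs every token with every other token in its window.

import torch

mask = compute_mask(
    D=8, H=8, W=8, window_size=(2, 4, 4), shift_size=(1, 2, 2),
    device='cpu')
assert mask.shape == (16, 32, 32)  # (num_windows, tokens, tokens)
assert set(mask.unique().tolist()) <= {-100.0, 0.0}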
def forward(self,
            x: torch.Tensor,
            mask: Optional[torch.Tensor] = None) -> torch.Tensor:
    """Forward function.

    Args:
        x (torch.Tensor): Input feature maps of shape
            :math:`(B*num_windows, N, C)`.
        mask (torch.Tensor, optional): (0/-inf) mask of shape
            :math:`(num_windows, N, N)`. Defaults to None.
    """
    B_, N, C = x.shape
    qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads,
                              C // self.num_heads).permute(2, 0, 3, 1, 4)
    q, k, v = qkv[0], qkv[1], qkv[2]  # B_, nH, N, C

    q = q * self.scale
    attn = q @ k.transpose(-2, -1)

    relative_position_bias = self.relative_position_bias_table[
        self.relative_position_index[:N, :N].reshape(-1)].reshape(
            N, N, -1)  # Wd*Wh*Ww,Wd*Wh*Ww,nH
    relative_position_bias = relative_position_bias.permute(
        2, 0, 1).contiguous()  # nH, Wd*Wh*Ww, Wd*Wh*Ww
    attn = attn + relative_position_bias.unsqueeze(0)  # B_, nH, N, N

    if mask is not None:
        nW = mask.shape[0]
        attn = attn.view(B_ // nW, nW, self.num_heads, N,
                         N) + mask.unsqueeze(1).unsqueeze(0)
        attn = attn.view(-1, self.num_heads, N, N)
    attn = self.softmax(attn)
    attn = self.attn_drop(attn)

    x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
    x = self.proj(x)
    x = self.proj_drop(x)
    return x
Forward function. Args: x (torch.Tensor): Input feature maps of shape :meth:`(B*num_windows, N, C)`. mask (torch.Tensor, optional): (0/-inf) mask of shape :meth:`(num_windows, N, N)`. Defaults to None.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Forward function."""
    x = self.fc1(x)
    x = self.act(x)
    x = self.drop(x)
    x = self.fc2(x)
    x = self.drop(x)
    return x
Forward function.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def forward_part1(self, x: torch.Tensor,
                  mask_matrix: torch.Tensor) -> torch.Tensor:
    """Forward function part1."""
    B, D, H, W, C = x.shape
    window_size, shift_size = get_window_size((D, H, W), self.window_size,
                                              self.shift_size)

    x = self.norm1(x)
    # pad feature maps to multiples of window size
    pad_l = pad_t = pad_d0 = 0
    pad_d1 = (window_size[0] - D % window_size[0]) % window_size[0]
    pad_b = (window_size[1] - H % window_size[1]) % window_size[1]
    pad_r = (window_size[2] - W % window_size[2]) % window_size[2]
    x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b, pad_d0, pad_d1))
    _, Dp, Hp, Wp, _ = x.shape
    # cyclic shift
    if any(i > 0 for i in shift_size):
        shifted_x = torch.roll(
            x,
            shifts=(-shift_size[0], -shift_size[1], -shift_size[2]),
            dims=(1, 2, 3))
        attn_mask = mask_matrix
    else:
        shifted_x = x
        attn_mask = None
    # partition windows
    x_windows = window_partition(shifted_x, window_size)  # B*nW, Wd*Wh*Ww, C
    # W-MSA/SW-MSA
    attn_windows = self.attn(x_windows, mask=attn_mask)  # B*nW, Wd*Wh*Ww, C
    # merge windows
    attn_windows = attn_windows.view(-1, *(window_size + (C, )))
    shifted_x = window_reverse(attn_windows, window_size, B, Dp, Hp,
                               Wp)  # B D' H' W' C
    # reverse cyclic shift
    if any(i > 0 for i in shift_size):
        x = torch.roll(
            shifted_x,
            shifts=(shift_size[0], shift_size[1], shift_size[2]),
            dims=(1, 2, 3))
    else:
        x = shifted_x

    if pad_d1 > 0 or pad_r > 0 or pad_b > 0:
        x = x[:, :D, :H, :W, :].contiguous()
    return x
Forward function part1.
forward_part1
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def forward_part2(self, x: torch.Tensor) -> torch.Tensor:
    """Forward function part2."""
    return self.drop_path(self.mlp(self.norm2(x)))
Forward function part2.
forward_part2
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def forward(self, x: torch.Tensor,
            mask_matrix: torch.Tensor) -> torch.Tensor:
    """
    Args:
        x (torch.Tensor): Input features of shape :math:`(B, D, H, W, C)`.
        mask_matrix (torch.Tensor): Attention mask for cyclic shift.
    """
    shortcut = x
    if self.with_cp:
        x = checkpoint.checkpoint(self.forward_part1, x, mask_matrix)
    else:
        x = self.forward_part1(x, mask_matrix)
    x = shortcut + self.drop_path(x)

    if self.with_cp:
        x = x + checkpoint.checkpoint(self.forward_part2, x)
    else:
        x = x + self.forward_part2(x)

    return x
Args: x (torch.Tensor): Input features of shape :math:`(B, D, H, W, C)`. mask_matrix (torch.Tensor): Attention mask for cyclic shift.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Perform patch merging.

    Args:
        x (torch.Tensor): Input feature maps of shape
            :math:`(B, D, H, W, C)`.

    Returns:
        torch.Tensor: The merged feature maps of shape
            :math:`(B, D, H/2, W/2, 2*C)`.
    """
    B, D, H, W, C = x.shape

    # padding
    pad_input = (H % 2 == 1) or (W % 2 == 1)
    if pad_input:
        x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))

    x0 = x[:, :, 0::2, 0::2, :]  # B D H/2 W/2 C
    x1 = x[:, :, 1::2, 0::2, :]  # B D H/2 W/2 C
    x2 = x[:, :, 0::2, 1::2, :]  # B D H/2 W/2 C
    x3 = x[:, :, 1::2, 1::2, :]  # B D H/2 W/2 C
    x = torch.cat([x0, x1, x2, x3], -1)  # B D H/2 W/2 4*C

    x = self.norm(x)
    x = self.reduction(x)
    return x
Perform patch merging. Args: x (torch.Tensor): Input feature maps of shape :math:`(B, D, H, W, C)`. Returns: torch.Tensor: The merged feature maps of shape :math:`(B, D, H/2, W/2, 2*C)`.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def forward(self,
            x: torch.Tensor,
            do_downsample: bool = True) -> torch.Tensor:
    """Forward function.

    Args:
        x (torch.Tensor): Input feature maps of shape
            :math:`(B, C, D, H, W)`.
        do_downsample (bool): Whether to downsample the output of
            the current layer. Defaults to True.
    """
    # calculate attention mask for SW-MSA
    B, C, D, H, W = x.shape
    window_size, shift_size = get_window_size((D, H, W), self.window_size,
                                              self.shift_size)
    x = rearrange(x, 'b c d h w -> b d h w c')
    Dp = int(np.ceil(D / window_size[0])) * window_size[0]
    Hp = int(np.ceil(H / window_size[1])) * window_size[1]
    Wp = int(np.ceil(W / window_size[2])) * window_size[2]
    attn_mask = compute_mask(Dp, Hp, Wp, window_size, shift_size, x.device)
    for blk in self.blocks:
        x = blk(x, attn_mask)
    if self.downsample is not None and do_downsample:
        x = self.downsample(x)
    return x
Forward function. Args: x (torch.Tensor): Input feature maps of shape :math:`(B, C, D, H, W)`. do_downsample (bool): Whether to downsample the output of the current layer. Defaults to True.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Perform video to patch embedding.

    Args:
        x (torch.Tensor): The input videos of shape
            :math:`(B, C, D, H, W)`. In most cases, C is 3.

    Returns:
        torch.Tensor: The video patches of shape
            :math:`(B, embed_dims, Dp, Hp, Wp)`.
    """
    _, _, D, H, W = x.size()
    if W % self.patch_size[2] != 0:
        x = F.pad(x, (0, self.patch_size[2] - W % self.patch_size[2]))
    if H % self.patch_size[1] != 0:
        x = F.pad(x, (0, 0, 0, self.patch_size[1] - H % self.patch_size[1]))
    if D % self.patch_size[0] != 0:
        x = F.pad(
            x, (0, 0, 0, 0, 0, self.patch_size[0] - D % self.patch_size[0]))

    x = self.proj(x)  # B C Dp Hp Wp
    if self.norm is not None:
        Dp, Hp, Wp = x.size(2), x.size(3), x.size(4)
        x = x.flatten(2).transpose(1, 2)  # B Dp*Hp*Wp C
        x = self.norm(x)
        x = x.transpose(1, 2).view(-1, self.embed_dims, Dp, Hp, Wp)

    return x
Perform video to patch embedding. Args: x (torch.Tensor): The input videos of shape :math:`(B, C, D, H, W)`. In most cases, C is 3. Returns: torch.Tensor: The video patches of shape :math:`(B, embed_dims, Dp, Hp, Wp)`.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def _freeze_stages(self) -> None:
    """Prevent all the parameters from being optimized before
    ``self.frozen_stages``."""
    if self.frozen_stages >= 0:
        self.patch_embed.eval()
        for param in self.patch_embed.parameters():
            param.requires_grad = False

    if self.frozen_stages >= 1:
        self.pos_drop.eval()
        for i in range(0, self.frozen_stages):
            m = self.layers[i]
            m.eval()
            for param in m.parameters():
                param.requires_grad = False
Prevent all the parameters from being optimized before ``self.frozen_stages``.
_freeze_stages
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def inflate_weights(self, logger: MMLogger) -> None:
    """Inflate the swin2d parameters to swin3d.

    The differences between swin3d and swin2d mainly lie in an extra
    axis. To utilize the pretrained parameters in 2d model, the weight
    of swin2d models should be inflated to fit in the shapes of the
    3d counterpart.

    Args:
        logger (MMLogger): The logger used to print debugging information.
    """
    checkpoint = _load_checkpoint(self.pretrained, map_location='cpu')
    state_dict = checkpoint['model']

    # delete relative_position_index since we always re-init it
    relative_position_index_keys = [
        k for k in state_dict.keys() if 'relative_position_index' in k
    ]
    for k in relative_position_index_keys:
        del state_dict[k]

    # delete attn_mask since we always re-init it
    attn_mask_keys = [k for k in state_dict.keys() if 'attn_mask' in k]
    for k in attn_mask_keys:
        del state_dict[k]

    state_dict['patch_embed.proj.weight'] = \
        state_dict['patch_embed.proj.weight'].unsqueeze(2).\
        repeat(1, 1, self.patch_size[0], 1, 1) / self.patch_size[0]

    # bicubic interpolate relative_position_bias_table if not match
    relative_position_bias_table_keys = [
        k for k in state_dict.keys() if 'relative_position_bias_table' in k
    ]
    for k in relative_position_bias_table_keys:
        relative_position_bias_table_pretrained = state_dict[k]
        relative_position_bias_table_current = self.state_dict()[k]
        L1, nH1 = relative_position_bias_table_pretrained.size()
        L2, nH2 = relative_position_bias_table_current.size()
        L2 = (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1)
        wd = self.window_size[0]
        if nH1 != nH2:
            logger.warning(f'Error in loading {k}, passing')
        else:
            if L1 != L2:
                S1 = int(L1**0.5)
                relative_position_bias_table_pretrained_resized = \
                    torch.nn.functional.interpolate(
                        relative_position_bias_table_pretrained.permute(
                            1, 0).view(1, nH1, S1, S1),
                        size=(2 * self.window_size[1] - 1,
                              2 * self.window_size[2] - 1),
                        mode='bicubic')
                relative_position_bias_table_pretrained = \
                    relative_position_bias_table_pretrained_resized. \
                    view(nH2, L2).permute(1, 0)
        state_dict[k] = relative_position_bias_table_pretrained.repeat(
            2 * wd - 1, 1)

    # In the original swin2d checkpoint, the last layer of the
    # backbone is the norm layer, and the original attribute
    # name is `norm`. We changed it to `norm3` which means it
    # is the last norm layer of stage 4.
    if hasattr(self, 'norm3'):
        state_dict['norm3.weight'] = state_dict['norm.weight']
        state_dict['norm3.bias'] = state_dict['norm.bias']
        del state_dict['norm.weight']
        del state_dict['norm.bias']

    msg = self.load_state_dict(state_dict, strict=False)
    logger.info(msg)
Inflate the swin2d parameters to swin3d. The differences between swin3d and swin2d mainly lie in an extra axis. To utilize the pretrained parameters in 2d model, the weight of swin2d models should be inflated to fit in the shapes of the 3d counterpart. Args: logger (MMLogger): The logger used to print debugging information.
inflate_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def init_weights(self) -> None:
    """Initialize the weights in backbone."""
    if self.pretrained2d:
        logger = MMLogger.get_current_instance()
        logger.info(f'load model from: {self.pretrained}')
        # Inflate 2D model into 3D model.
        self.inflate_weights(logger)
    else:
        if self.pretrained:
            self.init_cfg = dict(
                type='Pretrained', checkpoint=self.pretrained)
        super().init_weights()
Initialize the weights in backbone.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def forward(self, x: torch.Tensor) -> \
        Union[Tuple[torch.Tensor], torch.Tensor]:
    """Forward function for Swin3d Transformer."""
    x = self.patch_embed(x)
    x = self.pos_drop(x)

    outs = []
    for i, layer in enumerate(self.layers):
        x = layer(x.contiguous(), do_downsample=self.out_after_downsample)
        if i in self.out_indices:
            norm_layer = getattr(self, f'norm{i}')
            out = norm_layer(x)
            out = rearrange(out, 'b d h w c -> b c d h w').contiguous()
            outs.append(out)
        if layer.downsample is not None and not self.out_after_downsample:
            x = layer.downsample(x)
        if i < self.num_layers - 1:
            x = rearrange(x, 'b d h w c -> b c d h w')

    if len(outs) == 1:
        return outs[0]

    return tuple(outs)
Forward function for Swin3d Transformer.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def train(self, mode: bool = True) -> None:
    """Convert the model into training mode while keeping layers frozen."""
    super(SwinTransformer3D, self).train(mode)
    self._freeze_stages()
Convert the model into training mode while keeping layers frozen.
train
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def init_weights(self):
    """Initialize weights."""
    # Lecun norm from ClassyVision
    kaiming_init(self.projection, mode='fan_in', nonlinearity='linear')
Initialize weights.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/timesformer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/timesformer.py
Apache-2.0
def forward(self, x):
    """Defines the computation performed at every call.

    Args:
        x (Tensor): The input data.

    Returns:
        Tensor: The output of the module.
    """
    x = rearrange(x, 'b c t h w -> (b t) c h w')
    x = self.projection(x).flatten(2).transpose(1, 2)
    return x
Defines the computation performed at every call. Args: x (Tensor): The input data. Returns: Tensor: The output of the module.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/timesformer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/timesformer.py
Apache-2.0
def init_weights(self, pretrained=None):
    """Initiate the parameters either from existing checkpoint or from
    scratch."""
    trunc_normal_(self.pos_embed, std=.02)
    trunc_normal_(self.cls_token, std=.02)

    if pretrained:
        self.pretrained = pretrained
    if isinstance(self.pretrained, str):
        logger = MMLogger.get_current_instance()
        logger.info(f'load model from: {self.pretrained}')

        state_dict = _load_checkpoint(self.pretrained, map_location='cpu')
        if 'state_dict' in state_dict:
            state_dict = state_dict['state_dict']

        if self.attention_type == 'divided_space_time':
            # modify the key names of norm layers
            old_state_dict_keys = list(state_dict.keys())
            for old_key in old_state_dict_keys:
                if 'norms' in old_key:
                    new_key = old_key.replace('norms.0',
                                              'attentions.0.norm')
                    new_key = new_key.replace('norms.1', 'ffns.0.norm')
                    state_dict[new_key] = state_dict.pop(old_key)

            # copy the parameters of space attention to time attention
            old_state_dict_keys = list(state_dict.keys())
            for old_key in old_state_dict_keys:
                if 'attentions.0' in old_key:
                    new_key = old_key.replace('attentions.0',
                                              'attentions.1')
                    state_dict[new_key] = state_dict[old_key].clone()

        load_state_dict(self, state_dict, strict=False, logger=logger)
Initiate the parameters either from existing checkpoint or from scratch.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/timesformer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/timesformer.py
Apache-2.0
def forward(self, x):
    """Defines the computation performed at every call."""
    # x [batch_size * num_frames, num_patches, embed_dims]
    batches = x.shape[0]
    x = self.patch_embed(x)

    # x [batch_size * num_frames, num_patches + 1, embed_dims]
    cls_tokens = self.cls_token.expand(x.size(0), -1, -1)
    x = torch.cat((cls_tokens, x), dim=1)
    x = x + self.pos_embed
    x = self.drop_after_pos(x)

    # Add Time Embedding
    if self.attention_type != 'space_only':
        # x [batch_size, num_patches * num_frames + 1, embed_dims]
        cls_tokens = x[:batches, 0, :].unsqueeze(1)
        x = rearrange(x[:, 1:, :], '(b t) p m -> (b p) t m', b=batches)
        x = x + self.time_embed
        x = self.drop_after_time(x)
        x = rearrange(x, '(b p) t m -> b (p t) m', b=batches)
        x = torch.cat((cls_tokens, x), dim=1)

    x = self.transformer_layers(x, None, None)

    if self.attention_type == 'space_only':
        # x [batch_size, num_patches + 1, embed_dims]
        x = x.view(-1, self.num_frames, *x.size()[-2:])
        x = torch.mean(x, 1)

    x = self.norm(x)
    # Return Class Token
    return x[:, 0]
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/timesformer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/timesformer.py
Apache-2.0
def _make_stem_layer(self) -> None:
    """Construct the stem layers consisting of a conv+norm+act module and
    a pooling layer."""
    self.conv1 = ConvModule(
        self.in_channels,
        64,
        kernel_size=7,
        stride=2,
        padding=3,
        bias=False,
        conv_cfg=self.conv_cfg,
        norm_cfg=self.norm_cfg,
        act_cfg=self.act_cfg)
    self.maxpool3d_1 = nn.MaxPool3d(
        kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 0, 0))
    self.maxpool3d_2 = nn.MaxPool3d(
        kernel_size=(2, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0))
Construct the stem layers consisting of a conv+norm+act module and a pooling layer.
_make_stem_layer
python
open-mmlab/mmaction2
mmaction/models/backbones/c2d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/c2d.py
Apache-2.0
def _convert_to_2d(x: torch.Tensor) -> torch.Tensor:
    """(N, C, T, H, W) -> (N x T, C, H, W)"""
    x = x.permute((0, 2, 1, 3, 4))
    x = x.reshape(-1, x.shape[2], x.shape[3], x.shape[4])
    return x
(N, C, T, H, W) -> (N x T, C, H, W)
forward._convert_to_2d
python
open-mmlab/mmaction2
mmaction/models/backbones/c2d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/c2d.py
Apache-2.0
def _convert_to_3d(x: torch.Tensor) -> torch.Tensor:
    """(N x T, C, H, W) -> (N, C, T, H, W)"""
    x = x.reshape(batches, -1, x.shape[1], x.shape[2], x.shape[3])
    x = x.permute((0, 2, 1, 3, 4))
    return x
(N x T, C, H, W) -> (N, C, T, H, W)
forward._convert_to_3d
python
open-mmlab/mmaction2
mmaction/models/backbones/c2d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/c2d.py
Apache-2.0
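A standalone sketch of the two layout helpers, hedged: the originals are closures inside C2D.forward that capture `batches`, so this re-implementation passes it explicitly. Round-tripping a (N, C, T, H, W) tensor is the natural sanity check.

import torch

def to_2d(x: torch.Tensor) -> torch.Tensor:
    """(N, C, T, H, W) -> (N*T, C, H, W)"""
    n, c, t, h, w = x.shape
    return x.permute(0, 2, 1, 3, 4).reshape(n * t, c, h, w)

def to_3d(x: torch.Tensor, batches: int) -> torch.Tensor:
    """(N*T, C, H, W) -> (N, C, T, H, W)"""
    nt, c, h, w = x.shape
    return x.reshape(batches, nt // batches, c, h, w).permute(0, 2, 1, 3, 4)

x = torch.randn(2, 3, 8, 32, 32)
assert torch.equal(to_3d(to_2d(x), batches=2), x)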
def forward(self, x: torch.Tensor) \
        -> Union[torch.Tensor, Tuple[torch.Tensor]]:
    """Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        Union[torch.Tensor, Tuple[torch.Tensor]]: The feature of the
            input samples extracted by the backbone.
    """
    batches = x.shape[0]

    def _convert_to_2d(x: torch.Tensor) -> torch.Tensor:
        """(N, C, T, H, W) -> (N x T, C, H, W)"""
        x = x.permute((0, 2, 1, 3, 4))
        x = x.reshape(-1, x.shape[2], x.shape[3], x.shape[4])
        return x

    def _convert_to_3d(x: torch.Tensor) -> torch.Tensor:
        """(N x T, C, H, W) -> (N, C, T, H, W)"""
        x = x.reshape(batches, -1, x.shape[1], x.shape[2], x.shape[3])
        x = x.permute((0, 2, 1, 3, 4))
        return x

    x = _convert_to_2d(x)
    x = self.conv1(x)
    x = _convert_to_3d(x)
    x = self.maxpool3d_1(x)
    x = _convert_to_2d(x)

    outs = []
    for i, layer_name in enumerate(self.res_layers):
        res_layer = getattr(self, layer_name)
        x = res_layer(x)
        if i == 0:
            x = _convert_to_3d(x)
            x = self.maxpool3d_2(x)
            x = _convert_to_2d(x)
        if i in self.out_indices:
            x = _convert_to_3d(x)
            outs.append(x)

    if len(outs) == 1:
        return outs[0]

    return tuple(outs)
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: Union[torch.Tensor, Tuple[torch.Tensor]]: The feature of the input samples extracted by the backbone.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/c2d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/c2d.py
Apache-2.0
def make_block_temporal(stage, num_segments):
    """Make temporal shift on some blocks.

    Args:
        stage (nn.Module): Model layers to be shifted.
        num_segments (int): Number of frame segments.

    Returns:
        nn.Module: The shifted blocks.
    """
    blocks = list(stage.children())
    for i, b in enumerate(blocks):
        blocks[i] = TemporalShift(
            b, num_segments=num_segments, shift_div=self.shift_div)
    return nn.Sequential(*blocks)
Make temporal shift on some blocks. Args: stage (nn.Module): Model layers to be shifted. num_segments (int): Number of frame segments. Returns: nn.Module: The shifted blocks.
make_temporal_shift.make_block_temporal
python
open-mmlab/mmaction2
mmaction/models/backbones/mobileone_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobileone_tsm.py
Apache-2.0
def make_temporal_shift(self):
    """Make temporal shift for some layers.

    To make reparameterization work, we can only build the shift layer
    before the 'block', instead of the 'blockres'.
    """

    def make_block_temporal(stage, num_segments):
        """Make temporal shift on some blocks.

        Args:
            stage (nn.Module): Model layers to be shifted.
            num_segments (int): Number of frame segments.

        Returns:
            nn.Module: The shifted blocks.
        """
        blocks = list(stage.children())
        for i, b in enumerate(blocks):
            blocks[i] = TemporalShift(
                b, num_segments=num_segments, shift_div=self.shift_div)
        return nn.Sequential(*blocks)

    self.stage0 = make_block_temporal(
        nn.Sequential(self.stage0), self.num_segments)[0]
    for i in range(1, 5):
        temporal_stage = make_block_temporal(
            getattr(self, f'stage{i}'), self.num_segments)
        setattr(self, f'stage{i}', temporal_stage)
Make temporal shift for some layers. To make reparameterization work, we can only build the shift layer before the 'block', instead of the 'blockres'
make_temporal_shift
python
open-mmlab/mmaction2
mmaction/models/backbones/mobileone_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobileone_tsm.py
Apache-2.0
def init_structure(self):
    """Initiate the parameters either from existing checkpoint or from
    scratch."""
    if self.is_shift:
        self.make_temporal_shift()
Initiate the parameters either from existing checkpoint or from scratch.
init_structure
python
open-mmlab/mmaction2
mmaction/models/backbones/mobileone_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobileone_tsm.py
Apache-2.0
def init_weights(self):
    """Initiate the parameters either from existing checkpoint or from
    scratch."""
    if self.pretrained2d:
        logger = MMLogger.get_current_instance()
        self.load_original_weights(logger)
    else:
        super().init_weights()
Initiate the parameters either from existing checkpoint or from scratch.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/mobileone_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobileone_tsm.py
Apache-2.0
def forward(self, x):
    """Unpack the tuple result."""
    x = super().forward(x)
    if isinstance(x, tuple):
        assert len(x) == 1
        x = x[0]
    return x
Unpack the tuple result.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/mobileone_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobileone_tsm.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call."""
    res = self.residual(x)
    x = self.tcn(self.gcn(x)) + res
    return self.relu(x)
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/stgcn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/stgcn.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call."""
    N, M, T, V, C = x.size()
    x = x.permute(0, 1, 3, 4, 2).contiguous()
    if self.data_bn_type == 'MVC':
        x = self.data_bn(x.view(N, M * V * C, T))
    else:
        x = self.data_bn(x.view(N * M, V * C, T))

    x = x.view(N, M, V, C, T).permute(0, 1, 3, 4,
                                      2).contiguous().view(N * M, C, T, V)

    for i in range(self.num_stages):
        x = self.gcn[i](x)

    x = x.reshape((N, M) + x.shape[1:])
    return x
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/stgcn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/stgcn.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call."""
    # x should be a 5-d tensor
    assert len(x.shape) == 5
    N, C, T, H, W = x.shape
    out_shape = (N, self.out_channels, self.stride[0] * T,
                 self.stride[1] * H, self.stride[2] * W)
    x = self.conv(x, output_size=out_shape)
    if self.with_bn:
        x = self.bn(x)
    if self.with_relu:
        x = self.relu(x)
    return x
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d_slowfast.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d_slowfast.py
Apache-2.0
def _calculate_lateral_inplanes(self, kwargs):
    """Calculate inplanes for lateral connection."""
    depth = kwargs.get('depth', 50)
    expansion = 1 if depth < 50 else 4
    base_channels = kwargs.get('base_channels', 64)
    lateral_inplanes = []
    for i in range(kwargs.get('num_stages', 4)):
        if expansion % 2 == 0:
            planes = base_channels * (2 ** i) * \
                ((expansion // 2) ** (i > 0))
        else:
            planes = base_channels * (2**i) // (2**(i > 0))
        if self.lateral and self.lateral_activate[i]:
            if self.lateral_inv:
                lateral_inplane = planes * \
                    self.channel_ratio // self.lateral_infl
            else:
                lateral_inplane = planes * \
                    self.lateral_infl // self.channel_ratio
        else:
            lateral_inplane = 0
        lateral_inplanes.append(lateral_inplane)
    self.lateral_inplanes = lateral_inplanes
Calculate inplanes for lateral connection.
_calculate_lateral_inplanes
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d_slowfast.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d_slowfast.py
Apache-2.0
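A worked sketch of the arithmetic above with assumed values (depth=50, base_channels=64, lateral_infl=2, channel_ratio=8, every stage lateral-activated, lateral_inv=False); the concrete numbers are illustrative, not taken from the record.

depth, base_channels = 50, 64
expansion = 1 if depth < 50 else 4  # 4 -> Bottleneck-style stages
lateral_inplanes = []
for i in range(4):
    # planes per stage: 64, 256, 512, 1024
    planes = base_channels * (2**i) * ((expansion // 2)**(i > 0))
    # lateral branch contributes planes * lateral_infl // channel_ratio
    lateral_inplanes.append(planes * 2 // 8)
print(lateral_inplanes)  # [16, 64, 128, 256]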
def inflate_weights(self, logger: MMLogger) -> None:
    """Inflate the resnet2d parameters to resnet3d pathway.

    The differences between resnet3d and resnet2d mainly lie in an extra
    axis of conv kernel. To utilize the pretrained parameters in 2d model,
    the weight of conv2d models should be inflated to fit in the shapes of
    the 3d counterpart. For the pathway, the ``lateral_connection`` part
    should not be inflated from 2d weights.

    Args:
        logger (MMLogger): The logger used to print debugging information.
    """
    state_dict_r2d = _load_checkpoint(self.pretrained, map_location='cpu')
    if 'state_dict' in state_dict_r2d:
        state_dict_r2d = state_dict_r2d['state_dict']

    inflated_param_names = []
    for name, module in self.named_modules():
        if 'lateral' in name:
            continue
        if isinstance(module, ConvModule):
            # we use a ConvModule to wrap conv+bn+relu layers, thus the
            # name mapping is needed
            if 'downsample' in name:
                # layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
                original_conv_name = name + '.0'
                # layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
                original_bn_name = name + '.1'
            else:
                # layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
                original_conv_name = name
                # layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
                original_bn_name = name.replace('conv', 'bn')
            if original_conv_name + '.weight' not in state_dict_r2d:
                logger.warning(f'Module not exist in the state_dict_r2d'
                               f': {original_conv_name}')
            else:
                self._inflate_conv_params(module.conv, state_dict_r2d,
                                          original_conv_name,
                                          inflated_param_names)
            if original_bn_name + '.weight' not in state_dict_r2d:
                logger.warning(f'Module not exist in the state_dict_r2d'
                               f': {original_bn_name}')
            else:
                self._inflate_bn_params(module.bn, state_dict_r2d,
                                        original_bn_name,
                                        inflated_param_names)

    # check if any parameters in the 2d checkpoint are not loaded
    remaining_names = set(
        state_dict_r2d.keys()) - set(inflated_param_names)
    if remaining_names:
        logger.info(f'These parameters in the 2d checkpoint are not loaded'
                    f': {remaining_names}')
Inflate the resnet2d parameters to resnet3d pathway. The differences between resnet3d and resnet2d mainly lie in an extra axis of conv kernel. To utilize the pretrained parameters in 2d model, the weight of conv2d models should be inflated to fit in the shapes of the 3d counterpart. For the pathway, the ``lateral_connection`` part should not be inflated from 2d weights. Args: logger (MMLogger): The logger used to print debugging information.
inflate_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d_slowfast.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d_slowfast.py
Apache-2.0
def _inflate_conv_params(self, conv3d: nn.Module,
                         state_dict_2d: OrderedDict, module_name_2d: str,
                         inflated_param_names: List[str]) -> None:
    """Inflate a conv module from 2d to 3d.

    The differences of conv modules between 2d and 3d in Pathway
    mainly lie in the inplanes due to lateral connections. To fit the
    shapes of the lateral connection counterpart, it will expand
    parameters by concatenating conv2d parameters and extra zero paddings.

    Args:
        conv3d (nn.Module): The destination conv3d module.
        state_dict_2d (OrderedDict): The state dict of pretrained 2d model.
        module_name_2d (str): The name of corresponding conv module in the
            2d model.
        inflated_param_names (list[str]): List of parameters that have been
            inflated.
    """
    weight_2d_name = module_name_2d + '.weight'
    conv2d_weight = state_dict_2d[weight_2d_name]
    old_shape = conv2d_weight.shape
    new_shape = conv3d.weight.data.shape
    kernel_t = new_shape[2]

    if new_shape[1] != old_shape[1]:
        if new_shape[1] < old_shape[1]:
            warnings.warn(f'The parameter of {module_name_2d} is not '
                          'loaded due to incompatible shapes. ')
            return
        # Inplanes may be different due to lateral connections
        new_channels = new_shape[1] - old_shape[1]
        pad_shape = old_shape
        pad_shape = pad_shape[:1] + (new_channels, ) + pad_shape[2:]
        # Expand parameters by concat extra channels
        conv2d_weight = torch.cat(
            (conv2d_weight,
             torch.zeros(pad_shape).type_as(conv2d_weight).to(
                 conv2d_weight.device)),
            dim=1)

    new_weight = conv2d_weight.data.unsqueeze(2).expand_as(
        conv3d.weight) / kernel_t
    conv3d.weight.data.copy_(new_weight)
    inflated_param_names.append(weight_2d_name)

    if getattr(conv3d, 'bias') is not None:
        bias_2d_name = module_name_2d + '.bias'
        conv3d.bias.data.copy_(state_dict_2d[bias_2d_name])
        inflated_param_names.append(bias_2d_name)
Inflate a conv module from 2d to 3d. The differences of conv modules between 2d and 3d in Pathway mainly lie in the inplanes due to lateral connections. To fit the shapes of the lateral connection counterpart, it will expand parameters by concatenating conv2d parameters and extra zero paddings. Args: conv3d (nn.Module): The destination conv3d module. state_dict_2d (OrderedDict): The state dict of pretrained 2d model. module_name_2d (str): The name of corresponding conv module in the 2d model. inflated_param_names (list[str]): List of parameters that have been inflated.
_inflate_conv_params
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d_slowfast.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d_slowfast.py
Apache-2.0
def _freeze_stages(self) -> None:
    """Prevent all the parameters from being optimized before
    `self.frozen_stages`."""
    if self.frozen_stages >= 0:
        self.conv1.eval()
        for param in self.conv1.parameters():
            param.requires_grad = False

    for i in range(1, self.frozen_stages + 1):
        m = getattr(self, f'layer{i}')
        m.eval()
        for param in m.parameters():
            param.requires_grad = False

        if i != len(self.res_layers) and self.lateral:
            # No fusion needed in the final stage
            lateral_name = self.lateral_connections[i - 1]
            conv_lateral = getattr(self, lateral_name)
            conv_lateral.eval()
            for param in conv_lateral.parameters():
                param.requires_grad = False
Prevent all the parameters from being optimized before `self.frozen_stages`.
_freeze_stages
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d_slowfast.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d_slowfast.py
Apache-2.0
def init_weights(self, pretrained: Optional[str] = None) -> None:
    """Initiate the parameters either from existing checkpoint or from
    scratch."""
    if pretrained:
        self.pretrained = pretrained

    # Override the init_weights of i3d
    super().init_weights()
    for module_name in self.lateral_connections:
        layer = getattr(self, module_name)
        for m in layer.modules():
            if isinstance(m, (nn.Conv3d, nn.Conv2d)):
                kaiming_init(m)
Initiate the parameters either from existing checkpoint or from scratch.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d_slowfast.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d_slowfast.py
Apache-2.0
def build_pathway(cfg: Dict, *args, **kwargs) -> nn.Module:
    """Build pathway.

    Args:
        cfg (dict): cfg should contain:
            - type (str): identify backbone type.

    Returns:
        nn.Module: Created pathway.
    """
    if not (isinstance(cfg, dict) and 'type' in cfg):
        raise TypeError('cfg must be a dict containing the key "type"')
    cfg_ = cfg.copy()

    pathway_type = cfg_.pop('type')
    if pathway_type not in pathway_cfg:
        raise KeyError(f'Unrecognized pathway type {pathway_type}')

    pathway_cls = pathway_cfg[pathway_type]
    pathway = pathway_cls(*args, **kwargs, **cfg_)

    return pathway
Build pathway. Args: cfg (dict): cfg should contain: - type (str): identify backbone type. Returns: nn.Module: Created pathway.
build_pathway
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d_slowfast.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d_slowfast.py
Apache-2.0
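A minimal sketch of the registry pattern build_pathway relies on; DummyPathway and the 'dummy' key are assumptions for illustration, and in the real module pathway_cfg is a module-level dict mapping type strings to pathway classes.

import torch.nn as nn

class DummyPathway(nn.Module):

    def __init__(self, channels: int = 64):
        super().__init__()
        self.conv = nn.Conv3d(3, channels, kernel_size=1)

# In resnet3d_slowfast.py this dict lives at module level next to
# build_pathway; the local stand-in here is only for the sketch.
pathway_cfg = {'dummy': DummyPathway}

pathway = build_pathway(dict(type='dummy', channels=32))
assert isinstance(pathway, DummyPathway)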
def init_weights(self, pretrained: Optional[str] = None) -> None:
    """Initiate the parameters either from existing checkpoint or from
    scratch."""
    if pretrained:
        self.pretrained = pretrained

    if isinstance(self.pretrained, str):
        logger = MMLogger.get_current_instance()
        msg = f'load model from: {self.pretrained}'
        print_log(msg, logger=logger)
        # Directly load 3D model.
        load_checkpoint(self, self.pretrained, strict=True, logger=logger)
    elif self.pretrained is None:
        # Init the two branches separately.
        self.fast_path.init_weights()
        self.slow_path.init_weights()
    else:
        raise TypeError('pretrained must be a str or None')
Initiate the parameters either from existing checkpoint or from scratch.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d_slowfast.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d_slowfast.py
Apache-2.0
def forward(self, x: torch.Tensor) -> tuple:
    """Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        tuple[torch.Tensor]: The feature of the input samples extracted
            by the backbone.
    """
    x_slow = nn.functional.interpolate(
        x,
        mode='nearest',
        scale_factor=(1.0 / self.resample_rate, 1.0, 1.0))
    x_slow = self.slow_path.conv1(x_slow)
    x_slow = self.slow_path.maxpool(x_slow)

    x_fast = nn.functional.interpolate(
        x,
        mode='nearest',
        scale_factor=(1.0 / (self.resample_rate // self.speed_ratio), 1.0,
                      1.0))
    x_fast = self.fast_path.conv1(x_fast)
    x_fast = self.fast_path.maxpool(x_fast)

    if self.slow_path.lateral:
        x_fast_lateral = self.slow_path.conv1_lateral(x_fast)
        x_slow = torch.cat((x_slow, x_fast_lateral), dim=1)

    for i, layer_name in enumerate(self.slow_path.res_layers):
        res_layer = getattr(self.slow_path, layer_name)
        x_slow = res_layer(x_slow)
        res_layer_fast = getattr(self.fast_path, layer_name)
        x_fast = res_layer_fast(x_fast)
        if (i != len(self.slow_path.res_layers) - 1
                and self.slow_path.lateral):
            # No fusion needed in the final stage
            lateral_name = self.slow_path.lateral_connections[i]
            conv_lateral = getattr(self.slow_path, lateral_name)
            x_fast_lateral = conv_lateral(x_fast)
            x_slow = torch.cat((x_slow, x_fast_lateral), dim=1)

    out = (x_slow, x_fast)

    return out
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: tuple[torch.Tensor]: The feature of the input samples extracted by the backbone.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d_slowfast.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d_slowfast.py
Apache-2.0
def init_weights(self) -> None:
    """Initiate the parameters either from existing checkpoint or from
    scratch."""
    for m in self.modules():
        if isinstance(m, nn.Conv3d):
            kaiming_init(m)
        elif isinstance(m, _BatchNorm):
            constant_init(m, 1)

    if isinstance(self.pretrained, str):
        logger = MMLogger.get_current_instance()
        msg = f'load model from: {self.pretrained}'
        print_log(msg, logger=logger)
        load_checkpoint(self, self.pretrained, strict=True, logger=logger)
    elif self.pretrained is None:
        # Init the two branches separately.
        self.rgb_path.init_weights()
        self.pose_path.init_weights()
    else:
        raise TypeError('pretrained must be a str or None')
Initiate the parameters either from existing checkpoint or from scratch.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/rgbposeconv3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/rgbposeconv3d.py
Apache-2.0
def forward(self, imgs: torch.Tensor, heatmap_imgs: torch.Tensor) -> tuple:
    """Defines the computation performed at every call.

    Args:
        imgs (torch.Tensor): The input RGB frames.
        heatmap_imgs (torch.Tensor): The input pose heatmaps.

    Returns:
        tuple[torch.Tensor]: The feature of the input samples extracted
            by the backbone.
    """
    if self.training:
        rgb_drop_path = torch.rand(1) < self.rgb_drop_path
        pose_drop_path = torch.rand(1) < self.pose_drop_path
    else:
        rgb_drop_path, pose_drop_path = False, False
    # We assume base_channel for RGB and Pose are 64 and 32.
    x_rgb = self.rgb_path.conv1(imgs)
    x_rgb = self.rgb_path.maxpool(x_rgb)
    # N x 64 x 8 x 56 x 56
    x_pose = self.pose_path.conv1(heatmap_imgs)
    x_pose = self.pose_path.maxpool(x_pose)

    x_rgb = self.rgb_path.layer1(x_rgb)
    x_rgb = self.rgb_path.layer2(x_rgb)
    x_pose = self.pose_path.layer1(x_pose)

    if hasattr(self.rgb_path, 'layer2_lateral'):
        feat = x_pose.detach() if self.rgb_detach else x_pose
        x_pose_lateral = self.rgb_path.layer2_lateral(feat)
        if rgb_drop_path:
            x_pose_lateral = x_pose_lateral.new_zeros(x_pose_lateral.shape)

    if hasattr(self.pose_path, 'layer1_lateral'):
        feat = x_rgb.detach() if self.pose_detach else x_rgb
        x_rgb_lateral = self.pose_path.layer1_lateral(feat)
        if pose_drop_path:
            x_rgb_lateral = x_rgb_lateral.new_zeros(x_rgb_lateral.shape)

    if hasattr(self.rgb_path, 'layer2_lateral'):
        x_rgb = torch.cat((x_rgb, x_pose_lateral), dim=1)

    if hasattr(self.pose_path, 'layer1_lateral'):
        x_pose = torch.cat((x_pose, x_rgb_lateral), dim=1)

    x_rgb = self.rgb_path.layer3(x_rgb)
    x_pose = self.pose_path.layer2(x_pose)

    if hasattr(self.rgb_path, 'layer3_lateral'):
        feat = x_pose.detach() if self.rgb_detach else x_pose
        x_pose_lateral = self.rgb_path.layer3_lateral(feat)
        if rgb_drop_path:
            x_pose_lateral = x_pose_lateral.new_zeros(x_pose_lateral.shape)

    if hasattr(self.pose_path, 'layer2_lateral'):
        feat = x_rgb.detach() if self.pose_detach else x_rgb
        x_rgb_lateral = self.pose_path.layer2_lateral(feat)
        if pose_drop_path:
            x_rgb_lateral = x_rgb_lateral.new_zeros(x_rgb_lateral.shape)

    if hasattr(self.rgb_path, 'layer3_lateral'):
        x_rgb = torch.cat((x_rgb, x_pose_lateral), dim=1)

    if hasattr(self.pose_path, 'layer2_lateral'):
        x_pose = torch.cat((x_pose, x_rgb_lateral), dim=1)

    x_rgb = self.rgb_path.layer4(x_rgb)
    x_pose = self.pose_path.layer3(x_pose)

    return x_rgb, x_pose
Defines the computation performed at every call. Args: imgs (torch.Tensor): The input RGB frames. heatmap_imgs (torch.Tensor): The input pose heatmaps. Returns: tuple[torch.Tensor]: The feature of the input samples extracted by the backbone.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/rgbposeconv3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/rgbposeconv3d.py
Apache-2.0
def resize_pos_embed(pos_embed: torch.Tensor,
                     src_shape: Tuple[int],
                     dst_shape: Tuple[int],
                     mode: str = 'trilinear',
                     num_extra_tokens: int = 1) -> torch.Tensor:
    """Resize pos_embed weights.

    Args:
        pos_embed (torch.Tensor): Position embedding weights with shape
            [1, L, C].
        src_shape (tuple): The resolution of downsampled origin training
            image, in format (T, H, W).
        dst_shape (tuple): The resolution of downsampled new training
            image, in format (T, H, W).
        mode (str): Algorithm used for upsampling. Choose one from 'nearest',
            'linear', 'bilinear', 'bicubic' and 'trilinear'.
            Defaults to 'trilinear'.
        num_extra_tokens (int): The number of extra tokens, such as cls_token.
            Defaults to 1.

    Returns:
        torch.Tensor: The resized pos_embed of shape [1, L_new, C]
    """
    if src_shape[0] == dst_shape[0] and src_shape[1] == dst_shape[1] \
            and src_shape[2] == dst_shape[2]:
        return pos_embed
    assert pos_embed.ndim == 3, 'shape of pos_embed must be [1, L, C]'
    _, L, C = pos_embed.shape
    src_t, src_h, src_w = src_shape
    assert L == src_t * src_h * src_w + num_extra_tokens, \
        f"The length of `pos_embed` ({L}) doesn't match the expected " \
        f'shape ({src_t}*{src_h}*{src_w}+{num_extra_tokens}). ' \
        'Please check the `img_size` argument.'
    extra_tokens = pos_embed[:, :num_extra_tokens]

    src_weight = pos_embed[:, num_extra_tokens:]
    src_weight = src_weight.reshape(1, src_t, src_h, src_w,
                                    C).permute(0, 4, 1, 2, 3)

    dst_weight = F.interpolate(
        src_weight, size=dst_shape, align_corners=False, mode=mode)
    dst_weight = torch.flatten(dst_weight, 2).transpose(1, 2)

    return torch.cat((extra_tokens, dst_weight), dim=1)
Resize pos_embed weights. Args: pos_embed (torch.Tensor): Position embedding weights with shape [1, L, C]. src_shape (tuple): The resolution of downsampled origin training image, in format (T, H, W). dst_shape (tuple): The resolution of downsampled new training image, in format (T, H, W). mode (str): Algorithm used for upsampling. Choose one from 'nearest', 'linear', 'bilinear', 'bicubic' and 'trilinear'. Defaults to 'trilinear'. num_extra_tokens (int): The number of extra tokens, such as cls_token. Defaults to 1. Returns: torch.Tensor: The resized pos_embed of shape [1, L_new, C]
resize_pos_embed
python
open-mmlab/mmaction2
mmaction/models/backbones/mvit.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mvit.py
Apache-2.0
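A usage sketch, assuming resize_pos_embed above is in scope: stretch a learned (1, 1 + T*H*W, C) table from an 8x14x14 token grid to 16x14x14 while keeping the class token untouched.

import torch

C = 768
pos_embed = torch.randn(1, 1 + 8 * 14 * 14, C)  # 1 cls token + patches
resized = resize_pos_embed(
    pos_embed, src_shape=(8, 14, 14), dst_shape=(16, 14, 14))
assert resized.shape == (1, 1 + 16 * 14 * 14, C)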
def resize_decomposed_rel_pos(rel_pos: torch.Tensor, q_size: int,
                              k_size: int) -> torch.Tensor:
    """Get relative positional embeddings according to the relative positions
    of query and key sizes.

    Args:
        rel_pos (Tensor): relative position embeddings (L, C).
        q_size (int): size of query q.
        k_size (int): size of key k.

    Returns:
        Extracted positional embeddings according to relative positions.
    """
    max_rel_dist = int(2 * max(q_size, k_size) - 1)
    # Interpolate rel pos if needed.
    if rel_pos.shape[0] != max_rel_dist:
        # Interpolate rel pos.
        resized = F.interpolate(
            # (L, C) -> (1, C, L)
            rel_pos.transpose(0, 1).unsqueeze(0),
            size=max_rel_dist,
            mode='linear',
        )
        # (1, C, L) -> (L, C)
        resized = resized.squeeze(0).transpose(0, 1)
    else:
        resized = rel_pos

    # Scale the coords with short length if shapes for q and k are different.
    q_h_ratio = max(k_size / q_size, 1.0)
    k_h_ratio = max(q_size / k_size, 1.0)
    q_coords = torch.arange(q_size)[:, None] * q_h_ratio
    k_coords = torch.arange(k_size)[None, :] * k_h_ratio
    relative_coords = (q_coords - k_coords) + (k_size - 1) * k_h_ratio

    return resized[relative_coords.long()]
Get relative positional embeddings according to the relative positions of query and key sizes. Args: rel_pos (Tensor): relative position embeddings (L, C). q_size (int): size of query q. k_size (int): size of key k. Returns: Extracted positional embeddings according to relative positions.
resize_decomposed_rel_pos
python
open-mmlab/mmaction2
mmaction/models/backbones/mvit.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mvit.py
Apache-2.0
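A hedged shape check, assuming resize_decomposed_rel_pos above is in scope: a table trained for q_size = k_size = 7 has 2*7 - 1 = 13 rows; querying with q_size=14 linearly interpolates it to 2*14 - 1 = 27 rows and gathers a (q_size, k_size, C) lookup.

import torch

rel_pos = torch.randn(13, 64)  # (L, C) from a 7x7 training resolution
out = resize_decomposed_rel_pos(rel_pos, q_size=14, k_size=7)
assert out.shape == (14, 7, 64)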
def add_decomposed_rel_pos(attn: torch.Tensor,
                           q: torch.Tensor,
                           q_shape: Sequence[int],
                           k_shape: Sequence[int],
                           rel_pos_h: torch.Tensor,
                           rel_pos_w: torch.Tensor,
                           rel_pos_t: torch.Tensor,
                           with_cls_token: bool = False) -> torch.Tensor:
    """Spatiotemporal Relative Positional Embeddings."""
    sp_idx = 1 if with_cls_token else 0
    B, num_heads, _, C = q.shape
    q_t, q_h, q_w = q_shape
    k_t, k_h, k_w = k_shape

    Rt = resize_decomposed_rel_pos(rel_pos_t, q_t, k_t)
    Rh = resize_decomposed_rel_pos(rel_pos_h, q_h, k_h)
    Rw = resize_decomposed_rel_pos(rel_pos_w, q_w, k_w)

    r_q = q[:, :, sp_idx:].reshape(B, num_heads, q_t, q_h, q_w, C)
    rel_t = torch.einsum('bythwc,tkc->bythwk', r_q, Rt)
    rel_h = torch.einsum('bythwc,hkc->bythwk', r_q, Rh)
    rel_w = torch.einsum('bythwc,wkc->bythwk', r_q, Rw)
    rel_pos_embed = (
        rel_t[:, :, :, :, :, :, None, None] +
        rel_h[:, :, :, :, :, None, :, None] +
        rel_w[:, :, :, :, :, None, None, :])

    attn_map = attn[:, :, sp_idx:, sp_idx:].view(B, -1, q_t, q_h, q_w, k_t,
                                                 k_h, k_w)
    attn_map += rel_pos_embed
    attn[:, :, sp_idx:, sp_idx:] = attn_map.view(B, -1, q_t * q_h * q_w,
                                                 k_t * k_h * k_w)

    return attn
Spatiotemporal Relative Positional Embeddings.
add_decomposed_rel_pos
python
open-mmlab/mmaction2
mmaction/models/backbones/mvit.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mvit.py
Apache-2.0
def attention_pool(x: torch.Tensor,
                   pool: nn.Module,
                   in_size: Tuple[int],
                   with_cls_token: bool = False,
                   norm: Optional[nn.Module] = None) -> tuple:
    """Pooling the feature tokens.

    Args:
        x (torch.Tensor): The input tensor, should be with shape
            ``(B, num_heads, L, C)`` or ``(B, L, C)``.
        pool (nn.Module): The pooling module.
        in_size (Tuple[int]): The shape of the input feature map.
        with_cls_token (bool): Whether concatenating class token into video
            tokens as transformer input. Defaults to False.
        norm (nn.Module, optional): The normalization module.
            Defaults to None.
    """
    ndim = x.ndim
    if ndim == 4:
        B, num_heads, L, C = x.shape
    elif ndim == 3:
        num_heads = 1
        B, L, C = x.shape
        x = x.unsqueeze(1)
    else:
        raise RuntimeError(f'Unsupported input dimension {x.shape}')

    T, H, W = in_size
    assert L == T * H * W + with_cls_token

    if with_cls_token:
        cls_tok, x = x[:, :, :1, :], x[:, :, 1:, :]

    # (B, num_heads, T*H*W, C) -> (B*num_heads, C, T, H, W)
    x = x.reshape(B * num_heads, T, H, W, C).permute(0, 4, 1, 2,
                                                     3).contiguous()
    x = pool(x)
    out_size = x.shape[2:]

    # (B*num_heads, C, T', H', W') -> (B, num_heads, T'*H'*W', C)
    x = x.reshape(B, num_heads, C, -1).transpose(2, 3)
    if with_cls_token:
        x = torch.cat((cls_tok, x), dim=2)
    if norm is not None:
        x = norm(x)

    if ndim == 3:
        x = x.squeeze(1)

    return x, out_size
Pooling the feature tokens. Args: x (torch.Tensor): The input tensor, should be with shape ``(B, num_heads, L, C)`` or ``(B, L, C)``. pool (nn.Module): The pooling module. in_size (Tuple[int]): The shape of the input feature map. with_cls_token (bool): Whether concatenating class token into video tokens as transformer input. Defaults to False. norm (nn.Module, optional): The normalization module. Defaults to None.
attention_pool
python
open-mmlab/mmaction2
mmaction/models/backbones/mvit.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mvit.py
Apache-2.0
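A usage sketch, assuming attention_pool above is in scope: halve the spatial axes of multi-head tokens with a stride-2 max pool and read the new grid from out_size.

import torch
import torch.nn as nn

B, num_heads, C = 2, 4, 16
T, H, W = 4, 8, 8
x = torch.randn(B, num_heads, T * H * W, C)

pool = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
out, out_size = attention_pool(x, pool, in_size=(T, H, W))
assert tuple(out_size) == (4, 4, 4)
assert out.shape == (B, num_heads, 4 * 4 * 4, C)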
def init_weights(self) -> None:
    """Weight initialization."""
    super().init_weights()

    if (isinstance(self.init_cfg, dict)
            and get_str_type(self.init_cfg['type']) == 'Pretrained'):
        # Suppress rel_pos_zero_init if use pretrained model.
        return

    if not self.rel_pos_zero_init:
        trunc_normal_(self.rel_pos_h, std=0.02)
        trunc_normal_(self.rel_pos_w, std=0.02)
        trunc_normal_(self.rel_pos_t, std=0.02)
Weight initialization.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/mvit.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mvit.py
Apache-2.0
def forward(self, x: torch.Tensor, in_size: Tuple[int]) -> tuple:
    """Forward the MultiScaleAttention."""
    B, N, _ = x.shape  # (B, H*W, C)

    # qkv: (B, H*W, 3, num_heads, C)
    qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1)
    # q, k, v: (B, num_heads, H*W, C)
    q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0)

    q, q_shape = attention_pool(
        q,
        self.pool_q,
        in_size,
        norm=self.norm_q,
        with_cls_token=self.with_cls_token)
    k, k_shape = attention_pool(
        k,
        self.pool_k,
        in_size,
        norm=self.norm_k,
        with_cls_token=self.with_cls_token)
    v, v_shape = attention_pool(
        v,
        self.pool_v,
        in_size,
        norm=self.norm_v,
        with_cls_token=self.with_cls_token)

    attn = (q * self.scale) @ k.transpose(-2, -1)
    if self.rel_pos_embed:
        attn = add_decomposed_rel_pos(attn, q, q_shape, k_shape,
                                      self.rel_pos_h, self.rel_pos_w,
                                      self.rel_pos_t, self.with_cls_token)

    attn = attn.softmax(dim=-1)
    x = attn @ v

    if self.residual_pooling:
        if self.with_cls_token:
            x[:, :, 1:, :] += q[:, :, 1:, :]
        else:
            x = x + q

    # (B, num_heads, H'*W', C'//num_heads) -> (B, H'*W', C')
    x = x.transpose(1, 2).reshape(B, -1, self.out_dims)
    x = self.proj(x)

    return x, q_shape
Forward the MultiScaleAttention.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/mvit.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mvit.py
Apache-2.0
def forward(self, x: torch.Tensor) -> \
        Tuple[Union[torch.Tensor, List[torch.Tensor]]]:
    """Forward the MViT."""
    B = x.shape[0]
    x, patch_resolution = self.patch_embed(x)

    cls_tokens = self.cls_token.expand(B, -1, -1)
    x = torch.cat((cls_tokens, x), dim=1)

    if self.use_abs_pos_embed:
        x = x + resize_pos_embed(
            self.pos_embed,
            self.patch_resolution,
            patch_resolution,
            mode=self.interpolate_mode,
            num_extra_tokens=self.num_extra_tokens)

    if not self.with_cls_token:
        # Remove class token for transformer encoder input
        x = x[:, 1:]

    outs = []
    for i, block in enumerate(self.blocks):
        x, patch_resolution = block(x, patch_resolution)

        if i in self.stage_indices:
            stage_index = self.stage_indices[i]
            if stage_index in self.out_scales:
                B, _, C = x.shape
                x = getattr(self, f'norm{stage_index}')(x)
                tokens = x.transpose(1, 2)
                if self.with_cls_token:
                    patch_token = tokens[:, :, 1:].reshape(
                        B, C, *patch_resolution)
                    cls_token = tokens[:, :, 0]
                else:
                    patch_token = tokens.reshape(B, C, *patch_resolution)
                    cls_token = None
                if self.output_cls_token:
                    out = [patch_token, cls_token]
                else:
                    out = patch_token
                outs.append(out)

    return tuple(outs)
Forward the MViT.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/mvit.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mvit.py
Apache-2.0
def conv_3xnxn(inp: int,
               oup: int,
               kernel_size: int = 3,
               stride: int = 3,
               groups: int = 1):
    """3D convolution with kernel size of 3xnxn.

    Args:
        inp (int): Dimension of input features.
        oup (int): Dimension of output features.
        kernel_size (int): The spatial kernel size (i.e., n).
            Defaults to 3.
        stride (int): The spatial stride. Defaults to 3.
        groups (int): Group number of operated features.
            Defaults to 1.
    """
    return nn.Conv3d(
        inp,
        oup, (3, kernel_size, kernel_size), (2, stride, stride), (1, 0, 0),
        groups=groups)
3D convolution with kernel size of 3xnxn. Args: inp (int): Dimension of input features. oup (int): Dimension of output features. kernel_size (int): The spatial kernel size (i.e., n). Defaults to 3. stride (int): The spatial stride. Defaults to 3. groups (int): Group number of operated features. Defaults to 1.
conv_3xnxn
python
open-mmlab/mmaction2
mmaction/models/backbones/uniformer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformer.py
Apache-2.0
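A quick shape check for conv_3xnxn (a minimal sketch; the input size and the kernel_size=4, stride=4 patch-embedding-style settings are illustrative, not taken from a UniFormer config):

import torch
import torch.nn as nn

def conv_3xnxn(inp, oup, kernel_size=3, stride=3, groups=1):
    # copied from the helper above: temporal kernel 3 / stride 2 / pad 1
    return nn.Conv3d(inp, oup, (3, kernel_size, kernel_size),
                     (2, stride, stride), (1, 0, 0), groups=groups)

x = torch.randn(1, 3, 16, 224, 224)  # (B, C, T, H, W)
stem = conv_3xnxn(3, 64, kernel_size=4, stride=4)
print(stem(x).shape)  # torch.Size([1, 64, 8, 56, 56])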
def conv_1xnxn(inp: int,
               oup: int,
               kernel_size: int = 3,
               stride: int = 3,
               groups: int = 1):
    """3D convolution with kernel size of 1xnxn.

    Args:
        inp (int): Dimension of input features.
        oup (int): Dimension of output features.
        kernel_size (int): The spatial kernel size (i.e., n).
            Defaults to 3.
        stride (int): The spatial stride. Defaults to 3.
        groups (int): Group number of operated features.
            Defaults to 1.
    """
    return nn.Conv3d(
        inp,
        oup, (1, kernel_size, kernel_size), (1, stride, stride), (0, 0, 0),
        groups=groups)
3D convolution with kernel size of 1xnxn. Args: inp (int): Dimension of input features. oup (int): Dimension of output features. kernel_size (int): The spatial kernel size (i.e., n). Defaults to 3. stride (int): The spatial stride. Defaults to 3. groups (int): Group number of operated features. Defaults to 1.
conv_1xnxn
python
open-mmlab/mmaction2
mmaction/models/backbones/uniformer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformer.py
Apache-2.0
def conv_1x1x1(inp: int, oup: int, groups: int = 1):
    """3D convolution with kernel size of 1x1x1.

    Args:
        inp (int): Dimension of input features.
        oup (int): Dimension of output features.
        groups (int): Group number of operated features. Defaults to 1.
    """
    return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups)
3D convolution with kernel size of 1x1x1. Args: inp (int): Dimension of input features. oup (int): Dimension of output features. groups (int): Group number of operated features. Defaults to 1.
conv_1x1x1
python
open-mmlab/mmaction2
mmaction/models/backbones/uniformer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformer.py
Apache-2.0
def conv_3x3x3(inp: int, oup: int, groups: int = 1):
    """3D convolution with kernel size of 3x3x3.

    Args:
        inp (int): Dimension of input features.
        oup (int): Dimension of output features.
        groups (int): Group number of operated features. Defaults to 1.
    """
    return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups)
3D convolution with kernel size of 3x3x3. Args: inp (int): Dimension of input features. oup (int): Dimension of output features. groups (int): Group number of operated features. Defaults to 1.
conv_3x3x3
python
open-mmlab/mmaction2
mmaction/models/backbones/uniformer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformer.py
Apache-2.0
def conv_5x5x5(inp: int, oup: int, groups: int = 1):
    """3D convolution with kernel size of 5x5x5.

    Args:
        inp (int): Dimension of input features.
        oup (int): Dimension of output features.
        groups (int): Group number of operated features. Defaults to 1.
    """
    return nn.Conv3d(inp, oup, (5, 5, 5), (1, 1, 1), (2, 2, 2), groups=groups)
3D convolution with kernel size of 5x5x5. Args: inp (int): Dimension of input features. oup (int): Dimension of output features. groups (int): Group number of operated features. Defaults to 1.
conv_5x5x5
python
open-mmlab/mmaction2
mmaction/models/backbones/uniformer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformer.py
Apache-2.0
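The 3x3x3 and 5x5x5 helpers use stride 1 with padding (k - 1) / 2 in every dimension, so they preserve the (T, H, W) resolution; a minimal sanity check (assumes the helper definitions above are in scope):

import torch

x = torch.randn(2, 32, 8, 14, 14)
assert conv_3x3x3(32, 32)(x).shape == x.shape
assert conv_5x5x5(32, 32, groups=32)(x).shape == x.shape  # depth-wise variant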
def bn_3d(dim):
    """3D batch normalization.

    Args:
        dim (int): Dimension of input features.
    """
    return nn.BatchNorm3d(dim)
3D batch normalization. Args: dim (int): Dimension of input features.
bn_3d
python
open-mmlab/mmaction2
mmaction/models/backbones/uniformer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformer.py
Apache-2.0
def _load_pretrained(self, pretrained: str = None) -> None:
    """Load ImageNet-1K pretrained model.

    The model is pretrained with ImageNet-1K.
    https://github.com/Sense-X/UniFormer

    Args:
        pretrained (str): Model name of ImageNet-1K pretrained model.
            Defaults to None.
    """
    if pretrained is not None:
        model_path = _MODELS[pretrained]
        logger.info(f'Load ImageNet pretrained model from {model_path}')
        state_dict = _load_checkpoint(model_path, map_location='cpu')
        state_dict_3d = self.state_dict()
        for k in state_dict.keys():
            if k in state_dict_3d.keys(
            ) and state_dict[k].shape != state_dict_3d[k].shape:
                if len(state_dict_3d[k].shape) <= 2:
                    logger.info(f'Ignore: {k}')
                    continue
                logger.info(f'Inflate: {k}, {state_dict[k].shape}' +
                            f' => {state_dict_3d[k].shape}')
                time_dim = state_dict_3d[k].shape[2]
                state_dict[k] = self._inflate_weight(
                    state_dict[k], time_dim)
        self.load_state_dict(state_dict, strict=False)
Load ImageNet-1K pretrained model. The model is pretrained with ImageNet-1K. https://github.com/Sense-X/UniFormer Args: pretrained (str): Model name of ImageNet-1K pretrained model. Defaults to None.
_load_pretrained
python
open-mmlab/mmaction2
mmaction/models/backbones/uniformer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformer.py
Apache-2.0
def init_weights(self):
    """Initialize the weights in backbone."""
    if self.pretrained2d:
        logger = MMLogger.get_current_instance()
        logger.info(f'load model from: {self.pretrained}')
        self._load_pretrained(self.pretrained)
    else:
        if self.pretrained:
            self.init_cfg = dict(
                type='Pretrained', checkpoint=self.pretrained)
        super().init_weights()
Initialize the weights in backbone.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/uniformer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformer.py
Apache-2.0
def _round_width(width, multiplier, min_width=8, divisor=8):
    """Round width of filters based on width multiplier."""
    width *= multiplier
    min_width = min_width or divisor
    width_out = max(min_width,
                    int(width + divisor / 2) // divisor * divisor)
    if width_out < 0.9 * width:
        width_out += divisor
    return int(width_out)
Round width of filters based on width multiplier.
_round_width
python
open-mmlab/mmaction2
mmaction/models/backbones/x3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py
Apache-2.0
def forward(self, x):
    """Defines the computation performed at every call.

    Args:
        x (Tensor): The input data.

    Returns:
        Tensor: The output of the module.
    """
    module_input = x
    x = self.avg_pool(x)
    x = self.fc1(x)
    x = self.relu(x)
    x = self.fc2(x)
    x = self.sigmoid(x)
    return module_input * x
Defines the computation performed at every call. Args: x (Tensor): The input data. Returns: Tensor: The output of the module.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/x3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py
Apache-2.0
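A self-contained sketch of the squeeze-and-excitation path traced by the forward above; the concrete layer types (AdaptiveAvgPool3d and 1x1x1 convs for fc1/fc2) are assumptions about how the module is built, not copied from the X3D source:

import torch
import torch.nn as nn

class TinySE(nn.Module):
    def __init__(self, channels, ratio=1 / 16):
        super().__init__()
        hidden = max(1, int(channels * ratio))
        self.avg_pool = nn.AdaptiveAvgPool3d(1)  # squeeze to (B, C, 1, 1, 1)
        self.fc1 = nn.Conv3d(channels, hidden, 1)
        self.relu = nn.ReLU()
        self.fc2 = nn.Conv3d(hidden, channels, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        scale = self.sigmoid(self.fc2(self.relu(self.fc1(self.avg_pool(x)))))
        return x * scale  # channel-wise gating, shape preserved

print(TinySE(24)(torch.randn(1, 24, 4, 7, 7)).shape)  # (1, 24, 4, 7, 7)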
def _inner_forward(x):
    """Forward wrapper for utilizing checkpoint."""
    identity = x

    out = self.conv1(x)
    out = self.conv2(out)
    if self.se_ratio is not None:
        out = self.se_module(out)

    out = self.swish(out)

    out = self.conv3(out)

    if self.downsample is not None:
        identity = self.downsample(x)

    out = out + identity
    return out
Forward wrapper for utilizing checkpoint.
forward._inner_forward
python
open-mmlab/mmaction2
mmaction/models/backbones/x3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py
Apache-2.0
def forward(self, x):
    """Defines the computation performed at every call."""

    def _inner_forward(x):
        """Forward wrapper for utilizing checkpoint."""
        identity = x

        out = self.conv1(x)
        out = self.conv2(out)
        if self.se_ratio is not None:
            out = self.se_module(out)

        out = self.swish(out)

        out = self.conv3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out = out + identity
        return out

    if self.with_cp and x.requires_grad:
        out = cp.checkpoint(_inner_forward, x)
    else:
        out = _inner_forward(x)
    out = self.relu(out)
    return out
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/x3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py
Apache-2.0
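The with_cp branch above trades compute for memory via torch.utils.checkpoint; a minimal standalone sketch (the Sequential stand-in is a placeholder, not the real BlockX3D layers):

import torch
import torch.utils.checkpoint as cp

layer = torch.nn.Sequential(
    torch.nn.Conv3d(8, 8, 3, padding=1), torch.nn.ReLU())
x = torch.randn(1, 8, 4, 8, 8, requires_grad=True)
# activations inside `layer` are recomputed during backward instead of stored
out = cp.checkpoint(layer, x)
out.sum().backward()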
def _round_width(width, multiplier, min_depth=8, divisor=8):
    """Round width of filters based on width multiplier."""
    if not multiplier:
        return width
    width *= multiplier
    min_depth = min_depth or divisor
    new_filters = max(min_depth,
                      int(width + divisor / 2) // divisor * divisor)
    if new_filters < 0.9 * width:
        new_filters += divisor
    return int(new_filters)
Round width of filters based on width multiplier.
_round_width
python
open-mmlab/mmaction2
mmaction/models/backbones/x3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py
Apache-2.0
def _round_repeats(repeats, multiplier):
    """Round number of layers based on depth multiplier."""
    if not multiplier:
        return repeats
    return int(math.ceil(multiplier * repeats))
Round number of layers based on depth multiplier.
_round_repeats
python
open-mmlab/mmaction2
mmaction/models/backbones/x3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py
Apache-2.0
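Worked examples of the two scaling rules above (the 2.2 multiplier is illustrative, not an official X3D setting; assumes the two helpers above are accessible as plain functions):

print(_round_width(24, 2.2))   # 24 * 2.2 = 52.8 -> int(52.8 + 4) // 8 * 8 = 56
print(_round_width(24, 2.0))   # 48.0 is already a multiple of 8 -> 48
print(_round_repeats(3, 2.2))  # ceil(2.2 * 3) = 7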
def make_res_layer(self,
                   block,
                   layer_inplanes,
                   inplanes,
                   planes,
                   blocks,
                   spatial_stride=1,
                   se_style='half',
                   se_ratio=None,
                   use_swish=True,
                   norm_cfg=None,
                   act_cfg=None,
                   conv_cfg=None,
                   with_cp=False,
                   **kwargs):
    """Build residual layer for ResNet3D.

    Args:
        block (nn.Module): Residual module to be built.
        layer_inplanes (int): Number of channels for the input feature
            of the res layer.
        inplanes (int): Number of channels for the input feature in each
            block, which equals to base_channels * gamma_w.
        planes (int): Number of channels for the output feature in each
            block, which equals to base_channel * gamma_w * gamma_b.
        blocks (int): Number of residual blocks.
        spatial_stride (int): Spatial strides in residual and conv
            layers. Default: 1.
        se_style (str): The style of inserting SE modules into BlockX3D,
            'half' denotes insert into half of the blocks, while 'all'
            denotes insert into all blocks. Default: 'half'.
        se_ratio (float | None): The reduction ratio of squeeze and
            excitation unit. If set as None, it means not using SE unit.
            Default: None.
        use_swish (bool): Whether to use swish as the activation function
            before and after the 3x3x3 conv. Default: True.
        conv_cfg (dict | None): Config for conv layers. Default: None.
        norm_cfg (dict | None): Config for norm layers. Default: None.
        act_cfg (dict | None): Config for activation layers.
            Default: None.
        with_cp (bool | None): Use checkpoint or not. Using checkpoint
            will save some memory while slowing down the training speed.
            Default: False.

    Returns:
        nn.Module: A residual layer for the given config.
    """
    downsample = None
    if spatial_stride != 1 or layer_inplanes != inplanes:
        downsample = ConvModule(
            layer_inplanes,
            inplanes,
            kernel_size=1,
            stride=(1, spatial_stride, spatial_stride),
            padding=0,
            bias=False,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

    use_se = [False] * blocks
    if self.se_style == 'all':
        use_se = [True] * blocks
    elif self.se_style == 'half':
        use_se = [i % 2 == 0 for i in range(blocks)]
    else:
        raise NotImplementedError

    layers = []
    layers.append(
        block(
            layer_inplanes,
            planes,
            inplanes,
            spatial_stride=spatial_stride,
            downsample=downsample,
            se_ratio=se_ratio if use_se[0] else None,
            use_swish=use_swish,
            norm_cfg=norm_cfg,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg,
            with_cp=with_cp,
            **kwargs))

    for i in range(1, blocks):
        layers.append(
            block(
                inplanes,
                planes,
                inplanes,
                spatial_stride=1,
                se_ratio=se_ratio if use_se[i] else None,
                use_swish=use_swish,
                norm_cfg=norm_cfg,
                conv_cfg=conv_cfg,
                act_cfg=act_cfg,
                with_cp=with_cp,
                **kwargs))

    return nn.Sequential(*layers)

Build residual layer for ResNet3D. Args: block (nn.Module): Residual module to be built. layer_inplanes (int): Number of channels for the input feature of the res layer. inplanes (int): Number of channels for the input feature in each block, which equals to base_channels * gamma_w. planes (int): Number of channels for the output feature in each block, which equals to base_channel * gamma_w * gamma_b. blocks (int): Number of residual blocks. spatial_stride (int): Spatial strides in residual and conv layers. Default: 1. se_style (str): The style of inserting SE modules into BlockX3D, 'half' denotes insert into half of the blocks, while 'all' denotes insert into all blocks. Default: 'half'. se_ratio (float | None): The reduction ratio of squeeze and excitation unit. If set as None, it means not using SE unit. Default: None. use_swish (bool): Whether to use swish as the activation function before and after the 3x3x3 conv. Default: True. conv_cfg (dict | None): Config for conv layers. Default: None. norm_cfg (dict | None): Config for norm layers. Default: None. act_cfg (dict | None): Config for activation layers. Default: None. with_cp (bool | None): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. Returns: nn.Module: A residual layer for the given config.
make_res_layer
python
open-mmlab/mmaction2
mmaction/models/backbones/x3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py
Apache-2.0
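The 'half' SE style above inserts an SE unit into every other block, starting with the first; for example:

blocks = 5
use_se = [i % 2 == 0 for i in range(blocks)]
print(use_se)  # [True, False, True, False, True]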
def _make_stem_layer(self):
    """Construct the stem layers: a spatial conv module followed by a
    depthwise temporal conv+norm+act module."""
    self.conv1_s = ConvModule(
        self.in_channels,
        self.base_channels,
        kernel_size=(1, 3, 3),
        stride=(1, 2, 2),
        padding=(0, 1, 1),
        bias=False,
        conv_cfg=self.conv_cfg,
        norm_cfg=None,
        act_cfg=None)
    self.conv1_t = ConvModule(
        self.base_channels,
        self.base_channels,
        kernel_size=(5, 1, 1),
        stride=(1, 1, 1),
        padding=(2, 0, 0),
        groups=self.base_channels,
        bias=False,
        conv_cfg=self.conv_cfg,
        norm_cfg=self.norm_cfg,
        act_cfg=self.act_cfg)

Construct the stem layers: a spatial conv module followed by a depthwise temporal conv+norm+act module.
_make_stem_layer
python
open-mmlab/mmaction2
mmaction/models/backbones/x3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py
Apache-2.0
def _freeze_stages(self):
    """Prevent all the parameters from being optimized before
    ``self.frozen_stages``."""
    if self.frozen_stages >= 0:
        self.conv1_s.eval()
        self.conv1_t.eval()
        for param in self.conv1_s.parameters():
            param.requires_grad = False
        for param in self.conv1_t.parameters():
            param.requires_grad = False

    for i in range(1, self.frozen_stages + 1):
        m = getattr(self, f'layer{i}')
        m.eval()
        for param in m.parameters():
            param.requires_grad = False
Prevent all the parameters from being optimized before ``self.frozen_stages``.
_freeze_stages
python
open-mmlab/mmaction2
mmaction/models/backbones/x3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py
Apache-2.0
def init_weights(self):
    """Initiate the parameters either from existing checkpoint or from
    scratch."""
    if isinstance(self.pretrained, str):
        logger = MMLogger.get_current_instance()
        logger.info(f'load model from: {self.pretrained}')

        load_checkpoint(self, self.pretrained, strict=False, logger=logger)

    elif self.pretrained is None:
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                kaiming_init(m)
            elif isinstance(m, _BatchNorm):
                constant_init(m, 1)

        if self.zero_init_residual:
            for m in self.modules():
                if isinstance(m, BlockX3D):
                    constant_init(m.conv3.bn, 0)
    else:
        raise TypeError('pretrained must be a str or None')
Initiate the parameters either from existing checkpoint or from scratch.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/x3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py
Apache-2.0
def forward(self, x):
    """Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        torch.Tensor: The feature of the input samples extracted by the
        backbone.
    """
    x = self.conv1_s(x)
    x = self.conv1_t(x)
    for layer_name in self.res_layers:
        res_layer = getattr(self, layer_name)
        x = res_layer(x)
    x = self.conv5(x)
    return x
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The feature of the input samples extracted by the backbone.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/x3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py
Apache-2.0
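Hypothetical end-to-end usage; the constructor arguments and the import path are assumptions about a typical X3D-M-like setup, not verified defaults:

import torch
from mmaction.models import X3D  # assumed to be exported here

model = X3D(gamma_w=1, gamma_b=2.25, gamma_d=2.2)  # illustrative multipliers
model.init_weights()
model.eval()
with torch.no_grad():
    feat = model(torch.randn(1, 3, 16, 224, 224))  # (B, C, T, H, W)
print(feat.shape)  # final conv5 feature map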
def train(self, mode=True):
    """Set the optimization status when training."""
    super().train(mode)
    self._freeze_stages()
    if mode and self.norm_eval:
        for m in self.modules():
            if isinstance(m, _BatchNorm):
                m.eval()
Set the optimization status when training.
train
python
open-mmlab/mmaction2
mmaction/models/backbones/x3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py
Apache-2.0
def _load_pretrained(self, pretrained: str = None) -> None:
    """Load CLIP pretrained visual encoder.

    The visual encoder is extracted from CLIP.
    https://github.com/openai/CLIP

    Args:
        pretrained (str): Model name of pretrained CLIP visual encoder.
            Defaults to None.
    """
    assert pretrained is not None, \
        'please specify clip pretrained checkpoint'

    model_path = _MODELS[pretrained]
    logger.info(f'Load CLIP pretrained model from {model_path}')
    state_dict = _load_checkpoint(model_path, map_location='cpu')
    state_dict_3d = self.state_dict()
    for k in state_dict.keys():
        if k in state_dict_3d.keys(
        ) and state_dict[k].shape != state_dict_3d[k].shape:
            if len(state_dict_3d[k].shape) <= 2:
                logger.info(f'Ignore: {k}')
                continue
            logger.info(f'Inflate: {k}, {state_dict[k].shape}' +
                        f' => {state_dict_3d[k].shape}')
            time_dim = state_dict_3d[k].shape[2]
            state_dict[k] = self._inflate_weight(state_dict[k], time_dim)
    self.load_state_dict(state_dict, strict=False)
Load CLIP pretrained visual encoder. The visual encoder is extracted from CLIP. https://github.com/openai/CLIP Args: pretrained (str): Model name of pretrained CLIP visual encoder. Defaults to None.
_load_pretrained
python
open-mmlab/mmaction2
mmaction/models/backbones/uniformerv2.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformerv2.py
Apache-2.0
def init_weights(self):
    """Initialize the weights in backbone."""
    if self.clip_pretrained:
        logger = MMLogger.get_current_instance()
        logger.info(f'load model from: {self.pretrained}')
        self._load_pretrained(self.pretrained)
    else:
        if self.pretrained:
            self.init_cfg = dict(
                type='Pretrained', checkpoint=self.pretrained)
        super().init_weights()
Initialize the weights in backbone.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/uniformerv2.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformerv2.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        torch.Tensor: The output of the module.
    """
    identity = x

    out = self.conv1(x)
    out = self.conv2(out)

    if self.downsample is not None:
        identity = self.downsample(x)

    out = out + identity
    out = self.relu(out)

    return out
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The output of the module.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
def _inner_forward(x):
    """Forward wrapper for utilizing checkpoint."""
    identity = x

    out = self.conv1(x)
    out = self.conv2(out)
    out = self.conv3(out)

    if self.downsample is not None:
        identity = self.downsample(x)

    out = out + identity
    return out
Forward wrapper for utilizing checkpoint.
forward._inner_forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        torch.Tensor: The output of the module.
    """

    def _inner_forward(x):
        """Forward wrapper for utilizing checkpoint."""
        identity = x

        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out = out + identity
        return out

    if self.with_cp and x.requires_grad:
        out = cp.checkpoint(_inner_forward, x)
    else:
        out = _inner_forward(x)

    out = self.relu(out)

    return out
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The output of the module.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
def make_res_layer(block: nn.Module,
                   inplanes: int,
                   planes: int,
                   blocks: int,
                   stride: int = 1,
                   dilation: int = 1,
                   style: str = 'pytorch',
                   conv_cfg: Optional[ConfigType] = None,
                   norm_cfg: Optional[ConfigType] = None,
                   act_cfg: Optional[ConfigType] = None,
                   with_cp: bool = False) -> nn.Module:
    """Build residual layer for ResNet.

    Args:
        block (nn.Module): Residual module to be built.
        inplanes (int): Number of channels for the input feature
            in each block.
        planes (int): Number of channels for the output feature
            in each block.
        blocks (int): Number of residual blocks.
        stride (int): Stride in the conv layer. Defaults to 1.
        dilation (int): Spacing between kernel elements. Defaults to 1.
        style (str): ``pytorch`` or ``caffe``. If set to ``pytorch``, the
            stride-two layer is the 3x3 conv layer, otherwise the
            stride-two layer is the first 1x1 conv layer.
            Defaults to ``pytorch``.
        conv_cfg (Union[dict, ConfigDict], optional): Config for conv
            layers. Defaults to None.
        norm_cfg (Union[dict, ConfigDict], optional): Config for norm
            layers. Defaults to None.
        act_cfg (Union[dict, ConfigDict], optional): Config for
            activation layers. Defaults to None.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
            Defaults to False.

    Returns:
        nn.Module: A residual layer for the given config.
    """
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = ConvModule(
            inplanes,
            planes * block.expansion,
            kernel_size=1,
            stride=stride,
            bias=False,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

    layers = []
    layers.append(
        block(
            inplanes,
            planes,
            stride,
            dilation,
            downsample,
            style=style,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            with_cp=with_cp))
    inplanes = planes * block.expansion
    for _ in range(1, blocks):
        layers.append(
            block(
                inplanes,
                planes,
                1,
                dilation,
                style=style,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg,
                with_cp=with_cp))

    return nn.Sequential(*layers)

Build residual layer for ResNet. Args: block (nn.Module): Residual module to be built. inplanes (int): Number of channels for the input feature in each block. planes (int): Number of channels for the output feature in each block. blocks (int): Number of residual blocks. stride (int): Stride in the conv layer. Defaults to 1. dilation (int): Spacing between kernel elements. Defaults to 1. style (str): ``pytorch`` or ``caffe``. If set to ``pytorch``, the stride-two layer is the 3x3 conv layer, otherwise the stride-two layer is the first 1x1 conv layer. Defaults to ``pytorch``. conv_cfg (Union[dict, ConfigDict], optional): Config for conv layers. Defaults to None. norm_cfg (Union[dict, ConfigDict], optional): Config for norm layers. Defaults to None. act_cfg (Union[dict, ConfigDict], optional): Config for activation layers. Defaults to None. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Defaults to False. Returns: nn.Module: A residual layer for the given config.
make_res_layer
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
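A hedged usage sketch for make_res_layer, assuming the Bottleneck class from this file is in scope; it builds a ResNet-50-style stage 2:

import torch

layer2 = make_res_layer(
    Bottleneck,
    inplanes=256,
    planes=128,
    blocks=4,
    stride=2,
    norm_cfg=dict(type='BN'),
    act_cfg=dict(type='ReLU'))
x = torch.randn(1, 256, 56, 56)
print(layer2(x).shape)  # (1, 512, 28, 28): 128 * expansion (4), stride 2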
def _make_stem_layer(self) -> None:
    """Construct the stem layers, which consist of a conv+norm+act module
    and a pooling layer."""
    self.conv1 = ConvModule(
        self.in_channels,
        64,
        kernel_size=7,
        stride=2,
        padding=3,
        bias=False,
        conv_cfg=self.conv_cfg,
        norm_cfg=self.norm_cfg,
        act_cfg=self.act_cfg)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

Construct the stem layers, which consist of a conv+norm+act module and a pooling layer.
_make_stem_layer
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
def _load_conv_params(conv: nn.Module, state_dict_tv: OrderedDict,
                      module_name_tv: str,
                      loaded_param_names: List[str]) -> None:
    """Load the conv parameters of resnet from torchvision.

    Args:
        conv (nn.Module): The destination conv module.
        state_dict_tv (OrderedDict): The state dict of pretrained
            torchvision model.
        module_name_tv (str): The name of corresponding conv module in
            the torchvision model.
        loaded_param_names (list[str]): List of parameters that have been
            loaded.
    """
    weight_tv_name = module_name_tv + '.weight'
    if conv.weight.data.shape == state_dict_tv[weight_tv_name].shape:
        conv.weight.data.copy_(state_dict_tv[weight_tv_name])
        loaded_param_names.append(weight_tv_name)

    if getattr(conv, 'bias') is not None:
        bias_tv_name = module_name_tv + '.bias'
        if conv.bias.data.shape == state_dict_tv[bias_tv_name].shape:
            conv.bias.data.copy_(state_dict_tv[bias_tv_name])
            loaded_param_names.append(bias_tv_name)
Load the conv parameters of resnet from torchvision. Args: conv (nn.Module): The destination conv module. state_dict_tv (OrderedDict): The state dict of pretrained torchvision model. module_name_tv (str): The name of corresponding conv module in the torchvision model. loaded_param_names (list[str]): List of parameters that have been loaded.
_load_conv_params
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
def _load_bn_params(bn: nn.Module, state_dict_tv: OrderedDict,
                    module_name_tv: str,
                    loaded_param_names: List[str]) -> None:
    """Load the bn parameters of resnet from torchvision.

    Args:
        bn (nn.Module): The destination bn module.
        state_dict_tv (OrderedDict): The state dict of pretrained
            torchvision model.
        module_name_tv (str): The name of corresponding bn module in the
            torchvision model.
        loaded_param_names (list[str]): List of parameters that have been
            loaded.
    """
    for param_name, param in bn.named_parameters():
        param_tv_name = f'{module_name_tv}.{param_name}'
        param_tv = state_dict_tv[param_tv_name]
        if param.data.shape == param_tv.shape:
            param.data.copy_(param_tv)
            loaded_param_names.append(param_tv_name)

    for param_name, param in bn.named_buffers():
        param_tv_name = f'{module_name_tv}.{param_name}'
        # some buffers like num_batches_tracked may not exist
        if param_tv_name in state_dict_tv:
            param_tv = state_dict_tv[param_tv_name]
            if param.data.shape == param_tv.shape:
                param.data.copy_(param_tv)
                loaded_param_names.append(param_tv_name)
Load the bn parameters of resnet from torchvision. Args: bn (nn.Module): The destination bn module. state_dict_tv (OrderedDict): The state dict of pretrained torchvision model. module_name_tv (str): The name of corresponding bn module in the torchvision model. loaded_param_names (list[str]): List of parameters that have been loaded.
_load_bn_params
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
def _load_torchvision_checkpoint(self,
                                 logger: mmengine.MMLogger = None) -> None:
    """Initiate the parameters from torchvision pretrained checkpoint."""
    state_dict_torchvision = _load_checkpoint(
        self.pretrained, map_location='cpu')
    if 'state_dict' in state_dict_torchvision:
        state_dict_torchvision = state_dict_torchvision['state_dict']

    loaded_param_names = []
    for name, module in self.named_modules():
        if isinstance(module, ConvModule):
            # we use a ConvModule to wrap conv+bn+relu layers, thus the
            # name mapping is needed
            if 'downsample' in name:
                # layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
                original_conv_name = name + '.0'
                # layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
                original_bn_name = name + '.1'
            else:
                # layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
                original_conv_name = name
                # layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
                original_bn_name = name.replace('conv', 'bn')
            self._load_conv_params(module.conv, state_dict_torchvision,
                                   original_conv_name, loaded_param_names)
            self._load_bn_params(module.bn, state_dict_torchvision,
                                 original_bn_name, loaded_param_names)

    # check if any parameters in the 2d checkpoint are not loaded
    remaining_names = set(
        state_dict_torchvision.keys()) - set(loaded_param_names)
    if remaining_names:
        logger.info(
            f'These parameters in pretrained checkpoint are not loaded'
            f': {remaining_names}')
Initiate the parameters from torchvision pretrained checkpoint.
_load_torchvision_checkpoint
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
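The comments above describe a pure string mapping between ConvModule child names and torchvision parameter names; it can be reproduced without any model:

conv_name = 'layer1.0.conv2'
bn_name = conv_name.replace('conv', 'bn')  # 'layer1.0.bn2'
ds = 'layer2.0.downsample'
print(bn_name, ds + '.0', ds + '.1')  # bn / downsample conv / downsample bn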
def init_weights(self) -> None:
    """Initiate the parameters either from existing checkpoint or from
    scratch."""
    if isinstance(self.pretrained, str):
        logger = MMLogger.get_current_instance()
        if self.torchvision_pretrain:
            # torchvision's
            self._load_torchvision_checkpoint(logger)
        else:
            # ours
            if self.pretrained:
                self.init_cfg = dict(
                    type='Pretrained', checkpoint=self.pretrained)
            super().init_weights()
    elif self.pretrained is None:
        super().init_weights()
    else:
        raise TypeError('pretrained must be a str or None')
Initiate the parameters either from existing checkpoint or from scratch.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
def forward(self, x: torch.Tensor) \
        -> Union[torch.Tensor, Tuple[torch.Tensor]]:
    """Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        Union[torch.Tensor or Tuple[torch.Tensor]]: The feature of the
        input samples extracted by the backbone.
    """
    x = self.conv1(x)
    x = self.maxpool(x)
    outs = []
    for i, layer_name in enumerate(self.res_layers):
        res_layer = getattr(self, layer_name)
        x = res_layer(x)
        if i in self.out_indices:
            outs.append(x)
    if len(outs) == 1:
        return outs[0]

    return tuple(outs)
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: Union[torch.Tensor or Tuple[torch.Tensor]]: The feature of the input samples extracted by the backbone.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
def _freeze_stages(self) -> None:
    """Prevent all the parameters from being optimized before
    ``self.frozen_stages``."""
    if self.frozen_stages >= 0:
        self.conv1.bn.eval()
        for m in self.conv1.modules():
            for param in m.parameters():
                param.requires_grad = False

    for i in range(1, self.frozen_stages + 1):
        m = getattr(self, f'layer{i}')
        m.eval()
        for param in m.parameters():
            param.requires_grad = False
Prevent all the parameters from being optimized before ``self.frozen_stages``.
_freeze_stages
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
def _partial_bn(self) -> None:
    """Freezing BatchNorm2D except the first one."""
    logger = MMLogger.get_current_instance()
    logger.info('Freezing BatchNorm2D except the first one.')
    count_bn = 0
    for m in self.modules():
        if isinstance(m, nn.BatchNorm2d):
            count_bn += 1
            if count_bn >= 2:
                m.eval()
                # shutdown update in frozen mode
                m.weight.requires_grad = False
                m.bias.requires_grad = False
Freezing BatchNorm2D except the first one.
_partial_bn
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
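A minimal reproduction of the partial-BN rule above: every BatchNorm2d after the first is put in eval mode with frozen affine parameters:

import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8),
                    nn.Conv2d(8, 8, 3), nn.BatchNorm2d(8))
count_bn = 0
for m in net.modules():
    if isinstance(m, nn.BatchNorm2d):
        count_bn += 1
        if count_bn >= 2:
            m.eval()
            m.weight.requires_grad = False
            m.bias.requires_grad = False
print([m.training for m in net.modules() if isinstance(m, nn.BatchNorm2d)])
# [True, False]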
def train(self, mode: bool = True) -> None:
    """Set the optimization status when training."""
    super().train(mode)
    self._freeze_stages()
    if mode and self.norm_eval:
        for m in self.modules():
            if isinstance(m, _BatchNorm):
                m.eval()
    if mode and self.partial_bn:
        self._partial_bn()
Set the optimization status when training.
train
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        torch.Tensor: The output of the module.
    """

    def _inner_forward(x):
        identity = x

        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity

        return out

    if self.with_cp and x.requires_grad:
        out = cp.checkpoint(_inner_forward, x)
    else:
        out = _inner_forward(x)

    out = self.relu(out)
    return out
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The output of the module.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_audio.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_audio.py
Apache-2.0
def make_res_layer(block: nn.Module,
                   inplanes: int,
                   planes: int,
                   blocks: int,
                   stride: int = 1,
                   dilation: int = 1,
                   factorize: int = 1,
                   norm_cfg: Optional[ConfigType] = None,
                   with_cp: bool = False) -> nn.Module:
    """Build residual layer for ResNetAudio.

    Args:
        block (nn.Module): Residual module to be built.
        inplanes (int): Number of channels for the input feature
            in each block.
        planes (int): Number of channels for the output feature
            in each block.
        blocks (int): Number of residual blocks.
        stride (int): Strides of residual blocks of each stage.
            Defaults to 1.
        dilation (int): Spacing between kernel elements. Defaults to 1.
        factorize (Union[int, Sequence[int]]): Determine whether to
            factorize for each block. Defaults to 1.
        norm_cfg (Union[dict, ConfigDict], optional): Config for norm
            layers. Defaults to None.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
            Defaults to False.

    Returns:
        nn.Module: A residual layer for the given config.
    """
    factorize = factorize if not isinstance(
        factorize, int) else (factorize, ) * blocks
    assert len(factorize) == blocks
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = ConvModule(
            inplanes,
            planes * block.expansion,
            kernel_size=1,
            stride=stride,
            bias=False,
            norm_cfg=norm_cfg,
            act_cfg=None)

    layers = []
    layers.append(
        block(
            inplanes,
            planes,
            stride,
            dilation,
            downsample,
            factorize=(factorize[0] == 1),
            norm_cfg=norm_cfg,
            with_cp=with_cp))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(
                inplanes,
                planes,
                1,
                dilation,
                factorize=(factorize[i] == 1),
                norm_cfg=norm_cfg,
                with_cp=with_cp))

    return nn.Sequential(*layers)

Build residual layer for ResNetAudio. Args: block (nn.Module): Residual module to be built. inplanes (int): Number of channels for the input feature in each block. planes (int): Number of channels for the output feature in each block. blocks (int): Number of residual blocks. stride (int): Strides of residual blocks of each stage. Defaults to 1. dilation (int): Spacing between kernel elements. Defaults to 1. factorize (Union[int, Sequence[int]]): Determine whether to factorize for each block. Defaults to 1. norm_cfg (Union[dict, ConfigDict], optional): Config for norm layers. Defaults to None. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Defaults to False. Returns: nn.Module: A residual layer for the given config.
make_res_layer
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_audio.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_audio.py
Apache-2.0
def _make_stem_layer(self) -> None:
    """Construct the stem layer, a ``conv+norm+act`` module."""
    self.conv1 = ConvModule(
        self.in_channels,
        self.base_channels,
        kernel_size=self.conv1_kernel,
        stride=self.conv1_stride,
        bias=False,
        conv_cfg=dict(type='ConvAudio', op='sum'),
        norm_cfg=self.norm_cfg,
        act_cfg=self.act_cfg)

Construct the stem layer, a ``conv+norm+act`` module.
_make_stem_layer
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_audio.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_audio.py
Apache-2.0
def _freeze_stages(self) -> None:
    """Prevent all the parameters from being optimized before
    ``self.frozen_stages``."""
    if self.frozen_stages >= 0:
        self.conv1.bn.eval()
        for m in [self.conv1.conv, self.conv1.bn]:
            for param in m.parameters():
                param.requires_grad = False

    for i in range(1, self.frozen_stages + 1):
        m = getattr(self, f'layer{i}')
        m.eval()
        for param in m.parameters():
            param.requires_grad = False
Prevent all the parameters from being optimized before ``self.frozen_stages``.
_freeze_stages
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_audio.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_audio.py
Apache-2.0
def init_weights(self) -> None:
    """Initiate the parameters either from existing checkpoint or from
    scratch."""
    if isinstance(self.pretrained, str):
        logger = MMLogger.get_current_instance()
        logger.info(f'load model from: {self.pretrained}')
        load_checkpoint(self, self.pretrained, strict=False, logger=logger)
    elif self.pretrained is None:
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                kaiming_init(m)
            elif isinstance(m, _BatchNorm):
                constant_init(m, 1)

        if self.zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck2dAudio):
                    constant_init(m.conv3.bn, 0)
    else:
        raise TypeError('pretrained must be a str or None')
Initiate the parameters either from existing checkpoint or from scratch.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_audio.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_audio.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        torch.Tensor: The feature of the input samples extracted by the
        backbone.
    """
    x = self.conv1(x)
    for layer_name in self.res_layers:
        res_layer = getattr(self, layer_name)
        x = res_layer(x)
    return x
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The feature of the input samples extracted by the backbone.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_audio.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_audio.py
Apache-2.0
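Hedged usage sketch for the audio backbone; the 1-channel spectrogram-like input shape, the constructor call, and the import path are assumptions about a typical setup, not taken from a config:

import torch
from mmaction.models import ResNetAudio  # assumed to be exported here

model = ResNetAudio(depth=50, pretrained=None)
model.init_weights()
model.eval()
with torch.no_grad():
    feat = model(torch.randn(1, 1, 128, 80))  # (B, C=1, T, F) spectrogram
print(feat.shape)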
def train(self, mode: bool = True) -> None:
    """Set the optimization status when training."""
    super().train(mode)
    self._freeze_stages()
    if mode and self.norm_eval:
        for m in self.modules():
            if isinstance(m, _BatchNorm):
                m.eval()
Set the optimization status when training.
train
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_audio.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_audio.py
Apache-2.0
def _inner_forward(x):
    """Forward wrapper for utilizing checkpoint."""
    identity = x

    out = self.conv1(x)
    out = self.conv2(out)

    if self.downsample is not None:
        identity = self.downsample(x)

    out = out + identity
    return out
Forward wrapper for utilizing checkpoint.
forward._inner_forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call."""

    def _inner_forward(x):
        """Forward wrapper for utilizing checkpoint."""
        identity = x

        out = self.conv1(x)
        out = self.conv2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out = out + identity
        return out

    if self.with_cp and x.requires_grad:
        out = cp.checkpoint(_inner_forward, x)
    else:
        out = _inner_forward(x)
    out = self.relu(out)

    if self.non_local:
        out = self.non_local_block(out)

    return out
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def _inner_forward(x):
    """Forward wrapper for utilizing checkpoint."""
    identity = x

    out = self.conv1(x)
    out = self.conv2(out)
    out = self.conv3(out)

    if self.downsample is not None:
        identity = self.downsample(x)

    out = out + identity
    return out
Forward wrapper for utilizing checkpoint.
forward._inner_forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call."""

    def _inner_forward(x):
        """Forward wrapper for utilizing checkpoint."""
        identity = x

        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out = out + identity
        return out

    if self.with_cp and x.requires_grad:
        out = cp.checkpoint(_inner_forward, x)
    else:
        out = _inner_forward(x)
    out = self.relu(out)

    if self.non_local:
        out = self.non_local_block(out)

    return out
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def _inflate_conv_params(conv3d: nn.Module, state_dict_2d: OrderedDict,
                         module_name_2d: str,
                         inflated_param_names: List[str]) -> None:
    """Inflate a conv module from 2d to 3d.

    Args:
        conv3d (nn.Module): The destination conv3d module.
        state_dict_2d (OrderedDict): The state dict of pretrained 2d
            model.
        module_name_2d (str): The name of corresponding conv module in
            the 2d model.
        inflated_param_names (list[str]): List of parameters that have
            been inflated.
    """
    weight_2d_name = module_name_2d + '.weight'

    conv2d_weight = state_dict_2d[weight_2d_name]
    kernel_t = conv3d.weight.data.shape[2]

    new_weight = conv2d_weight.data.unsqueeze(2).expand_as(
        conv3d.weight) / kernel_t
    conv3d.weight.data.copy_(new_weight)
    inflated_param_names.append(weight_2d_name)

    if getattr(conv3d, 'bias') is not None:
        bias_2d_name = module_name_2d + '.bias'
        conv3d.bias.data.copy_(state_dict_2d[bias_2d_name])
        inflated_param_names.append(bias_2d_name)
Inflate a conv module from 2d to 3d. Args: conv3d (nn.Module): The destination conv3d module. state_dict_2d (OrderedDict): The state dict of pretrained 2d model. module_name_2d (str): The name of corresponding conv module in the 2d model. inflated_param_names (list[str]): List of parameters that have been inflated.
_inflate_conv_params
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
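A standalone illustration of the inflation arithmetic above: the 2d kernel is repeated along a new temporal axis and divided by kernel_t, so the temporal sum of the inflated kernel reproduces the original 2d kernel:

import torch

w2d = torch.randn(64, 3, 7, 7)  # (out_c, in_c, h, w), e.g. a conv1 weight
kernel_t = 5
w3d = w2d.unsqueeze(2).expand(64, 3, kernel_t, 7, 7) / kernel_t
print(w3d.shape)                            # torch.Size([64, 3, 5, 7, 7])
print(torch.allclose(w3d.sum(dim=2), w2d))  # True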