Dataset columns:
  code       string, lengths 26 to 870k
  docstring  string, lengths 1 to 65.6k
  func_name  string, lengths 1 to 194
  language   string, 1 class
  repo       string, lengths 8 to 68
  path       string, lengths 5 to 194
  url        string, lengths 46 to 254
  license    string, 4 classes
def _inflate_bn_params(bn3d: nn.Module, state_dict_2d: OrderedDict,
                       module_name_2d: str,
                       inflated_param_names: List[str]) -> None:
    """Inflate a norm module from 2d to 3d.

    Args:
        bn3d (nn.Module): The destination bn3d module.
        state_dict_2d (OrderedDict): The state dict of pretrained 2d model.
        module_name_2d (str): The name of corresponding bn module in the
            2d model.
        inflated_param_names (list[str]): List of parameters that have been
            inflated.
    """
    for param_name, param in bn3d.named_parameters():
        param_2d_name = f'{module_name_2d}.{param_name}'
        param_2d = state_dict_2d[param_2d_name]
        if param.data.shape != param_2d.shape:
            warnings.warn(f'The parameter of {module_name_2d} is not '
                          'loaded due to incompatible shapes. ')
            return

        param.data.copy_(param_2d)
        inflated_param_names.append(param_2d_name)

    for param_name, param in bn3d.named_buffers():
        param_2d_name = f'{module_name_2d}.{param_name}'
        # some buffers like num_batches_tracked may not exist in old
        # checkpoints
        if param_2d_name in state_dict_2d:
            param_2d = state_dict_2d[param_2d_name]
            param.data.copy_(param_2d)
            inflated_param_names.append(param_2d_name)
Inflate a norm module from 2d to 3d. Args: bn3d (nn.Module): The destination bn3d module. state_dict_2d (OrderedDict): The state dict of pretrained 2d model. module_name_2d (str): The name of corresponding bn module in the 2d model. inflated_param_names (list[str]): List of parameters that have been inflated.
_inflate_bn_params
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def _inflate_weights(self, logger: MMLogger) -> None:
    """Inflate the resnet2d parameters to resnet3d.

    The differences between resnet3d and resnet2d mainly lie in an extra
    axis of conv kernel. To utilize the pretrained parameters in 2d model,
    the weight of conv2d models should be inflated to fit in the shapes of
    the 3d counterpart.

    Args:
        logger (MMLogger): The logger used to print
            debugging information.
    """
    state_dict_r2d = _load_checkpoint(self.pretrained, map_location='cpu')
    if 'state_dict' in state_dict_r2d:
        state_dict_r2d = state_dict_r2d['state_dict']

    inflated_param_names = []
    for name, module in self.named_modules():
        if isinstance(module, ConvModule):
            # we use a ConvModule to wrap conv+bn+relu layers, thus the
            # name mapping is needed
            if 'downsample' in name:
                # layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
                original_conv_name = name + '.0'
                # layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
                original_bn_name = name + '.1'
            else:
                # layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
                original_conv_name = name
                # layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
                original_bn_name = name.replace('conv', 'bn')
            if original_conv_name + '.weight' not in state_dict_r2d:
                logger.warning(f'Module not exist in the state_dict_r2d'
                               f': {original_conv_name}')
            else:
                shape_2d = state_dict_r2d[original_conv_name +
                                          '.weight'].shape
                shape_3d = module.conv.weight.data.shape
                if shape_2d != shape_3d[:2] + shape_3d[3:]:
                    logger.warning(f'Weight shape mismatch for '
                                   f': {original_conv_name} : '
                                   f'3d weight shape: {shape_3d}; '
                                   f'2d weight shape: {shape_2d}. ')
                else:
                    self._inflate_conv_params(module.conv, state_dict_r2d,
                                              original_conv_name,
                                              inflated_param_names)

            if original_bn_name + '.weight' not in state_dict_r2d:
                logger.warning(f'Module not exist in the state_dict_r2d'
                               f': {original_bn_name}')
            else:
                self._inflate_bn_params(module.bn, state_dict_r2d,
                                        original_bn_name,
                                        inflated_param_names)

    # check if any parameters in the 2d checkpoint are not loaded
    remaining_names = set(
        state_dict_r2d.keys()) - set(inflated_param_names)
    if remaining_names:
        logger.info(f'These parameters in the 2d checkpoint are not loaded'
                    f': {remaining_names}')
Inflate the resnet2d parameters to resnet3d. The differences between resnet3d and resnet2d mainly lie in an extra axis of conv kernel. To utilize the pretrained parameters in 2d model, the weight of conv2d models should be inflated to fit in the shapes of the 3d counterpart. Args: logger (MMLogger): The logger used to print debugging information.
_inflate_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
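The function above delegates the per-kernel work to _inflate_conv_params, which is not included in this dump. Below is a minimal standalone sketch of the standard I3D-style inflation it is understood to perform: repeat the 2d kernel along the new temporal axis and divide by the temporal kernel size so activations keep their scale. The helper name and the demo tensors are illustrative, not mmaction2's exact API.

import torch
import torch.nn as nn


def inflate_conv_params_sketch(conv3d: nn.Conv3d,
                               weight_2d: torch.Tensor) -> None:
    """Tile a 2d conv kernel over the temporal axis of a 3d conv.

    Dividing by the temporal kernel size keeps the response on a
    temporally-constant input equal to the 2d response.
    """
    kernel_t = conv3d.weight.data.shape[2]
    new_weight = weight_2d.unsqueeze(2).expand_as(conv3d.weight) / kernel_t
    conv3d.weight.data.copy_(new_weight)


conv2d = nn.Conv2d(3, 8, kernel_size=3, bias=False)
conv3d = nn.Conv3d(3, 8, kernel_size=(3, 3, 3), bias=False)
inflate_conv_params_sketch(conv3d, conv2d.weight.data)

# On a temporally constant clip, the 2d and inflated 3d responses agree.
img = torch.randn(1, 3, 7, 7)
clip = img.unsqueeze(2).repeat(1, 1, 3, 1, 1)  # [1, 3, T=3, 7, 7]
print(torch.allclose(conv2d(img), conv3d(clip)[:, :, 0], atol=1e-5))  # True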
def inflate_weights(self, logger: MMLogger) -> None:
    """Inflate weights."""
    # ``_inflate_weights`` is a staticmethod in the original source, which
    # is why ``self`` is passed explicitly as its first argument.
    self._inflate_weights(self, logger)
Inflate weights.
inflate_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def _make_stem_layer(self) -> None:
    """Construct the stem layers, which consist of a conv+norm+act module
    and a pooling layer."""
    self.conv1 = ConvModule(
        self.in_channels,
        self.base_channels,
        kernel_size=self.conv1_kernel,
        stride=(self.conv1_stride_t, self.conv1_stride_s,
                self.conv1_stride_s),
        padding=tuple([(k - 1) // 2 for k in _triple(self.conv1_kernel)]),
        bias=False,
        conv_cfg=self.conv_cfg,
        norm_cfg=self.norm_cfg,
        act_cfg=self.act_cfg)

    self.maxpool = nn.MaxPool3d(
        kernel_size=(1, 3, 3),
        stride=(self.pool1_stride_t, self.pool1_stride_s,
                self.pool1_stride_s),
        padding=(0, 1, 1))

    self.pool2 = nn.MaxPool3d(kernel_size=(2, 1, 1), stride=(2, 1, 1))
Construct the stem layers, which consist of a conv+norm+act module and a pooling layer.
_make_stem_layer
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def _freeze_stages(self) -> None:
    """Prevent all the parameters from being optimized before
    ``self.frozen_stages``."""
    if self.frozen_stages >= 0:
        self.conv1.eval()
        for param in self.conv1.parameters():
            param.requires_grad = False

    for i in range(1, self.frozen_stages + 1):
        m = getattr(self, f'layer{i}')
        m.eval()
        for param in m.parameters():
            param.requires_grad = False
Prevent all the parameters from being optimized before ``self.frozen_stages``.
_freeze_stages
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def _init_weights(self, pretrained: Optional[str] = None) -> None:
    """Initiate the parameters either from existing checkpoint or from
    scratch.

    Args:
        pretrained (str | None): The path of the pretrained weight. Will
            override the original `pretrained` if set. The arg is added to
            be compatible with mmdet. Defaults to None.
    """
    if pretrained:
        self.pretrained = pretrained
    if isinstance(self.pretrained, str):
        logger = MMLogger.get_current_instance()
        logger.info(f'load model from: {self.pretrained}')

        if self.pretrained2d:
            # Inflate 2D model into 3D model.
            self.inflate_weights(logger)
        else:
            # Directly load 3D model.
            load_checkpoint(
                self, self.pretrained, strict=False, logger=logger)
    elif self.pretrained is None:
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                kaiming_init(m)
            elif isinstance(m, _BatchNorm):
                constant_init(m, 1)

        if self.zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck3d):
                    constant_init(m.conv3.bn, 0)
                elif isinstance(m, BasicBlock3d):
                    constant_init(m.conv2.bn, 0)
    else:
        raise TypeError('pretrained must be a str or None')
Initiate the parameters either from existing checkpoint or from scratch. Args: pretrained (str | None): The path of the pretrained weight. Will override the original `pretrained` if set. The arg is added to be compatible with mmdet. Defaults to None.
_init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def init_weights(self, pretrained: Optional[str] = None) -> None:
    """Initialize weights."""
    # ``_init_weights`` is a staticmethod in the original source, which is
    # why ``self`` is passed explicitly as its first argument.
    self._init_weights(self, pretrained)
Initialize weights.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def forward(self, x: torch.Tensor) \
        -> Union[torch.Tensor, Tuple[torch.Tensor]]:
    """Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        torch.Tensor or tuple[torch.Tensor]: The feature of the input
        samples extracted by the backbone.
    """
    x = self.conv1(x)
    if self.with_pool1:
        x = self.maxpool(x)
    outs = []
    for i, layer_name in enumerate(self.res_layers):
        res_layer = getattr(self, layer_name)
        x = res_layer(x)
        if i == 0 and self.with_pool2:
            x = self.pool2(x)
        if i in self.out_indices:
            outs.append(x)
    if len(outs) == 1:
        return outs[0]

    return tuple(outs)
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor or tuple[torch.Tensor]: The feature of the input samples extracted by the backbone.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def train(self, mode: bool = True) -> None:
    """Set the optimization status when training."""
    super().train(mode)
    self._freeze_stages()
    if mode and self.norm_eval:
        for m in self.modules():
            if isinstance(m, _BatchNorm):
                m.eval()
Set the optimization status when training.
train
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def inflate_weights(self, logger: MMLogger) -> None:
    """Inflate weights."""
    # ``_inflate_weights`` is a staticmethod in the original source, which
    # is why ``self`` is passed explicitly as its first argument.
    self._inflate_weights(self, logger)
Inflate weights.
inflate_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def _freeze_stages(self) -> None:
    """Prevent all the parameters from being optimized before
    ``self.frozen_stages``."""
    if self.all_frozen:
        layer = getattr(self, self.layer_name)
        layer.eval()
        for param in layer.parameters():
            param.requires_grad = False
Prevent all the parameters from being optimized before ``self.frozen_stages``.
_freeze_stages
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def init_weights(self, pretrained: Optional[str] = None) -> None:
    """Initialize weights."""
    # ``_init_weights`` is a staticmethod in the original source, which is
    # why ``self`` is passed explicitly as its first argument.
    self._init_weights(self, pretrained)
Initialize weights.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        torch.Tensor: The feature of the input
        samples extracted by the residual layer.
    """
    res_layer = getattr(self, self.layer_name)
    out = res_layer(x)
    return out
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The feature of the input samples extracted by the residual layer.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def train(self, mode: bool = True) -> None:
    """Set the optimization status when training."""
    super().train(mode)
    self._freeze_stages()
    if mode and self.norm_eval:
        for m in self.modules():
            if isinstance(m, _BatchNorm):
                m.eval()
Set the optimization status when training.
train
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def forward(self, x):
    """Defines the computation performed at every call."""
    x = self.block(x)

    n, c, h, w = x.size()
    x = x.view(n // self.num_segments, self.num_segments, c, h,
               w).transpose(1, 2).contiguous()
    x = self.non_local_block(x)
    x = x.transpose(1, 2).contiguous().view(n, c, h, w)
    return x
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
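The view/transpose pair in the NL3DWrapper forward above regroups the flattened batch of frames into a 5d clip tensor for the non-local block, then restores the original layout. A small sanity check of that roundtrip, using plain torch only:

import torch

num_segments = 4
x = torch.randn(8, 16, 7, 7)  # [N, C, H, W], N = 2 clips x 4 segments
n, c, h, w = x.size()

# [N // num_segments, C, num_segments, H, W]: group the frames of a clip
x5d = x.view(n // num_segments, num_segments, c, h, w).transpose(1, 2)

# the inverse mapping restores the original tensor exactly
x_back = x5d.transpose(1, 2).contiguous().view(n, c, h, w)
print(torch.equal(x, x_back))  # True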
def forward(self, x):
    """Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        torch.Tensor: The output of the module.
    """
    x = self.shift(x, self.num_segments, shift_div=self.shift_div)
    return self.net(x)
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The output of the module.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
def shift(x, num_segments, shift_div=3):
    """Perform temporal shift operation on the feature.

    Args:
        x (torch.Tensor): The input feature to be shifted.
        num_segments (int): Number of frame segments.
        shift_div (int): Number of divisions for shift. Default: 3.

    Returns:
        torch.Tensor: The shifted feature.
    """
    # [N, C, H, W]
    n, c, h, w = x.size()

    # [N // num_segments, num_segments, C, H*W]
    # can't use 5 dimensional array on PPL2D backend for caffe
    x = x.view(-1, num_segments, c, h * w)

    # get shift fold
    fold = c // shift_div

    # split c channel into three parts:
    # left_split, mid_split, right_split
    left_split = x[:, :, :fold, :]
    mid_split = x[:, :, fold:2 * fold, :]
    right_split = x[:, :, 2 * fold:, :]

    # can't use torch.zeros(*A.shape) or torch.zeros_like(A)
    # because array on caffe inference must be got by computing

    # shift left on num_segments channel in `left_split`
    zeros = left_split - left_split
    blank = zeros[:, :1, :, :]
    left_split = left_split[:, 1:, :, :]
    left_split = torch.cat((left_split, blank), 1)

    # shift right on num_segments channel in `mid_split`
    zeros = mid_split - mid_split
    blank = zeros[:, :1, :, :]
    mid_split = mid_split[:, :-1, :, :]
    mid_split = torch.cat((blank, mid_split), 1)

    # right_split: no shift

    # concatenate
    out = torch.cat((left_split, mid_split, right_split), 2)

    # [N, C, H, W]
    # restore the original dimension
    return out.view(n, c, h, w)
Perform temporal shift operation on the feature. Args: x (torch.Tensor): The input feature to be shifted. num_segments (int): Number of frame segments. shift_div (int): Number of divisions for shift. Default: 3. Returns: torch.Tensor: The shifted feature.
shift
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
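A tiny numeric check of the shift semantics above, using 1x1 spatial maps so the channel movement is easy to read: the first fold of channels takes its value from the next frame, the second fold from the previous frame, and the remaining channels stay put. For brevity this re-implementation uses torch.zeros_like instead of the caffe-safe `zeros = split - split` trick in the original.

import torch


def temporal_shift(x, num_segments, shift_div=3):
    # standalone copy of the shift logic above
    n, c, h, w = x.size()
    x = x.view(-1, num_segments, c, h * w)
    fold = c // shift_div
    left = torch.cat((x[:, 1:, :fold], torch.zeros_like(x[:, :1, :fold])), 1)
    mid = torch.cat((torch.zeros_like(x[:, :1, fold:2 * fold]),
                     x[:, :-1, fold:2 * fold]), 1)
    out = torch.cat((left, mid, x[:, :, 2 * fold:]), 2)
    return out.view(n, c, h, w)


# 1 clip of 4 segments, 3 channels; the value encodes (segment, channel)
x = torch.arange(12, dtype=torch.float).view(4, 3, 1, 1)
y = temporal_shift(x, num_segments=4, shift_div=3)
# channel 0 reads from the next frame, channel 1 from the previous frame,
# channel 2 is unchanged
print(y.view(4, 3))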
def init_structure(self):
    """Initialize structure for tsm."""
    if self.is_shift:
        self.make_temporal_shift()
    if len(self.non_local_cfg) != 0:
        self.make_non_local()
    if self.temporal_pool:
        self.make_temporal_pool()
Initialize structure for tsm.
init_structure
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
def make_block_temporal(stage, num_segments):
    """Make temporal shift on some blocks.

    Args:
        stage (nn.Module): Model layers to be shifted.
        num_segments (int): Number of frame segments.

    Returns:
        nn.Module: The shifted blocks.
    """
    blocks = list(stage.children())
    for i, b in enumerate(blocks):
        blocks[i] = TemporalShift(
            b, num_segments=num_segments, shift_div=self.shift_div)
    return nn.Sequential(*blocks)
Make temporal shift on some blocks. Args: stage (nn.Module): Model layers to be shifted. num_segments (int): Number of frame segments. Returns: nn.Module: The shifted blocks.
make_temporal_shift.make_block_temporal
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
def make_block_temporal(stage, num_segments):
    """Make temporal shift on some blocks.

    Args:
        stage (nn.Module): Model layers to be shifted.
        num_segments (int): Number of frame segments.

    Returns:
        nn.Module: The shifted blocks.
    """
    blocks = list(stage.children())
    # ``n_round`` and ``self`` come from the enclosing
    # ``make_temporal_shift`` scope; this function is a closure.
    for i, b in enumerate(blocks):
        if i % n_round == 0:
            blocks[i].conv1.conv = TemporalShift(
                b.conv1.conv,
                num_segments=num_segments,
                shift_div=self.shift_div)
    return nn.Sequential(*blocks)
Make temporal shift on some blocks. Args: stage (nn.Module): Model layers to be shifted. num_segments (int): Number of frame segments. Returns: nn.Module: The shifted blocks.
make_temporal_shift.make_block_temporal
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
def make_temporal_shift(self):
    """Make temporal shift for some layers."""
    if self.temporal_pool:
        num_segment_list = [
            self.num_segments, self.num_segments // 2,
            self.num_segments // 2, self.num_segments // 2
        ]
    else:
        num_segment_list = [self.num_segments] * 4
    if num_segment_list[-1] <= 0:
        raise ValueError('num_segment_list[-1] must be positive')

    if self.shift_place == 'block':

        def make_block_temporal(stage, num_segments):
            """Make temporal shift on some blocks.

            Args:
                stage (nn.Module): Model layers to be shifted.
                num_segments (int): Number of frame segments.

            Returns:
                nn.Module: The shifted blocks.
            """
            blocks = list(stage.children())
            for i, b in enumerate(blocks):
                blocks[i] = TemporalShift(
                    b, num_segments=num_segments, shift_div=self.shift_div)
            return nn.Sequential(*blocks)

        self.layer1 = make_block_temporal(self.layer1, num_segment_list[0])
        self.layer2 = make_block_temporal(self.layer2, num_segment_list[1])
        self.layer3 = make_block_temporal(self.layer3, num_segment_list[2])
        self.layer4 = make_block_temporal(self.layer4, num_segment_list[3])

    elif 'blockres' in self.shift_place:
        n_round = 1
        if len(list(self.layer3.children())) >= 23:
            n_round = 2

        def make_block_temporal(stage, num_segments):
            """Make temporal shift on some blocks.

            Args:
                stage (nn.Module): Model layers to be shifted.
                num_segments (int): Number of frame segments.

            Returns:
                nn.Module: The shifted blocks.
            """
            blocks = list(stage.children())
            for i, b in enumerate(blocks):
                if i % n_round == 0:
                    blocks[i].conv1.conv = TemporalShift(
                        b.conv1.conv,
                        num_segments=num_segments,
                        shift_div=self.shift_div)
            return nn.Sequential(*blocks)

        self.layer1 = make_block_temporal(self.layer1, num_segment_list[0])
        self.layer2 = make_block_temporal(self.layer2, num_segment_list[1])
        self.layer3 = make_block_temporal(self.layer3, num_segment_list[2])
        self.layer4 = make_block_temporal(self.layer4, num_segment_list[3])

    else:
        raise NotImplementedError
Make temporal shift for some layers.
make_temporal_shift
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
def forward(self, x):
    """Defines the computation performed at every call."""
    # [N, C, H, W]
    n, c, h, w = x.size()
    # [N // num_segments, C, num_segments, H, W]
    x = x.view(n // self.num_segments, self.num_segments, c, h,
               w).transpose(1, 2)
    # [N // num_segments, C, num_segments // 2, H, W]
    x = self.max_pool3d(x)
    # [N // 2, C, H, W]
    x = x.transpose(1, 2).contiguous().view(n // 2, c, h, w)
    return self.net(x)
Defines the computation performed at every call.
make_temporal_pool.forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
def make_temporal_pool(self):
    """Make temporal pooling between layer1 and layer2, using a 3D max
    pooling layer."""

    class TemporalPool(nn.Module):
        """Temporal pool module.

        Wrap layer2 in ResNet50 with a 3D max pooling layer.

        Args:
            net (nn.Module): Module to make temporal pool.
            num_segments (int): Number of frame segments.
        """

        def __init__(self, net, num_segments):
            super().__init__()
            self.net = net
            self.num_segments = num_segments
            self.max_pool3d = nn.MaxPool3d(
                kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0))

        def forward(self, x):
            """Defines the computation performed at every call."""
            # [N, C, H, W]
            n, c, h, w = x.size()
            # [N // num_segments, C, num_segments, H, W]
            x = x.view(n // self.num_segments, self.num_segments, c, h,
                       w).transpose(1, 2)
            # [N // num_segments, C, num_segments // 2, H, W]
            x = self.max_pool3d(x)
            # [N // 2, C, H, W]
            x = x.transpose(1, 2).contiguous().view(n // 2, c, h, w)
            return self.net(x)

    self.layer2 = TemporalPool(self.layer2, self.num_segments)
Make temporal pooling between layer1 and layer2, using a 3D max pooling layer.
make_temporal_pool
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
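A shape check for the TemporalPool wrapper defined above: max-pooling the segment axis with kernel 3, stride 2, padding 1 maps num_segments to num_segments // 2, so the flattened frame batch shrinks from N to N // 2. Plain torch only:

import torch
import torch.nn as nn

num_segments = 8
x = torch.randn(16, 4, 5, 5)  # [N, C, H, W], N = 2 clips x 8 segments
n, c, h, w = x.size()

x5d = x.view(n // num_segments, num_segments, c, h, w).transpose(1, 2)
pool = nn.MaxPool3d(kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0))
x5d = pool(x5d)  # temporal axis: 8 -> 4

x_out = x5d.transpose(1, 2).contiguous().view(n // 2, c, h, w)
print(x_out.shape)  # torch.Size([8, 4, 5, 5])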
def make_non_local(self):
    """Wrap resnet layer into non local wrapper."""
    # This part is for ResNet50
    for i in range(self.num_stages):
        non_local_stage = self.non_local_stages[i]
        if sum(non_local_stage) == 0:
            continue
        layer_name = f'layer{i + 1}'
        res_layer = getattr(self, layer_name)

        for idx, non_local in enumerate(non_local_stage):
            if non_local:
                res_layer[idx] = NL3DWrapper(res_layer[idx],
                                             self.num_segments,
                                             self.non_local_cfg)
Wrap resnet layer into non local wrapper.
make_non_local
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
def load_original_weights(self, logger):
    """Load weights from original checkpoint, which requires converting
    keys."""
    state_dict_torchvision = _load_checkpoint(
        self.pretrained, map_location='cpu')
    if 'state_dict' in state_dict_torchvision:
        state_dict_torchvision = state_dict_torchvision['state_dict']

    wrapped_layers_map = dict()
    for name, module in self.named_modules():
        # convert torchvision keys
        ori_name = name
        for wrap_prefix in self._get_wrap_prefix():
            if wrap_prefix in ori_name:
                ori_name = ori_name.replace(wrap_prefix, '')
                wrapped_layers_map[ori_name] = name

        if isinstance(module, ConvModule):
            if 'downsample' in ori_name:
                # layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
                tv_conv_name = ori_name + '.0'
                # layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
                tv_bn_name = ori_name + '.1'
            else:
                # layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
                tv_conv_name = ori_name
                # layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
                tv_bn_name = ori_name.replace('conv', 'bn')

            for conv_param in ['.weight', '.bias']:
                if tv_conv_name + conv_param in state_dict_torchvision:
                    state_dict_torchvision[ori_name+'.conv'+conv_param] = \
                        state_dict_torchvision.pop(tv_conv_name+conv_param)

            for bn_param in [
                    '.weight', '.bias', '.running_mean', '.running_var'
            ]:
                if tv_bn_name + bn_param in state_dict_torchvision:
                    state_dict_torchvision[ori_name+'.bn'+bn_param] = \
                        state_dict_torchvision.pop(tv_bn_name+bn_param)

    # convert wrapped keys
    for param_name in list(state_dict_torchvision.keys()):
        layer_name = '.'.join(param_name.split('.')[:-1])
        if layer_name in wrapped_layers_map:
            wrapped_name = param_name.replace(
                layer_name, wrapped_layers_map[layer_name])
            print(f'wrapped_name {wrapped_name}')
            state_dict_torchvision[
                wrapped_name] = state_dict_torchvision.pop(param_name)

    msg = self.load_state_dict(state_dict_torchvision, strict=False)
    logger.info(msg)
Load weights from original checkpoint, which requires converting keys.
load_original_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
def init_weights(self):
    """Initiate the parameters either from existing checkpoint or from
    scratch."""
    if self.pretrained2d:
        logger = MMLogger.get_current_instance()
        self.load_original_weights(logger)
    else:
        if self.pretrained:
            self.init_cfg = dict(
                type='Pretrained', checkpoint=self.pretrained)
        super().init_weights()
Initiate the parameters either from existing checkpoint or from scratch.
init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call."""
    if self.downsample_position == 'before':
        x = self.pool(x)
        x = self.conv(x)
    else:
        x = self.conv(x)
        x = self.pool(x)
    return x
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/necks/tpn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py
Apache-2.0
def forward(self, x: Tuple[torch.Tensor]) -> torch.Tensor:
    """Defines the computation performed at every call."""
    out = [self.downsamples[i](feature) for i, feature in enumerate(x)]
    out = torch.cat(out, 1)
    out = self.fusion_conv(out)
    return out
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/necks/tpn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py
Apache-2.0
def forward(self, x: Tuple[torch.Tensor]) -> list:
    """Defines the computation performed at every call."""
    out = []
    for i, _ in enumerate(x):
        if isinstance(self.spatial_modulation[i], nn.ModuleList):
            out_ = x[i]
            for op in self.spatial_modulation[i]:
                out_ = op(out_)
            out.append(out_)
        else:
            out.append(self.spatial_modulation[i](x[i]))
    return out
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/necks/tpn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py
Apache-2.0
def init_weights(self) -> None:
    """Initiate the parameters from scratch."""
    for m in self.modules():
        if isinstance(m, nn.Linear):
            normal_init(m, std=0.01)
        if isinstance(m, nn.Conv3d):
            xavier_init(m, distribution='uniform')
        if isinstance(m, nn.BatchNorm3d):
            constant_init(m, 1)
Initiate the parameters from scratch.
init_weights
python
open-mmlab/mmaction2
mmaction/models/necks/tpn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py
Apache-2.0
def loss(self, x: torch.Tensor,
         data_samples: Optional[SampleList]) -> dict:
    """Calculate auxiliary loss."""
    x = self(x)
    labels = [sample.gt_label for sample in data_samples]
    labels = torch.stack(labels).to(x.device)
    labels = labels.squeeze()
    if labels.shape == torch.Size([]):
        labels = labels.unsqueeze(0)

    losses = dict()
    losses['loss_aux'] = self.loss_weight * self.loss_cls(x, labels)
    return losses
Calculate auxiliary loss.
loss
python
open-mmlab/mmaction2
mmaction/models/necks/tpn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Auxiliary head forward function."""
    x = self.conv(x)
    x = self.avg_pool(x).squeeze(-1).squeeze(-1).squeeze(-1)
    x = self.dropout(x)
    x = self.fc(x)
    return x
Auxiliary head forward function.
forward
python
open-mmlab/mmaction2
mmaction/models/necks/tpn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call."""
    x = self.conv(x)
    x = self.pool(x)
    return x
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/necks/tpn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py
Apache-2.0
def init_weights(self) -> None:
    """Default init_weights for conv(msra) and norm in ConvModule."""
    for m in self.modules():
        if isinstance(m, nn.Conv3d):
            xavier_init(m, distribution='uniform')
        if isinstance(m, nn.BatchNorm3d):
            constant_init(m, 1)

    if self.aux_head is not None:
        self.aux_head.init_weights()
Default init_weights for conv(msra) and norm in ConvModule.
init_weights
python
open-mmlab/mmaction2
mmaction/models/necks/tpn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py
Apache-2.0
def forward(self,
            x: Tuple[torch.Tensor],
            data_samples: Optional[SampleList] = None) -> tuple:
    """Defines the computation performed at every call."""
    loss_aux = dict()

    # Calculate auxiliary loss if `self.aux_head`
    # and `data_samples` are not None.
    if self.aux_head is not None and data_samples is not None:
        loss_aux = self.aux_head.loss(x[-2], data_samples)

    # Spatial Modulation
    spatial_modulation_outs = self.spatial_modulation(x)

    # Temporal Modulation
    temporal_modulation_outs = []
    for i, temporal_modulation in enumerate(self.temporal_modulation_ops):
        temporal_modulation_outs.append(
            temporal_modulation(spatial_modulation_outs[i]))

    outs = [out.clone() for out in temporal_modulation_outs]
    if len(self.upsample_ops) != 0:
        for i in range(self.num_tpn_stages - 1, 0, -1):
            outs[i - 1] = outs[i - 1] + self.upsample_ops[i - 1](outs[i])

    # Get top-down outs
    top_down_outs = self.level_fusion_1(outs)

    # Build bottom-up flow using downsample operation
    if self.flow_type == 'parallel':
        outs = [out.clone() for out in temporal_modulation_outs]
    if len(self.downsample_ops) != 0:
        for i in range(self.num_tpn_stages - 1):
            outs[i + 1] = outs[i + 1] + self.downsample_ops[i](outs[i])

    # Get bottom-up outs
    bottom_up_outs = self.level_fusion_2(outs)

    # fuse two pyramid outs
    outs = self.pyramid_fusion(
        torch.cat([top_down_outs, bottom_up_outs], 1))

    return outs, loss_aux
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/necks/tpn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py
Apache-2.0
def forward(ctx, pred, labels, is_positive, ohem_ratio, group_size):
    """Calculate OHEM hinge loss.

    Args:
        pred (torch.Tensor): Predicted completeness score.
        labels (torch.Tensor): Groundtruth class label.
        is_positive (int): Set to 1 when proposals are positive and
            set to -1 when proposals are incomplete.
        ohem_ratio (float): Ratio of hard examples.
        group_size (int): Number of proposals sampled per video.

    Returns:
        torch.Tensor: Returned class-wise hinge loss.
    """
    num_samples = pred.size(0)
    if num_samples != len(labels):
        raise ValueError(f'Number of samples should be equal to that '
                         f'of labels, but got {num_samples} samples and '
                         f'{len(labels)} labels.')

    losses = torch.zeros(num_samples, device=pred.device)
    slopes = torch.zeros(num_samples, device=pred.device)
    for i in range(num_samples):
        losses[i] = max(0, 1 - is_positive * pred[i, labels[i] - 1])
        slopes[i] = -is_positive if losses[i] != 0 else 0

    losses = losses.view(-1, group_size).contiguous()
    sorted_losses, indices = torch.sort(losses, dim=1, descending=True)
    keep_length = int(group_size * ohem_ratio)
    loss = torch.zeros(1, device=pred.device)
    for i in range(losses.size(0)):
        loss += sorted_losses[i, :keep_length].sum()
    ctx.loss_index = indices[:, :keep_length]
    ctx.labels = labels
    ctx.slopes = slopes
    ctx.shape = pred.size()
    ctx.group_size = group_size
    ctx.num_groups = losses.size(0)
    return loss
Calculate OHEM hinge loss. Args: pred (torch.Tensor): Predicted completeness score. labels (torch.Tensor): Groundtruth class label. is_positive (int): Set to 1 when proposals are positive and set to -1 when proposals are incomplete. ohem_ratio (float): Ratio of hard examples. group_size (int): Number of proposals sampled per video. Returns: torch.Tensor: Returned class-wise hinge loss.
forward
python
open-mmlab/mmaction2
mmaction/models/losses/ohem_hinge_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/ohem_hinge_loss.py
Apache-2.0
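A minimal standalone illustration of the OHEM idea in the forward above: compute per-sample hinge losses, then keep only the hardest group_size * ohem_ratio entries within each group. The class indexing and the autograd bookkeeping of OHEMHingeLoss are omitted here.

import torch

pred = torch.tensor([2.0, 0.5, -1.0, 0.2, 1.5, -0.3])  # scores, 2 groups
is_positive, ohem_ratio, group_size = 1, 0.5, 3

losses = (1 - is_positive * pred).clamp(min=0)  # per-sample hinge loss
losses = losses.view(-1, group_size)            # two groups of three
sorted_losses, _ = torch.sort(losses, dim=1, descending=True)
keep_length = int(group_size * ohem_ratio)      # keep the hardest 1 of 3
loss = sorted_losses[:, :keep_length].sum()
print(loss)  # tensor(3.3000): 2.0 from group 0 plus 1.3 from group 1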
def backward(ctx, grad_output):
    """Defines a formula for differentiating the operation with backward
    mode automatic differentiation."""
    labels = ctx.labels
    slopes = ctx.slopes

    grad_in = torch.zeros(ctx.shape, device=ctx.slopes.device)
    for group in range(ctx.num_groups):
        for idx in ctx.loss_index[group]:
            loc = idx + group * ctx.group_size
            grad_in[loc, labels[loc] - 1] = (
                slopes[loc] * grad_output.data[0])

    return torch.autograd.Variable(grad_in), None, None, None, None
Defines a formula for differentiating the operation with backward mode automatic differentiation.
backward
python
open-mmlab/mmaction2
mmaction/models/losses/ohem_hinge_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/ohem_hinge_loss.py
Apache-2.0
def _forward(self, cls_score, label, **kwargs):
    """Forward function.

    Args:
        cls_score (torch.Tensor): The class score.
        label (torch.Tensor): The ground truth label.
        kwargs: Any keyword argument to be used to calculate
            nll loss.

    Returns:
        torch.Tensor: The returned nll loss.
    """
    loss_cls = F.nll_loss(cls_score, label, **kwargs)
    return loss_cls
Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. kwargs: Any keyword argument to be used to calculate nll loss. Returns: torch.Tensor: The returned nll loss.
_forward
python
open-mmlab/mmaction2
mmaction/models/losses/nll_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/nll_loss.py
Apache-2.0
def tem_loss(pred_start, pred_end, gt_start, gt_end):
    """Calculate Temporal Evaluation Module Loss.

    This function calculates the binary_logistic_regression_loss for start
    and end respectively and returns the sum of their losses.

    Args:
        pred_start (torch.Tensor): Predicted start score by BMN model.
        pred_end (torch.Tensor): Predicted end score by BMN model.
        gt_start (torch.Tensor): Groundtruth confidence score for start.
        gt_end (torch.Tensor): Groundtruth confidence score for end.

    Returns:
        torch.Tensor: Returned binary logistic loss.
    """
    loss_start = binary_logistic_regression_loss(pred_start, gt_start)
    loss_end = binary_logistic_regression_loss(pred_end, gt_end)
    loss = loss_start + loss_end
    return loss
Calculate Temporal Evaluation Module Loss. This function calculates the binary_logistic_regression_loss for start and end respectively and returns the sum of their losses. Args: pred_start (torch.Tensor): Predicted start score by BMN model. pred_end (torch.Tensor): Predicted end score by BMN model. gt_start (torch.Tensor): Groundtruth confidence score for start. gt_end (torch.Tensor): Groundtruth confidence score for end. Returns: torch.Tensor: Returned binary logistic loss.
tem_loss
python
open-mmlab/mmaction2
mmaction/models/losses/bmn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/bmn_loss.py
Apache-2.0
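tem_loss above calls binary_logistic_regression_loss, which is not part of this dump. A hedged sketch of the rebalanced binary logistic loss it is understood to compute in mmaction2, where positives are up-weighted by the (clamped) total/positive ratio; treat the exact clamp range and epsilon as assumptions:

import torch


def binary_logistic_regression_loss(reg_score, label, threshold=0.5,
                                    ratio_range=(1.05, 21), eps=1e-5):
    """Weighted binary logistic loss over probabilities in [0, 1]."""
    label = label.view(-1).to(reg_score.device)
    reg_score = reg_score.contiguous().view(-1)

    pmask = (label > threshold).float()
    num_positive = max(torch.sum(pmask), torch.tensor(1.0))
    num_entries = len(label)
    ratio = num_entries / num_positive
    ratio = min(max(ratio, ratio_range[0]), ratio_range[1])

    coef_0 = 0.5 * ratio / (ratio - 1)  # weight for negatives
    coef_1 = 0.5 * ratio                # weight for positives
    loss = coef_1 * pmask * torch.log(reg_score + eps) + \
        coef_0 * (1.0 - pmask) * torch.log(1.0 - reg_score + eps)
    return -torch.mean(loss)


pred = torch.tensor([0.9, 0.1, 0.2, 0.8])  # predicted start probabilities
gt = torch.tensor([1.0, 0.0, 0.0, 1.0])    # soft groundtruth scores
print(binary_logistic_regression_loss(pred, gt))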
def pem_reg_loss(pred_score,
                 gt_iou_map,
                 mask,
                 high_temporal_iou_threshold=0.7,
                 low_temporal_iou_threshold=0.3):
    """Calculate Proposal Evaluation Module Regression Loss.

    Args:
        pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
        gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
        mask (torch.Tensor): Boundary-Matching mask.
        high_temporal_iou_threshold (float): Higher threshold of
            temporal_iou. Default: 0.7.
        low_temporal_iou_threshold (float): Lower threshold of
            temporal_iou. Default: 0.3.

    Returns:
        torch.Tensor: Proposal evaluation regression loss.
    """
    u_hmask = (gt_iou_map > high_temporal_iou_threshold).float()
    u_mmask = ((gt_iou_map <= high_temporal_iou_threshold) &
               (gt_iou_map > low_temporal_iou_threshold)).float()
    u_lmask = ((gt_iou_map <= low_temporal_iou_threshold) &
               (gt_iou_map > 0.)).float()
    u_lmask = u_lmask * mask

    num_h = torch.sum(u_hmask)
    num_m = torch.sum(u_mmask)
    num_l = torch.sum(u_lmask)

    # randomly subsample the medium- and low-iou entries so each pool is
    # roughly the size of the high-iou pool
    r_m = num_h / num_m
    u_smmask = torch.rand_like(gt_iou_map)
    u_smmask = u_mmask * u_smmask
    u_smmask = (u_smmask > (1. - r_m)).float()

    r_l = num_h / num_l
    u_slmask = torch.rand_like(gt_iou_map)
    u_slmask = u_lmask * u_slmask
    u_slmask = (u_slmask > (1. - r_l)).float()

    weights = u_hmask + u_smmask + u_slmask

    loss = F.mse_loss(pred_score * weights, gt_iou_map * weights)
    loss = 0.5 * torch.sum(
        loss * torch.ones_like(weights)) / torch.sum(weights)

    return loss
Calculate Proposal Evaluation Module Regression Loss. Args: pred_score (torch.Tensor): Predicted temporal_iou score by BMN. gt_iou_map (torch.Tensor): Groundtruth temporal_iou score. mask (torch.Tensor): Boundary-Matching mask. high_temporal_iou_threshold (float): Higher threshold of temporal_iou. Default: 0.7. low_temporal_iou_threshold (float): Lower threshold of temporal_iou. Default: 0.3. Returns: torch.Tensor: Proposal evaluation regression loss.
pem_reg_loss
python
open-mmlab/mmaction2
mmaction/models/losses/bmn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/bmn_loss.py
Apache-2.0
def forward(self,
            pred_bm,
            pred_start,
            pred_end,
            gt_iou_map,
            gt_start,
            gt_end,
            bm_mask,
            weight_tem=1.0,
            weight_pem_reg=10.0,
            weight_pem_cls=1.0):
    """Calculate Boundary Matching Network Loss.

    Args:
        pred_bm (torch.Tensor): Predicted confidence score for boundary
            matching map.
        pred_start (torch.Tensor): Predicted confidence score for start.
        pred_end (torch.Tensor): Predicted confidence score for end.
        gt_iou_map (torch.Tensor): Groundtruth score for boundary matching
            map.
        gt_start (torch.Tensor): Groundtruth temporal_iou score for start.
        gt_end (torch.Tensor): Groundtruth temporal_iou score for end.
        bm_mask (torch.Tensor): Boundary-Matching mask.
        weight_tem (float): Weight for tem loss. Default: 1.0.
        weight_pem_reg (float): Weight for pem regression loss.
            Default: 10.0.
        weight_pem_cls (float): Weight for pem classification loss.
            Default: 1.0.

    Returns:
        tuple([torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]):
            (loss, tem_loss, pem_reg_loss, pem_cls_loss). Loss is the bmn
            loss, tem_loss is the temporal evaluation loss, pem_reg_loss is
            the proposal evaluation regression loss, pem_cls_loss is the
            proposal evaluation classification loss.
    """
    pred_bm_reg = pred_bm[:, 0].contiguous()
    pred_bm_cls = pred_bm[:, 1].contiguous()
    gt_iou_map = gt_iou_map * bm_mask

    pem_reg_loss = self.pem_reg_loss(pred_bm_reg, gt_iou_map, bm_mask)
    pem_cls_loss = self.pem_cls_loss(pred_bm_cls, gt_iou_map, bm_mask)
    tem_loss = self.tem_loss(pred_start, pred_end, gt_start, gt_end)
    loss = (
        weight_tem * tem_loss + weight_pem_reg * pem_reg_loss +
        weight_pem_cls * pem_cls_loss)
    return loss, tem_loss, pem_reg_loss, pem_cls_loss
Calculate Boundary Matching Network Loss. Args: pred_bm (torch.Tensor): Predicted confidence score for boundary matching map. pred_start (torch.Tensor): Predicted confidence score for start. pred_end (torch.Tensor): Predicted confidence score for end. gt_iou_map (torch.Tensor): Groundtruth score for boundary matching map. gt_start (torch.Tensor): Groundtruth temporal_iou score for start. gt_end (torch.Tensor): Groundtruth temporal_iou score for end. bm_mask (torch.Tensor): Boundary-Matching mask. weight_tem (float): Weight for tem loss. Default: 1.0. weight_pem_reg (float): Weight for pem regression loss. Default: 10.0. weight_pem_cls (float): Weight for pem classification loss. Default: 1.0. Returns: tuple([torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]): (loss, tem_loss, pem_reg_loss, pem_cls_loss). Loss is the bmn loss, tem_loss is the temporal evaluation loss, pem_reg_loss is the proposal evaluation regression loss, pem_cls_loss is the proposal evaluation classification loss.
forward
python
open-mmlab/mmaction2
mmaction/models/losses/bmn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/bmn_loss.py
Apache-2.0
def _forward(self, *args, **kwargs):
    """Forward function."""
    pass
Forward function.
_forward
python
open-mmlab/mmaction2
mmaction/models/losses/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/base.py
Apache-2.0
def forward(self, *args, **kwargs):
    """Defines the computation performed at every call.

    Args:
        *args: The positional arguments for the corresponding
            loss.
        **kwargs: The keyword arguments for the corresponding
            loss.

    Returns:
        torch.Tensor: The calculated loss.
    """
    ret = self._forward(*args, **kwargs)
    if isinstance(ret, dict):
        for k in ret:
            if 'loss' in k:
                ret[k] *= self.loss_weight
    else:
        ret *= self.loss_weight
    return ret
Defines the computation performed at every call. Args: *args: The positional arguments for the corresponding loss. **kwargs: The keyword arguments for the corresponding loss. Returns: torch.Tensor: The calculated loss.
forward
python
open-mmlab/mmaction2
mmaction/models/losses/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/base.py
Apache-2.0
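A self-contained toy subclass showing how the wrapper above scales whatever _forward returns; the class here is illustrative, not part of mmaction2:

import torch
import torch.nn as nn
import torch.nn.functional as F


class ToyWeightedLoss(nn.Module):
    """Mimics the pattern above: subclasses implement _forward and the
    base forward applies loss_weight to tensor or dict returns."""

    def __init__(self, loss_weight=0.5):
        super().__init__()
        self.loss_weight = loss_weight

    def _forward(self, cls_score, label):
        return F.cross_entropy(cls_score, label)

    def forward(self, *args, **kwargs):
        ret = self._forward(*args, **kwargs)
        if isinstance(ret, dict):
            for k in ret:
                if 'loss' in k:
                    ret[k] *= self.loss_weight
        else:
            ret *= self.loss_weight
        return ret


scores, labels = torch.randn(4, 10), torch.randint(0, 10, (4,))
loss_fn = ToyWeightedLoss(loss_weight=0.5)
print(loss_fn(scores, labels))  # 0.5 * F.cross_entropy(scores, labels)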
def activity_loss(activity_score, labels, activity_indexer):
    """Activity Loss.

    It will calculate activity loss given activity_score and label.

    Args:
        activity_score (torch.Tensor): Predicted activity score.
        labels (torch.Tensor): Groundtruth class label.
        activity_indexer (torch.Tensor): Index slices of proposals.

    Returns:
        torch.Tensor: Returned cross entropy loss.
    """
    pred = activity_score[activity_indexer, :]
    gt = labels[activity_indexer]
    return F.cross_entropy(pred, gt)
Activity Loss. It will calculate activity loss given activity_score and label. Args: activity_score (torch.Tensor): Predicted activity score. labels (torch.Tensor): Groundtruth class label. activity_indexer (torch.Tensor): Index slices of proposals. Returns: torch.Tensor: Returned cross entropy loss.
activity_loss
python
open-mmlab/mmaction2
mmaction/models/losses/ssn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/ssn_loss.py
Apache-2.0
def completeness_loss(completeness_score,
                      labels,
                      completeness_indexer,
                      positive_per_video,
                      incomplete_per_video,
                      ohem_ratio=0.17):
    """Completeness Loss.

    It will calculate completeness loss given completeness_score and
    label.

    Args:
        completeness_score (torch.Tensor): Predicted completeness score.
        labels (torch.Tensor): Groundtruth class label.
        completeness_indexer (torch.Tensor): Index slices of positive and
            incomplete proposals.
        positive_per_video (int): Number of positive proposals sampled
            per video.
        incomplete_per_video (int): Number of incomplete proposals sampled
            per video.
        ohem_ratio (float): Ratio of online hard example mining.
            Default: 0.17.

    Returns:
        torch.Tensor: Returned class-wise completeness loss.
    """
    pred = completeness_score[completeness_indexer, :]
    gt = labels[completeness_indexer]

    pred_dim = pred.size(1)
    pred = pred.view(-1, positive_per_video + incomplete_per_video,
                     pred_dim)
    gt = gt.view(-1, positive_per_video + incomplete_per_video)

    # yapf:disable
    positive_pred = pred[:, :positive_per_video, :].contiguous().view(-1, pred_dim)  # noqa:E501
    incomplete_pred = pred[:, positive_per_video:, :].contiguous().view(-1, pred_dim)  # noqa:E501
    # yapf:enable

    positive_loss = OHEMHingeLoss.apply(
        positive_pred, gt[:, :positive_per_video].contiguous().view(-1), 1,
        1.0, positive_per_video)
    incomplete_loss = OHEMHingeLoss.apply(
        incomplete_pred, gt[:, positive_per_video:].contiguous().view(-1),
        -1, ohem_ratio, incomplete_per_video)
    num_positives = positive_pred.size(0)
    num_incompletes = int(incomplete_pred.size(0) * ohem_ratio)

    return ((positive_loss + incomplete_loss) /
            float(num_positives + num_incompletes))
Completeness Loss. It will calculate completeness loss given completeness_score and label. Args: completeness_score (torch.Tensor): Predicted completeness score. labels (torch.Tensor): Groundtruth class label. completeness_indexer (torch.Tensor): Index slices of positive and incomplete proposals. positive_per_video (int): Number of positive proposals sampled per video. incomplete_per_video (int): Number of incomplete proposals sampled per video. ohem_ratio (float): Ratio of online hard example mining. Default: 0.17. Returns: torch.Tensor: Returned class-wise completeness loss.
completeness_loss
python
open-mmlab/mmaction2
mmaction/models/losses/ssn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/ssn_loss.py
Apache-2.0
def classwise_regression_loss(bbox_pred, labels, bbox_targets,
                              regression_indexer):
    """Classwise Regression Loss.

    It will calculate classwise_regression loss given class_reg_pred and
    targets.

    Args:
        bbox_pred (torch.Tensor): Predicted interval center and span
            of positive proposals.
        labels (torch.Tensor): Groundtruth class label.
        bbox_targets (torch.Tensor): Groundtruth center and span
            of positive proposals.
        regression_indexer (torch.Tensor): Index slices of positive
            proposals.

    Returns:
        torch.Tensor: Returned class-wise regression loss.
    """
    pred = bbox_pred[regression_indexer, :, :]
    gt = labels[regression_indexer]
    reg_target = bbox_targets[regression_indexer, :]
    class_idx = gt.data - 1
    classwise_pred = pred[:, class_idx, :]
    classwise_reg_pred = torch.cat(
        (torch.diag(classwise_pred[:, :, 0]).view(-1, 1),
         torch.diag(classwise_pred[:, :, 1]).view(-1, 1)),
        dim=1)
    loss = F.smooth_l1_loss(
        classwise_reg_pred.view(-1), reg_target.view(-1)) * 2
    return loss
Classwise Regression Loss. It will calculate classwise_regression loss given class_reg_pred and targets. Args: bbox_pred (torch.Tensor): Predicted interval center and span of positive proposals. labels (torch.Tensor): Groundtruth class label. bbox_targets (torch.Tensor): Groundtruth center and span of positive proposals. regression_indexer (torch.Tensor): Index slices of positive proposals. Returns: torch.Tensor: Returned class-wise regression loss.
classwise_regression_loss
python
open-mmlab/mmaction2
mmaction/models/losses/ssn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/ssn_loss.py
Apache-2.0
def forward(self, activity_score, completeness_score, bbox_pred,
            proposal_type, labels, bbox_targets, train_cfg):
    """Calculate SSN loss.

    Args:
        activity_score (torch.Tensor): Predicted activity score.
        completeness_score (torch.Tensor): Predicted completeness score.
        bbox_pred (torch.Tensor): Predicted interval center and span
            of positive proposals.
        proposal_type (torch.Tensor): Type index slices of proposals.
        labels (torch.Tensor): Groundtruth class label.
        bbox_targets (torch.Tensor): Groundtruth center and span
            of positive proposals.
        train_cfg (dict): Config for training.

    Returns:
        dict([torch.Tensor, torch.Tensor, torch.Tensor]):
            (loss_activity, loss_completeness, loss_reg). Loss_activity
            is the activity loss, loss_completeness is the class-wise
            completeness loss, loss_reg is the class-wise regression loss.
    """
    self.sampler = train_cfg.ssn.sampler
    self.loss_weight = train_cfg.ssn.loss_weight
    losses = dict()

    proposal_type = proposal_type.view(-1)
    labels = labels.view(-1)
    activity_indexer = ((proposal_type == 0) +
                        (proposal_type == 2)).nonzero().squeeze(1)
    completeness_indexer = ((proposal_type == 0) +
                            (proposal_type == 1)).nonzero().squeeze(1)

    total_ratio = (
        self.sampler.positive_ratio + self.sampler.background_ratio +
        self.sampler.incomplete_ratio)
    positive_per_video = int(self.sampler.num_per_video *
                             (self.sampler.positive_ratio / total_ratio))
    background_per_video = int(
        self.sampler.num_per_video *
        (self.sampler.background_ratio / total_ratio))
    incomplete_per_video = (
        self.sampler.num_per_video - positive_per_video -
        background_per_video)

    losses['loss_activity'] = self.activity_loss(activity_score, labels,
                                                 activity_indexer)

    losses['loss_completeness'] = self.completeness_loss(
        completeness_score,
        labels,
        completeness_indexer,
        positive_per_video,
        incomplete_per_video,
        ohem_ratio=positive_per_video / incomplete_per_video)
    losses['loss_completeness'] *= self.loss_weight.comp_loss_weight

    if bbox_pred is not None:
        regression_indexer = (proposal_type == 0).nonzero().squeeze(1)
        bbox_targets = bbox_targets.view(-1, 2)
        losses['loss_reg'] = self.classwise_regression_loss(
            bbox_pred, labels, bbox_targets, regression_indexer)
        losses['loss_reg'] *= self.loss_weight.reg_loss_weight

    return losses
Calculate SSN loss. Args: activity_score (torch.Tensor): Predicted activity score. completeness_score (torch.Tensor): Predicted completeness score. bbox_pred (torch.Tensor): Predicted interval center and span of positive proposals. proposal_type (torch.Tensor): Type index slices of proposals. labels (torch.Tensor): Groundtruth class label. bbox_targets (torch.Tensor): Groundtruth center and span of positive proposals. train_cfg (dict): Config for training. Returns: dict([torch.Tensor, torch.Tensor, torch.Tensor]): (loss_activity, loss_completeness, loss_reg). Loss_activity is the activity loss, loss_completeness is the class-wise completeness loss, loss_reg is the class-wise regression loss.
forward
python
open-mmlab/mmaction2
mmaction/models/losses/ssn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/ssn_loss.py
Apache-2.0
def _forward(self, cls_score, label, mask, category_mask):
    """Forward function.

    Args:
        cls_score (torch.Tensor): The class score.
        label (torch.Tensor): The ground truth label.
        mask (torch.Tensor): The mask of tags. 0 indicates that the
            category of this tag is missing in the label of the video.
        category_mask (torch.Tensor): The category mask. For each sample,
            it's a tensor with length `len(self.categories)`, denoting
            whether each category is labeled for this video.

    Returns:
        dict: The returned CrossEntropy-based losses.
    """
    if self.loss_type == 'all':
        loss_cls = F.binary_cross_entropy_with_logits(
            cls_score, label, reduction='none')
        if self.with_mask:
            w_loss_cls = mask * loss_cls
            w_loss_cls = torch.sum(w_loss_cls, dim=1)
            if self.reduction == 'mean':
                w_loss_cls = w_loss_cls / torch.sum(mask, dim=1)
            w_loss_cls = torch.mean(w_loss_cls)
            return dict(loss_cls=w_loss_cls)

        if self.reduction == 'sum':
            loss_cls = torch.sum(loss_cls, dim=-1)
        return dict(loss_cls=torch.mean(loss_cls))

    if self.loss_type == 'individual':
        losses = {}
        loss_weights = {}
        for name, num, start_idx in zip(self.categories,
                                        self.category_nums,
                                        self.category_startidx):
            category_score = cls_score[:, start_idx:start_idx + num]
            category_label = label[:, start_idx:start_idx + num]
            category_loss = F.binary_cross_entropy_with_logits(
                category_score, category_label, reduction='none')
            if self.reduction == 'mean':
                category_loss = torch.mean(category_loss, dim=1)
            elif self.reduction == 'sum':
                category_loss = torch.sum(category_loss, dim=1)

            idx = self.categories.index(name)
            if self.with_mask:
                category_mask_i = category_mask[:, idx].reshape(-1)
                # there should be at least one sample which contains tags
                # in this category
                if torch.sum(category_mask_i) < 0.5:
                    losses[f'{name}_LOSS'] = torch.tensor(
                        .0, device=get_device())
                    loss_weights[f'{name}_LOSS'] = .0
                    continue
                category_loss = torch.sum(category_loss * category_mask_i)
                category_loss = category_loss / torch.sum(category_mask_i)
            else:
                category_loss = torch.mean(category_loss)
            # We name the loss of each category as 'LOSS', since we only
            # want to monitor them, not backward them. We will also
            # provide the loss used for backward in the losses dictionary
            losses[f'{name}_LOSS'] = category_loss
            loss_weights[f'{name}_LOSS'] = self.category_loss_weights[idx]
        loss_weight_sum = sum(loss_weights.values())
        loss_weights = {
            k: v / loss_weight_sum
            for k, v in loss_weights.items()
        }
        loss_cls = sum([losses[k] * loss_weights[k] for k in losses])
        losses['loss_cls'] = loss_cls
        # We also trace the loss weights
        losses.update({
            k + '_weight': torch.tensor(v).to(losses[k].device)
            for k, v in loss_weights.items()
        })
        # Note that the loss weights are just for reference.
        return losses
    else:
        raise ValueError("loss_type should be 'all' or 'individual', "
                         f'but got {self.loss_type}')
Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. mask (torch.Tensor): The mask of tags. 0 indicates that the category of this tag is missing in the label of the video. category_mask (torch.Tensor): The category mask. For each sample, it's a tensor with length `len(self.categories)`, denoting whether each category is labeled for this video. Returns: dict: The returned CrossEntropy-based losses.
_forward
python
open-mmlab/mmaction2
mmaction/models/losses/hvu_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/hvu_loss.py
Apache-2.0
def _forward(self, cls_score: torch.Tensor, label: torch.Tensor,
             **kwargs) -> torch.Tensor:
    """Forward function.

    Args:
        cls_score (torch.Tensor): The class score.
        label (torch.Tensor): The ground truth label.
        kwargs: Any keyword argument to be used to calculate
            CrossEntropy loss.

    Returns:
        torch.Tensor: The returned CrossEntropy loss.
    """
    if cls_score.size() == label.size():
        # calculate loss for soft label

        assert cls_score.dim() == 2, 'Only support 2-dim soft label'
        assert len(kwargs) == 0, \
            ('For now, no extra args are supported for soft label, '
             f'but get {kwargs}')

        lsm = F.log_softmax(cls_score, 1)
        if self.class_weight is not None:
            self.class_weight = self.class_weight.to(cls_score.device)
            lsm = lsm * self.class_weight.unsqueeze(0)
        loss_cls = -(label * lsm).sum(1)

        # default reduction 'mean'
        if self.class_weight is not None:
            # Use weighted average as pytorch CrossEntropyLoss does.
            # For more information, please visit https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html # noqa
            loss_cls = loss_cls.sum() / torch.sum(
                self.class_weight.unsqueeze(0) * label)
        else:
            loss_cls = loss_cls.mean()
    else:
        # calculate loss for hard label

        if self.class_weight is not None:
            assert 'weight' not in kwargs, \
                "The key 'weight' already exists."
            kwargs['weight'] = self.class_weight.to(cls_score.device)
        loss_cls = F.cross_entropy(cls_score, label, **kwargs)

    return loss_cls
Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. kwargs: Any keyword argument to be used to calculate CrossEntropy loss. Returns: torch.Tensor: The returned CrossEntropy loss.
_forward
python
open-mmlab/mmaction2
mmaction/models/losses/cross_entropy_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/cross_entropy_loss.py
Apache-2.0
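The soft-label branch above reduces to standard cross entropy when the soft label happens to be one-hot; a quick numerical check of that equivalence (no class weights involved):

import torch
import torch.nn.functional as F

cls_score = torch.randn(4, 5)
hard = torch.randint(0, 5, (4,))
soft = F.one_hot(hard, num_classes=5).float()

lsm = F.log_softmax(cls_score, 1)
soft_loss = -(soft * lsm).sum(1).mean()
print(torch.allclose(soft_loss, F.cross_entropy(cls_score, hard)))  # True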
def _forward(self, cls_score: torch.Tensor, label: torch.Tensor,
             **kwargs) -> torch.Tensor:
    """Forward function.

    Args:
        cls_score (torch.Tensor): The class score.
        label (torch.Tensor): The ground truth label.
        kwargs: Any keyword argument to be used to calculate
            bce loss with logits.

    Returns:
        torch.Tensor: The returned bce loss with logits.
    """
    if self.class_weight is not None:
        assert 'weight' not in kwargs, "The key 'weight' already exists."
        kwargs['weight'] = self.class_weight.to(cls_score.device)
    loss_cls = F.binary_cross_entropy_with_logits(cls_score, label,
                                                  **kwargs)
    return loss_cls
Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. kwargs: Any keyword argument to be used to calculate bce loss with logits. Returns: torch.Tensor: The returned bce loss with logits.
_forward
python
open-mmlab/mmaction2
mmaction/models/losses/cross_entropy_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/cross_entropy_loss.py
Apache-2.0
def _forward(self, cls_score: torch.Tensor, label: torch.Tensor,
             **kwargs) -> torch.Tensor:
    """Forward function.

    Args:
        cls_score (torch.Tensor): The class score.
        label (torch.Tensor): The ground truth label.
        kwargs: Any keyword argument to be used to calculate
            the focal loss.

    Returns:
        torch.Tensor: The returned class-balanced focal loss.
    """
    weights = torch.tensor(self.weights).float().to(cls_score.device)
    label_one_hot = F.one_hot(label, self.num_classes).float()
    weights = weights.unsqueeze(0)
    weights = weights.repeat(label_one_hot.shape[0], 1) * label_one_hot
    weights = weights.sum(1)
    weights = weights.unsqueeze(1)
    weights = weights.repeat(1, self.num_classes)

    BCELoss = F.binary_cross_entropy_with_logits(
        input=cls_score, target=label_one_hot, reduction='none')

    modulator = 1.0
    if self.gamma:
        modulator = torch.exp(-self.gamma * label_one_hot * cls_score -
                              self.gamma *
                              torch.log(1 + torch.exp(-1.0 * cls_score)))

    loss = modulator * BCELoss
    weighted_loss = weights * loss
    focal_loss = torch.sum(weighted_loss)
    focal_loss /= torch.sum(label_one_hot)
    return focal_loss
Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. kwargs: Any keyword argument to be used to calculate the focal loss. Returns: torch.Tensor: The returned class-balanced focal loss.
_forward
python
open-mmlab/mmaction2
mmaction/models/losses/cross_entropy_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/cross_entropy_loss.py
Apache-2.0
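The exponential-form modulator in the loss above is algebraically the familiar focal factor: (1 - p)^gamma for positive entries and p^gamma for negative ones, with p = sigmoid(score). A quick numerical check of that identity, assuming nothing beyond plain torch:

import torch

gamma = 2.0
score = torch.randn(6)
label = torch.randint(0, 2, (6,)).float()

modulator = torch.exp(-gamma * label * score -
                      gamma * torch.log(1 + torch.exp(-score)))

p = torch.sigmoid(score)
focal = torch.where(label > 0, (1 - p) ** gamma, p ** gamma)
print(torch.allclose(modulator, focal, atol=1e-6))  # True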
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call."""
    n, c, t, v = x.shape
    res = self.down(x) if self.with_res else 0

    A_switch = {None: self.A, 'init': self.A}
    if hasattr(self, 'PA'):
        A_switch.update({
            'offset': self.A + self.PA,
            'importance': self.A * self.PA
        })
    A = A_switch[self.adaptive]

    if self.conv_pos == 'pre':
        x = self.conv(x)
        x = x.view(n, self.num_subsets, -1, t, v)
        x = torch.einsum('nkctv,kvw->nctw', (x, A)).contiguous()
    elif self.conv_pos == 'post':
        x = torch.einsum('nctv,kvw->nkctw', (x, A)).contiguous()
        x = x.view(n, -1, t, v)
        x = self.conv(x)

    return self.act(self.bn(x) + res)
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/utils/gcn_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/gcn_utils.py
Apache-2.0
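A minimal standalone sketch (all sizes are illustrative, not from the source) of the 'nkctv,kvw->nctw' einsum used in the forward above: each of the K adjacency subsets mixes features over the V joints, and the subset axis is summed out.

import torch

n, k, c, t, v = 2, 3, 4, 5, 17  # batch, subsets, channels, frames, joints
x = torch.randn(n, k, c, t, v)
A = torch.randn(k, v, v)  # one adjacency matrix per subset
out = torch.einsum('nkctv,kvw->nctw', x, A)
assert out.shape == (n, c, t, v)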
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call."""
    N, C, T, V = x.size()
    y = None
    if self.adaptive:
        for i in range(self.num_subset):
            A1 = self.conv_a[i](x).permute(0, 3, 1, 2).contiguous().view(
                N, V, self.inter_c * T)
            A2 = self.conv_b[i](x).view(N, self.inter_c * T, V)
            A1 = self.tan(torch.matmul(A1, A2) / A1.size(-1))  # N V V
            A1 = self.A[i] + A1 * self.alpha
            A2 = x.view(N, C * T, V)
            z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V))
            y = z + y if y is not None else z
    else:
        for i in range(self.num_subset):
            A1 = self.A[i]
            A2 = x.view(N, C * T, V)
            z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V))
            y = z + y if y is not None else z

    y = self.relu(self.bn(y) + self.down(x))

    if self.attention:
        # spatial attention first
        se = y.mean(-2)  # N C V
        se1 = self.sigmoid(self.conv_sa(se))  # N 1 V
        y = y * se1.unsqueeze(-2) + y
        # then temporal attention
        se = y.mean(-1)  # N C T
        se1 = self.sigmoid(self.conv_ta(se))  # N 1 T
        y = y * se1.unsqueeze(-1) + y
        # finally channel attention (squeeze-and-excitation style)
        se = y.mean(-1).mean(-1)  # N C
        se1 = self.relu(self.fc1c(se))
        se2 = self.sigmoid(self.fc2c(se1))  # N C
        y = y * se2.unsqueeze(-1).unsqueeze(-1) + y
    return y
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/utils/gcn_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/gcn_utils.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call."""
    return self.drop(self.bn(self.conv(x)))
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/utils/gcn_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/gcn_utils.py
Apache-2.0
def inner_forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call."""
    N, C, T, V = x.shape

    branch_outs = []
    for tempconv in self.branches:
        out = tempconv(x)
        branch_outs.append(out)

    feat = torch.cat(branch_outs, dim=1)
    feat = self.transform(feat)
    return feat
Defines the computation performed at every call.
inner_forward
python
open-mmlab/mmaction2
mmaction/models/utils/gcn_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/gcn_utils.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call."""
    out = self.inner_forward(x)
    out = self.bn(out)
    return self.drop(out)
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/utils/gcn_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/gcn_utils.py
Apache-2.0
def k_adjacency(A: Union[torch.Tensor, np.ndarray],
                k: int,
                with_self: bool = False,
                self_factor: float = 1) -> np.ndarray:
    """Construct k-adjacency matrix.

    Args:
        A (torch.Tensor or np.ndarray): The adjacency matrix.
        k (int): The number of hops.
        with_self (bool): Whether to add self-loops to the k-adjacency
            matrix. The self-loops are critical for learning the
            relationships between the current joint and its k-hop
            neighbors. Defaults to False.
        self_factor (float): The scale factor to the added identity
            matrix. Defaults to 1.

    Returns:
        np.ndarray: The k-adjacency matrix.
    """
    # A is a 2D square array
    if isinstance(A, torch.Tensor):
        A = A.data.cpu().numpy()
    assert isinstance(A, np.ndarray)
    Iden = np.eye(len(A), dtype=A.dtype)
    if k == 0:
        return Iden
    Ak = np.minimum(np.linalg.matrix_power(A + Iden, k), 1) - np.minimum(
        np.linalg.matrix_power(A + Iden, k - 1), 1)
    if with_self:
        Ak += (self_factor * Iden)
    return Ak
Construct k-adjacency matrix.

    Args:
        A (torch.Tensor or np.ndarray): The adjacency matrix.
        k (int): The number of hops.
        with_self (bool): Whether to add self-loops to the k-adjacency
            matrix. The self-loops are critical for learning the
            relationships between the current joint and its k-hop
            neighbors. Defaults to False.
        self_factor (float): The scale factor to the added identity
            matrix. Defaults to 1.

    Returns:
        np.ndarray: The k-adjacency matrix.
k_adjacency
python
open-mmlab/mmaction2
mmaction/models/utils/graph.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/graph.py
Apache-2.0
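A hedged usage sketch for `k_adjacency` on a 4-node path graph (the graph itself is illustrative):

import numpy as np

# path graph 0-1-2-3
A = np.array([[0, 1, 0, 0],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [0, 0, 1, 0]], dtype=np.float32)
A2 = k_adjacency(A, k=2)
# A2[i, j] is 1 exactly when j is two hops away from i, e.g. nodes 0 and 2.
assert A2[0, 2] == 1 and A2[0, 1] == 0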
def edge2mat(edges: List[Tuple[int, int]], num_node: int) -> np.ndarray:
    """Get adjacency matrix from edges.

    Args:
        edges (list[tuple[int, int]]): The edges of the graph.
        num_node (int): The number of nodes of the graph.

    Returns:
        np.ndarray: The adjacency matrix.
    """
    A = np.zeros((num_node, num_node))
    for i, j in edges:
        A[j, i] = 1
    return A
Get adjacency matrix from edges.

    Args:
        edges (list[tuple[int, int]]): The edges of the graph.
        num_node (int): The number of nodes of the graph.

    Returns:
        np.ndarray: The adjacency matrix.
edge2mat
python
open-mmlab/mmaction2
mmaction/models/utils/graph.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/graph.py
Apache-2.0
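A hedged example of the `edge2mat` convention; note that edge `(i, j)` sets `A[j, i]`, i.e. a directed edge i -> j lands in row j, column i.

import numpy as np

A = edge2mat([(0, 1), (1, 2), (2, 3)], num_node=4)
assert A[1, 0] == 1 and A[2, 1] == 1 and A[3, 2] == 1
assert A.sum() == 3  # one entry per directed edge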
def normalize_digraph(A: np.ndarray, dim: int = 0) -> np.ndarray:
    """Normalize the digraph according to the given dimension.

    Args:
        A (np.ndarray): The adjacency matrix.
        dim (int): The dimension to perform normalization.
            Defaults to 0.

    Returns:
        np.ndarray: The normalized adjacency matrix.
    """
    # A is a 2D square array
    Dl = np.sum(A, dim)
    h, w = A.shape
    Dn = np.zeros((w, w))

    for i in range(w):
        if Dl[i] > 0:
            Dn[i, i] = Dl[i]**(-1)

    AD = np.dot(A, Dn)
    return AD
Normalize the digraph according to the given dimension.

    Args:
        A (np.ndarray): The adjacency matrix.
        dim (int): The dimension to perform normalization.
            Defaults to 0.

    Returns:
        np.ndarray: The normalized adjacency matrix.
normalize_digraph
python
open-mmlab/mmaction2
mmaction/models/utils/graph.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/graph.py
Apache-2.0
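A hedged example of `normalize_digraph` with the default `dim=0`: each column of A is divided by its column sum, so the columns of the result sum to one.

import numpy as np

A = np.array([[0., 1.],
              [1., 1.]])
AD = normalize_digraph(A)
# column sums of A are [1, 2], so the second column is scaled by 0.5
assert np.allclose(AD, [[0., 0.5], [1., 0.5]])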
def get_hop_distance(num_node: int,
                     edges: List[Tuple[int, int]],
                     max_hop: int = 1) -> np.ndarray:
    """Get n-hop distance matrix by edges.

    Args:
        num_node (int): The number of nodes of the graph.
        edges (list[tuple[int, int]]): The edges of the graph.
        max_hop (int): The maximal distance between two connected nodes.
            Defaults to 1.

    Returns:
        np.ndarray: The n-hop distance matrix.
    """
    A = np.eye(num_node)

    for i, j in edges:
        A[i, j] = 1
        A[j, i] = 1

    # compute hop steps
    hop_dis = np.zeros((num_node, num_node)) + np.inf
    transfer_mat = [
        np.linalg.matrix_power(A, d) for d in range(max_hop + 1)
    ]
    arrive_mat = (np.stack(transfer_mat) > 0)
    for d in range(max_hop, -1, -1):
        hop_dis[arrive_mat[d]] = d
    return hop_dis
Get n-hop distance matrix by edges.

    Args:
        num_node (int): The number of nodes of the graph.
        edges (list[tuple[int, int]]): The edges of the graph.
        max_hop (int): The maximal distance between two connected nodes.
            Defaults to 1.

    Returns:
        np.ndarray: The n-hop distance matrix.
get_hop_distance
python
open-mmlab/mmaction2
mmaction/models/utils/graph.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/graph.py
Apache-2.0
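A hedged example of `get_hop_distance` on a 3-node path graph 0-1-2; nodes beyond `max_hop` would keep the `inf` sentinel.

hop = get_hop_distance(3, [(0, 1), (1, 2)], max_hop=2)
assert hop[0, 0] == 0 and hop[0, 1] == 1 and hop[0, 2] == 2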
def set_layout(self, layout: Union[str, dict]) -> None:
    """Initialize the layout of candidates."""

    if layout == 'openpose':
        self.num_node = 18
        self.inward = [(4, 3), (3, 2), (7, 6), (6, 5), (13, 12), (12, 11),
                       (10, 9), (9, 8), (11, 5), (8, 2), (5, 1), (2, 1),
                       (0, 1), (15, 0), (14, 0), (17, 15), (16, 14)]
        self.center = 1
    elif layout == 'nturgb+d':
        self.num_node = 25
        neighbor_base = [(1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5),
                         (7, 6), (8, 7), (9, 21), (10, 9), (11, 10),
                         (12, 11), (13, 1), (14, 13), (15, 14), (16, 15),
                         (17, 1), (18, 17), (19, 18), (20, 19), (22, 8),
                         (23, 8), (24, 12), (25, 12)]
        self.inward = [(i - 1, j - 1) for (i, j) in neighbor_base]
        self.center = 21 - 1
    elif layout == 'coco':
        self.num_node = 17
        self.inward = [(15, 13), (13, 11), (16, 14), (14, 12), (11, 5),
                       (12, 6), (9, 7), (7, 5), (10, 8), (8, 6), (5, 0),
                       (6, 0), (1, 0), (3, 1), (2, 0), (4, 2)]
        self.center = 0
    elif isinstance(layout, dict):
        self.num_node = layout['num_node']
        self.inward = layout['inward']
        self.center = layout['center']
    else:
        raise ValueError(f'Invalid layout: {layout}')

    self.self_link = [(i, i) for i in range(self.num_node)]
    self.outward = [(j, i) for (i, j) in self.inward]
    self.neighbor = self.inward + self.outward
Initialize the layout of candidates.
set_layout
python
open-mmlab/mmaction2
mmaction/models/utils/graph.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/graph.py
Apache-2.0
def stgcn_spatial(self) -> np.ndarray:
    """ST-GCN spatial mode."""
    adj = np.zeros((self.num_node, self.num_node))
    adj[self.hop_dis <= self.max_hop] = 1
    normalize_adj = normalize_digraph(adj)
    hop_dis = self.hop_dis
    center = self.center

    A = []
    for hop in range(self.max_hop + 1):
        a_close = np.zeros((self.num_node, self.num_node))
        a_further = np.zeros((self.num_node, self.num_node))
        for i in range(self.num_node):
            for j in range(self.num_node):
                if hop_dis[j, i] == hop:
                    if hop_dis[j, center] >= hop_dis[i, center]:
                        a_close[j, i] = normalize_adj[j, i]
                    else:
                        a_further[j, i] = normalize_adj[j, i]
        A.append(a_close)
        if hop > 0:
            A.append(a_further)
    return np.stack(A)
ST-GCN spatial mode.
stgcn_spatial
python
open-mmlab/mmaction2
mmaction/models/utils/graph.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/graph.py
Apache-2.0
def spatial(self) -> np.ndarray:
    """Standard spatial mode."""
    Iden = edge2mat(self.self_link, self.num_node)
    In = normalize_digraph(edge2mat(self.inward, self.num_node))
    Out = normalize_digraph(edge2mat(self.outward, self.num_node))
    A = np.stack((Iden, In, Out))
    return A
Standard spatial mode.
spatial
python
open-mmlab/mmaction2
mmaction/models/utils/graph.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/graph.py
Apache-2.0
def binary_adj(self) -> np.ndarray:
    """Construct an adjacency matrix for an undirected graph."""
    A = edge2mat(self.neighbor, self.num_node)
    return A[None]
Construct an adjacency matrix for an undirected graph.
binary_adj
python
open-mmlab/mmaction2
mmaction/models/utils/graph.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/graph.py
Apache-2.0
def get_pad_shape(self, input_shape):
    """Calculate the padding size of input.

    Args:
        input_shape (:obj:`torch.Size`): arrange as (T, H, W).

    Returns:
        Tuple[int]: The padding size along the original T, H and W
            directions.
    """
    input_t, input_h, input_w = input_shape
    kernel_d, kernel_h, kernel_w = self.kernel_size
    stride_d, stride_h, stride_w = self.stride
    output_d = math.ceil(input_t / stride_d)
    output_h = math.ceil(input_h / stride_h)
    output_w = math.ceil(input_w / stride_w)
    pad_d = max((output_d - 1) * stride_d +
                (kernel_d - 1) * self.dilation[0] + 1 - input_t, 0)
    pad_h = max((output_h - 1) * stride_h +
                (kernel_h - 1) * self.dilation[1] + 1 - input_h, 0)
    pad_w = max((output_w - 1) * stride_w +
                (kernel_w - 1) * self.dilation[2] + 1 - input_w, 0)
    return pad_d, pad_h, pad_w
Calculate the padding size of input.

    Args:
        input_shape (:obj:`torch.Size`): arrange as (T, H, W).

    Returns:
        Tuple[int]: The padding size along the original T, H and W
            directions.
get_pad_shape
python
open-mmlab/mmaction2
mmaction/models/utils/embed.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/embed.py
Apache-2.0
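A standalone sketch of the 'same'-padding arithmetic used in `get_pad_shape`, reduced to one axis; the helper name and the sizes here are illustrative, not part of the source.

import math

def same_pad_1d(size, kernel, stride, dilation=1):
    # padding so that the output length equals ceil(size / stride)
    out = math.ceil(size / stride)
    return max((out - 1) * stride + (kernel - 1) * dilation + 1 - size, 0)

# a length-7 axis with kernel 3 and stride 2 needs 2 extra elements
assert same_pad_1d(7, kernel=3, stride=2) == 2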
def forward(self, x):
    """Add padding to `x`.

    Args:
        x (Tensor): Input tensor has shape (B, C, T, H, W).

    Returns:
        Tensor: The tensor with adaptive padding.
    """
    pad_d, pad_h, pad_w = self.get_pad_shape(x.size()[-3:])
    if pad_d > 0 or pad_h > 0 or pad_w > 0:
        if self.padding == 'corner':
            x = F.pad(x, [0, pad_w, 0, pad_h, 0, pad_d])
        elif self.padding == 'same':
            x = F.pad(x, [
                pad_w // 2,
                pad_w - pad_w // 2,
                pad_h // 2,
                pad_h - pad_h // 2,
                pad_d // 2,
                pad_d - pad_d // 2,
            ])
    return x
Add padding to `x`.

    Args:
        x (Tensor): Input tensor has shape (B, C, T, H, W).

    Returns:
        Tensor: The tensor with adaptive padding.
forward
python
open-mmlab/mmaction2
mmaction/models/utils/embed.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/embed.py
Apache-2.0
def forward(self, x):
    """
    Args:
        x (Tensor): Has shape (B, C, T, H, W). In most cases, C is 3.

    Returns:
        tuple: Contains merged results and its spatial shape.

        - x (Tensor): Has shape (B, out_t * out_h * out_w, embed_dims)
        - out_size (tuple[int]): Spatial shape of x, arrange as
          (out_t, out_h, out_w).
    """
    if self.adaptive_padding:
        x = self.adaptive_padding(x)

    x = self.projection(x)
    out_size = (x.shape[2], x.shape[3], x.shape[4])
    x = x.flatten(2).transpose(1, 2)
    if self.norm is not None:
        x = self.norm(x)
    return x, out_size
Args:
        x (Tensor): Has shape (B, C, T, H, W). In most cases, C is 3.

    Returns:
        tuple: Contains merged results and its spatial shape.

        - x (Tensor): Has shape (B, out_t * out_h * out_w, embed_dims)
        - out_size (tuple[int]): Spatial shape of x, arrange as
          (out_t, out_h, out_w).
forward
python
open-mmlab/mmaction2
mmaction/models/utils/embed.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/embed.py
Apache-2.0
def do_blending(self, imgs: torch.Tensor, label: torch.Tensor,
                **kwargs) -> Tuple:
    """Blending images process."""
    raise NotImplementedError
Blending images process.
do_blending
python
open-mmlab/mmaction2
mmaction/models/utils/blending_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/blending_utils.py
Apache-2.0
def __call__(self, imgs: torch.Tensor, batch_data_samples: SampleList,
             **kwargs) -> Tuple:
    """Blending data in a mini-batch.

    Images are float tensors with the shape of (B, N, C, H, W) for 2D
    recognizers or (B, N, C, T, H, W) for 3D recognizers.

    Besides, labels are converted from hard labels to soft labels.
    Hard labels are integer tensors with the shape of (B, ) and all of
    the elements are in the range [0, num_classes - 1]. Soft labels
    (probability distribution over classes) are float tensors with the
    shape of (B, num_classes) and all of the elements are in the range
    [0, 1].

    Args:
        imgs (torch.Tensor): Model input images, float tensor with the
            shape of (B, N, C, H, W) or (B, N, C, T, H, W).
        batch_data_samples (List[:obj:`ActionDataSample`]): The batch
            data samples. It usually includes information such as
            `gt_label`.

    Returns:
        mixed_imgs (torch.Tensor): Blending images, float tensor with
            the same shape of the input imgs.
        batch_data_samples (List[:obj:`ActionDataSample`]): The
            modified batch data samples. ``gt_label`` in each data
            sample are converted from a hard label to a blended soft
            label, float tensor with the shape of (num_classes, ) and
            all elements are in range [0, 1].
    """
    label = [x.gt_label for x in batch_data_samples]
    # single-label classification
    if label[0].size(0) == 1:
        label = torch.tensor(label, dtype=torch.long).to(imgs.device)
        one_hot_label = F.one_hot(label, num_classes=self.num_classes)
    # multi-label classification
    else:
        one_hot_label = torch.stack(label)

    mixed_imgs, mixed_label = self.do_blending(imgs, one_hot_label,
                                               **kwargs)

    for label_item, sample in zip(mixed_label, batch_data_samples):
        sample.set_gt_label(label_item)

    return mixed_imgs, batch_data_samples
Blending data in a mini-batch.

    Images are float tensors with the shape of (B, N, C, H, W) for 2D
    recognizers or (B, N, C, T, H, W) for 3D recognizers.

    Besides, labels are converted from hard labels to soft labels.
    Hard labels are integer tensors with the shape of (B, ) and all of
    the elements are in the range [0, num_classes - 1]. Soft labels
    (probability distribution over classes) are float tensors with the
    shape of (B, num_classes) and all of the elements are in the range
    [0, 1].

    Args:
        imgs (torch.Tensor): Model input images, float tensor with the
            shape of (B, N, C, H, W) or (B, N, C, T, H, W).
        batch_data_samples (List[:obj:`ActionDataSample`]): The batch
            data samples. It usually includes information such as
            `gt_label`.

    Returns:
        mixed_imgs (torch.Tensor): Blending images, float tensor with
            the same shape of the input imgs.
        batch_data_samples (List[:obj:`ActionDataSample`]): The
            modified batch data samples. ``gt_label`` in each data
            sample are converted from a hard label to a blended soft
            label, float tensor with the shape of (num_classes, ) and
            all elements are in range [0, 1].
__call__
python
open-mmlab/mmaction2
mmaction/models/utils/blending_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/blending_utils.py
Apache-2.0
def do_blending(self, imgs: torch.Tensor, label: torch.Tensor,
                **kwargs) -> Tuple:
    """Blending images with mixup.

    Args:
        imgs (torch.Tensor): Model input images, float tensor with the
            shape of (B, N, C, H, W) or (B, N, C, T, H, W).
        label (torch.Tensor): One hot labels, integer tensor with the
            shape of (B, num_classes).

    Returns:
        tuple: A tuple of blended images and labels.
    """
    assert len(kwargs) == 0, f'unexpected kwargs for mixup {kwargs}'

    lam = self.beta.sample()
    batch_size = imgs.size(0)
    rand_index = torch.randperm(batch_size)

    mixed_imgs = lam * imgs + (1 - lam) * imgs[rand_index, :]
    mixed_label = lam * label + (1 - lam) * label[rand_index, :]

    return mixed_imgs, mixed_label
Blending images with mixup.

    Args:
        imgs (torch.Tensor): Model input images, float tensor with the
            shape of (B, N, C, H, W) or (B, N, C, T, H, W).
        label (torch.Tensor): One hot labels, integer tensor with the
            shape of (B, num_classes).

    Returns:
        tuple: A tuple of blended images and labels.
do_blending
python
open-mmlab/mmaction2
mmaction/models/utils/blending_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/blending_utils.py
Apache-2.0
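A self-contained sketch of the mixup blend above, outside the class; the Beta parameters and tensor shapes are illustrative assumptions.

import torch
from torch.distributions.beta import Beta

imgs = torch.randn(4, 3, 8, 32, 32)  # (B, C, T, H, W)
label = torch.eye(10)[torch.tensor([1, 3, 5, 7])]  # one-hot labels
lam = Beta(0.2, 0.2).sample()
rand_index = torch.randperm(imgs.size(0))
mixed_imgs = lam * imgs + (1 - lam) * imgs[rand_index]
mixed_label = lam * label + (1 - lam) * label[rand_index]
# each blended label is still a valid probability distribution
assert torch.allclose(mixed_label.sum(1), torch.ones(4))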
def rand_bbox(img_size: torch.Size, lam: torch.Tensor) -> Tuple:
    """Generate a random bounding box."""
    w = img_size[-1]
    h = img_size[-2]
    cut_rat = torch.sqrt(1. - lam)
    cut_w = torch.tensor(int(w * cut_rat))
    cut_h = torch.tensor(int(h * cut_rat))

    # uniform
    cx = torch.randint(w, (1, ))[0]
    cy = torch.randint(h, (1, ))[0]

    bbx1 = torch.clamp(cx - floor_div(cut_w, 2), 0, w)
    bby1 = torch.clamp(cy - floor_div(cut_h, 2), 0, h)
    bbx2 = torch.clamp(cx + floor_div(cut_w, 2), 0, w)
    bby2 = torch.clamp(cy + floor_div(cut_h, 2), 0, h)

    return bbx1, bby1, bbx2, bby2
Generate a random bounding box.
rand_bbox
python
open-mmlab/mmaction2
mmaction/models/utils/blending_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/blending_utils.py
Apache-2.0
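A hedged usage sketch for `rand_bbox` (calling it directly, assuming it is exposed as a static method, as the `self.rand_bbox` call in the cutmix blend below suggests); `floor_div` is the module's own floor-division helper.

import torch

lam = torch.tensor(0.75)
bbx1, bby1, bbx2, bby2 = rand_bbox(torch.Size([4, 3, 8, 32, 32]), lam)
# cut_rat = sqrt(1 - 0.75) = 0.5, so the cut box is at most 16 x 16
assert (bbx2 - bbx1) <= 16 and (bby2 - bby1) <= 16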
def do_blending(self, imgs: torch.Tensor, label: torch.Tensor,
                **kwargs) -> Tuple:
    """Blending images with cutmix.

    Args:
        imgs (torch.Tensor): Model input images, float tensor with the
            shape of (B, N, C, H, W) or (B, N, C, T, H, W).
        label (torch.Tensor): One hot labels, integer tensor with the
            shape of (B, num_classes).

    Returns:
        tuple: A tuple of blended images and labels.
    """
    assert len(kwargs) == 0, f'unexpected kwargs for cutmix {kwargs}'

    batch_size = imgs.size(0)
    rand_index = torch.randperm(batch_size)
    lam = self.beta.sample()

    bbx1, bby1, bbx2, bby2 = self.rand_bbox(imgs.size(), lam)
    imgs[:, ..., bby1:bby2, bbx1:bbx2] = imgs[rand_index, ..., bby1:bby2,
                                              bbx1:bbx2]
    lam = 1 - (1.0 * (bbx2 - bbx1) * (bby2 - bby1) /
               (imgs.size()[-1] * imgs.size()[-2]))

    label = lam * label + (1 - lam) * label[rand_index, :]

    return imgs, label
Blending images with cutmix.

    Args:
        imgs (torch.Tensor): Model input images, float tensor with the
            shape of (B, N, C, H, W) or (B, N, C, T, H, W).
        label (torch.Tensor): One hot labels, integer tensor with the
            shape of (B, num_classes).

    Returns:
        tuple: A tuple of blended images and labels.
do_blending
python
open-mmlab/mmaction2
mmaction/models/utils/blending_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/blending_utils.py
Apache-2.0
def do_blending(self, imgs: torch.Tensor, label: torch.Tensor,
                **kwargs) -> Tuple:
    """Randomly apply batch augmentations to the batch inputs and batch
    data samples."""
    aug_index = np.random.choice(len(self.augments), p=self.probs)
    aug = self.augments[aug_index]

    if aug is not None:
        return aug.do_blending(imgs, label, **kwargs)
    else:
        return imgs, label
Randomly apply batch augmentations to the batch inputs and batch data samples.
do_blending
python
open-mmlab/mmaction2
mmaction/models/utils/blending_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/blending_utils.py
Apache-2.0
def loss(self, x: Union[Tensor, Tuple[Tensor]],
         rpn_results_list: InstanceList, data_samples: SampleList,
         **kwargs) -> dict:
    """Perform forward propagation and loss calculation of the detection
    roi on the features of the upstream network.

    Args:
        x (Tensor or Tuple[Tensor]): The image features extracted by
            the upstream network.
        rpn_results_list (List[:obj:`InstanceData`]): List of region
            proposals.
        data_samples (List[:obj:`ActionDataSample`]): The batch data
            samples.

    Returns:
        Dict[str, Tensor]: A dictionary of loss components.
    """
    assert len(rpn_results_list) == len(data_samples)
    batch_gt_instances = []
    for data_sample in data_samples:
        batch_gt_instances.append(data_sample.gt_instances)

    # assign gts and sample proposals
    num_imgs = len(data_samples)
    sampling_results = []
    for i in range(num_imgs):
        # rename rpn_results.bboxes to rpn_results.priors
        rpn_results = rpn_results_list[i]
        rpn_results.priors = rpn_results.pop('bboxes')

        assign_result = self.bbox_assigner.assign(rpn_results,
                                                  batch_gt_instances[i],
                                                  None)
        sampling_result = self.bbox_sampler.sample(assign_result,
                                                   rpn_results,
                                                   batch_gt_instances[i])
        sampling_results.append(sampling_result)

    # LFB needs meta_info: 'img_key'
    batch_img_metas = [
        data_sample.metainfo for data_sample in data_samples
    ]

    losses = dict()
    # bbox head forward and loss
    bbox_results = self.bbox_loss(x, sampling_results, batch_img_metas)
    losses.update(bbox_results['loss_bbox'])

    return losses
Perform forward propagation and loss calculation of the detection roi on
the features of the upstream network.

    Args:
        x (Tensor or Tuple[Tensor]): The image features extracted by
            the upstream network.
        rpn_results_list (List[:obj:`InstanceData`]): List of region
            proposals.
        data_samples (List[:obj:`ActionDataSample`]): The batch data
            samples.

    Returns:
        Dict[str, Tensor]: A dictionary of loss components.
loss
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py
Apache-2.0
def _bbox_forward(self, x: Union[Tensor, Tuple[Tensor]], rois: Tensor,
                  batch_img_metas: List[dict], **kwargs) -> dict:
    """Box head forward function used in both training and testing.

    Args:
        x (Tensor or Tuple[Tensor]): The image features extracted by
            the upstream network.
        rois (Tensor): RoIs with the shape (n, 5) where the first
            column indicates batch id of each RoI.
        batch_img_metas (List[dict]): List of image information.

    Returns:
        dict[str, Tensor]: Usually returns a dictionary with keys:

        - `cls_score` (Tensor): Classification scores.
        - `bbox_pred` (Tensor): Box energies / deltas.
        - `bbox_feats` (Tensor): Extract bbox RoI features.
    """
    bbox_feats, global_feat = self.bbox_roi_extractor(x, rois)

    if self.with_shared_head:
        bbox_feats = self.shared_head(
            bbox_feats,
            feat=global_feat,
            rois=rois,
            img_metas=batch_img_metas)

    cls_score = self.bbox_head(bbox_feats)

    bbox_results = dict(cls_score=cls_score, bbox_feats=bbox_feats)
    return bbox_results
Box head forward function used in both training and testing.

    Args:
        x (Tensor or Tuple[Tensor]): The image features extracted by
            the upstream network.
        rois (Tensor): RoIs with the shape (n, 5) where the first
            column indicates batch id of each RoI.
        batch_img_metas (List[dict]): List of image information.

    Returns:
        dict[str, Tensor]: Usually returns a dictionary with keys:

        - `cls_score` (Tensor): Classification scores.
        - `bbox_pred` (Tensor): Box energies / deltas.
        - `bbox_feats` (Tensor): Extract bbox RoI features.
_bbox_forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py
Apache-2.0
def bbox_loss(self, x: Union[Tensor, Tuple[Tensor]],
              sampling_results: List[SamplingResult],
              batch_img_metas: List[dict], **kwargs) -> dict:
    """Perform forward propagation and loss calculation of the bbox head
    on the features of the upstream network.

    Args:
        x (Tensor or Tuple[Tensor]): The image features extracted by
            the upstream network.
        sampling_results (List[SamplingResult]): Sampling results.
        batch_img_metas (List[dict]): List of image information.

    Returns:
        dict[str, Tensor]: Usually returns a dictionary with keys:

        - `cls_score` (Tensor): Classification scores.
        - `bbox_pred` (Tensor): Box energies / deltas.
        - `bbox_feats` (Tensor): Extract bbox RoI features.
        - `loss_bbox` (dict): A dictionary of bbox loss components.
    """
    rois = bbox2roi([res.priors for res in sampling_results])
    bbox_results = self._bbox_forward(x, rois, batch_img_metas)

    bbox_loss_and_target = self.bbox_head.loss_and_target(
        cls_score=bbox_results['cls_score'],
        rois=rois,
        sampling_results=sampling_results,
        rcnn_train_cfg=self.train_cfg)

    bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox'])
    return bbox_results
Perform forward propagation and loss calculation of the bbox head on the
features of the upstream network.

    Args:
        x (Tensor or Tuple[Tensor]): The image features extracted by
            the upstream network.
        sampling_results (List[SamplingResult]): Sampling results.
        batch_img_metas (List[dict]): List of image information.

    Returns:
        dict[str, Tensor]: Usually returns a dictionary with keys:

        - `cls_score` (Tensor): Classification scores.
        - `bbox_pred` (Tensor): Box energies / deltas.
        - `bbox_feats` (Tensor): Extract bbox RoI features.
        - `loss_bbox` (dict): A dictionary of bbox loss components.
bbox_loss
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py
Apache-2.0
def predict(self, x: Union[Tensor, Tuple[Tensor]],
            rpn_results_list: InstanceList, data_samples: SampleList,
            **kwargs) -> InstanceList:
    """Perform forward propagation of the roi head and predict detection
    results on the features of the upstream network.

    Args:
        x (Tensor or Tuple[Tensor]): The image features extracted by
            the upstream network.
        rpn_results_list (List[:obj:`InstanceData`]): list of region
            proposals.
        data_samples (List[:obj:`ActionDataSample`]): The batch data
            samples.

    Returns:
        List[obj:`InstanceData`]: Detection results of each image.
        Each item usually contains following keys.

        - scores (Tensor): Classification scores, has a shape
          (num_instance, )
        - labels (Tensor): Labels of bboxes, has a shape
          (num_instances, ).
    """
    assert self.with_bbox, 'Bbox head must be implemented.'
    batch_img_metas = [
        data_sample.metainfo for data_sample in data_samples
    ]

    if isinstance(x, tuple):
        x_shape = x[0].shape
    else:
        x_shape = x.shape

    assert x_shape[0] == 1, 'only accept 1 sample at test mode'
    assert x_shape[0] == len(batch_img_metas) == len(rpn_results_list)

    results_list = self.predict_bbox(
        x, batch_img_metas, rpn_results_list, rcnn_test_cfg=self.test_cfg)

    return results_list
Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.

    Args:
        x (Tensor or Tuple[Tensor]): The image features extracted by
            the upstream network.
        rpn_results_list (List[:obj:`InstanceData`]): list of region
            proposals.
        data_samples (List[:obj:`ActionDataSample`]): The batch data
            samples.

    Returns:
        List[obj:`InstanceData`]: Detection results of each image.
        Each item usually contains following keys.

        - scores (Tensor): Classification scores, has a shape
          (num_instance, )
        - labels (Tensor): Labels of bboxes, has a shape
          (num_instances, ).
predict
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py
Apache-2.0
def predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict],
                 rpn_results_list: InstanceList,
                 rcnn_test_cfg: ConfigType) -> InstanceList:
    """Perform forward propagation of the bbox head and predict detection
    results on the features of the upstream network.

    Args:
        x (tuple[Tensor]): Feature maps of all scale level.
        batch_img_metas (list[dict]): List of image information.
        rpn_results_list (list[:obj:`InstanceData`]): List of region
            proposals.
        rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.

    Returns:
        list[:obj:`InstanceData`]: Detection results of each image
        after the post process. Each item usually contains following
        keys:

        - scores (Tensor): Classification scores, has a shape
          (num_instance, )
        - labels (Tensor): Labels of bboxes, has a shape
          (num_instances, ).
    """
    proposals = [res.bboxes for res in rpn_results_list]
    rois = bbox2roi(proposals)
    bbox_results = self._bbox_forward(x, rois, batch_img_metas)

    # split batch bbox prediction back to each image
    cls_scores = bbox_results['cls_score']
    num_proposals_per_img = tuple(len(p) for p in proposals)
    rois = rois.split(num_proposals_per_img, 0)
    cls_scores = cls_scores.split(num_proposals_per_img, 0)

    result_list = self.bbox_head.predict_by_feat(
        rois=rois,
        cls_scores=cls_scores,
        batch_img_metas=batch_img_metas,
        rcnn_test_cfg=rcnn_test_cfg)
    return result_list
Perform forward propagation of the bbox head and predict detection
results on the features of the upstream network.

    Args:
        x (tuple[Tensor]): Feature maps of all scale level.
        batch_img_metas (list[dict]): List of image information.
        rpn_results_list (list[:obj:`InstanceData`]): List of region
            proposals.
        rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.

    Returns:
        list[:obj:`InstanceData`]: Detection results of each image
        after the post process. Each item usually contains following
        keys:

        - scores (Tensor): Classification scores, has a shape
          (num_instance, )
        - labels (Tensor): Labels of bboxes, has a shape
          (num_instances, ).
predict_bbox
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py
Apache-2.0
def init_weights(self, **kwargs):
    """Weight Initialization for ACRNHead."""
    for m in self.modules():
        if isinstance(m, nn.Conv3d):
            kaiming_init(m)
        elif isinstance(m, _BatchNorm):
            constant_init(m, 1)
Weight Initialization for ACRNHead.
init_weights
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/acrn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/acrn_head.py
Apache-2.0
def forward(self, x, feat, rois, **kwargs):
    """Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The extracted RoI feature.
        feat (torch.Tensor): The context feature.
        rois (torch.Tensor): The regions of interest.

    Returns:
        torch.Tensor: The RoI features that have interacted with context
            feature.
    """
    # We use max pooling by default
    x = self.max_pool(x)

    h, w = feat.shape[-2:]
    x_tile = x.repeat(1, 1, 1, h, w)

    roi_inds = rois[:, 0].type(torch.long)
    roi_gfeat = feat[roi_inds]

    new_feat = torch.cat([x_tile, roi_gfeat], dim=1)
    new_feat = self.conv1(new_feat)
    new_feat = self.conv2(new_feat)

    for conv in self.convs:
        new_feat = conv(new_feat)

    return new_feat
Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The extracted RoI feature.
        feat (torch.Tensor): The context feature.
        rois (torch.Tensor): The regions of interest.

    Returns:
        torch.Tensor: The RoI features that have interacted with context
            feature.
forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/acrn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/acrn_head.py
Apache-2.0
def init_weights(self, pretrained=None):
    """Initiate the parameters either from existing checkpoint or from
    scratch."""
    if isinstance(pretrained, str):
        logger = MMLogger.get_current_instance()
        logger.info(f'load model from: {pretrained}')
        load_checkpoint(self, pretrained, strict=False, logger=logger)
    elif pretrained is None:
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                kaiming_init(m)
            elif isinstance(m, _BatchNorm):
                constant_init(m, 1)
        if self.zero_init_out_conv:
            constant_init(self.out_conv, 0, bias=0)
    else:
        raise TypeError('pretrained must be a str or None')
Initiate the parameters either from existing checkpoint or from scratch.
init_weights
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/fbo_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py
Apache-2.0
def forward(self, st_feat, lt_feat):
    """Defines the computation performed at every call."""
    n, c = st_feat.size(0), self.latent_channels
    num_st_feat, num_lt_feat = self.num_st_feat, self.num_lt_feat

    theta = self.st_feat_conv(st_feat)
    theta = theta.view(n, c, num_st_feat)

    phi = self.lt_feat_conv(lt_feat)
    phi = phi.view(n, c, num_lt_feat)

    g = self.global_conv(lt_feat)
    g = g.view(n, c, num_lt_feat)

    # (n, num_st_feat, c), (n, c, num_lt_feat)
    # -> (n, num_st_feat, num_lt_feat)
    theta_phi = torch.matmul(theta.permute(0, 2, 1), phi)
    if self.use_scale:
        theta_phi /= c**0.5

    p = theta_phi.softmax(dim=-1)

    # (n, c, num_lt_feat), (n, num_lt_feat, num_st_feat)
    # -> (n, c, num_st_feat, 1, 1)
    out = torch.matmul(g, p.permute(0, 2, 1)).view(n, c, num_st_feat, 1, 1)

    # If need to activate it before out_conv, use relu here, otherwise
    # use relu outside the non local layer.
    if self.pre_activate:
        if self.pre_activate_with_ln:
            out = self.ln(out)
        out = self.relu(out)

    out = self.out_conv(out)

    if not self.pre_activate:
        out = self.ln(out)
    if self.dropout_ratio > 0:
        out = self.dropout(out)

    return out
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/fbo_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py
Apache-2.0
def init_weights(self, pretrained=None):
    """Initiate the parameters either from existing checkpoint or from
    scratch."""
    if isinstance(pretrained, str):
        logger = MMLogger.get_current_instance()
        load_checkpoint(self, pretrained, strict=False, logger=logger)
    elif pretrained is None:
        kaiming_init(self.st_feat_conv)
        kaiming_init(self.lt_feat_conv)
        for layer_name in self.non_local_layers:
            non_local_layer = getattr(self, layer_name)
            non_local_layer.init_weights(pretrained=pretrained)
    else:
        raise TypeError('pretrained must be a str or None')
Initiate the parameters either from existing checkpoint or from scratch.
init_weights
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/fbo_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py
Apache-2.0
def forward(self, st_feat, lt_feat):
    """Defines the computation performed at every call."""
    # prepare st_feat
    st_feat = self.st_feat_conv(st_feat)
    if self.st_feat_dropout_ratio > 0:
        st_feat = self.st_feat_dropout(st_feat)

    # prepare lt_feat
    lt_feat = self.lt_feat_conv(lt_feat)
    if self.lt_feat_dropout_ratio > 0:
        lt_feat = self.lt_feat_dropout(lt_feat)

    # fuse short-term and long-term features in NonLocal Layer
    for layer_name in self.non_local_layers:
        identity = st_feat
        non_local_layer = getattr(self, layer_name)
        nl_out = non_local_layer(st_feat, lt_feat)
        nl_out = identity + nl_out
        if not self.pre_activate:
            nl_out = self.relu(nl_out)
        st_feat = nl_out

    return nl_out
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/fbo_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py
Apache-2.0
def init_weights(self, pretrained=None):
    """FBOMax has no parameters to be initialized."""
    pass
FBOMax has no parameters to be initialized.
init_weights
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/fbo_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py
Apache-2.0
def forward(self, st_feat, lt_feat):
    """Defines the computation performed at every call."""
    out = self.max_pool(lt_feat)
    return out
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/fbo_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py
Apache-2.0
def init_weights(self, pretrained=None):
    """Initialize the weights in the module.

    Args:
        pretrained (str, optional): Path to pre-trained weights.
            Default: None.
    """
    self.fbo.init_weights(pretrained=pretrained)
Initialize the weights in the module.

    Args:
        pretrained (str, optional): Path to pre-trained weights.
            Default: None.
init_weights
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/fbo_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py
Apache-2.0
def sample_lfb(self, rois, img_metas):
    """Sample long-term features for each ROI feature."""
    inds = rois[:, 0].type(torch.int64)
    lt_feat_list = []
    for ind in inds:
        lt_feat_list.append(self.lfb[img_metas[ind]['img_key']])
    lt_feat = torch.stack(lt_feat_list, dim=0)
    # [N, lfb_channels, window_size * max_num_feat_per_step]
    lt_feat = lt_feat.permute(0, 2, 1).contiguous()
    return lt_feat.unsqueeze(-1).unsqueeze(-1)
Sample long-term features for each ROI feature.
sample_lfb
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/fbo_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py
Apache-2.0
def forward(self, x, rois, img_metas, **kwargs):
    """Defines the computation performed at every call."""
    # [N, C, 1, 1, 1]
    st_feat = self.temporal_pool(x)
    st_feat = self.spatial_pool(st_feat)
    identity = st_feat

    # [N, C, window_size * num_feat_per_step, 1, 1]
    lt_feat = self.sample_lfb(rois, img_metas).to(st_feat.device)

    fbo_feat = self.fbo(st_feat, lt_feat)

    out = torch.cat([identity, fbo_feat], dim=1)
    return out
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/fbo_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py
Apache-2.0
def __getitem__(self, img_key):
    """Sample long term features like `lfb['0f39OWEqJ24,0902']`, where
    `lfb` is an instance of class LFB."""
    video_id, timestamp = img_key.split(',')
    return self.sample_long_term_features(video_id, int(timestamp))
Sample long term features like `lfb['0f39OWEqJ24,0902']`, where `lfb` is an instance of class LFB.
__getitem__
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/lfb.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/lfb.py
Apache-2.0
def __len__(self):
    """The number of videos whose ROI features are stored in LFB."""
    return len(self.lfb)
The number of videos whose ROI features are stored in LFB.
__len__
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/lfb.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/lfb.py
Apache-2.0
def init_weights(self, pretrained=None):
    """LFBInferHead has no parameters to be initialized."""
    pass
LFBInferHead has no parameters to be initialized.
init_weights
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/lfb_infer_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/lfb_infer_head.py
Apache-2.0
def forward(self, x, rois, img_metas, **kwargs):
    """Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The extracted RoI feature.
        rois (torch.Tensor): The regions of interest.
        img_metas (List[dict]): The meta information of the data.

    Returns:
        torch.Tensor: The input `x`, returned unchanged after the RoI
            features have been recorded into the long-term feature bank.
    """
    # [N, C, 1, 1, 1]
    features = self.temporal_pool(x)
    features = self.spatial_pool(features)
    if self.use_half_precision:
        features = features.half()

    inds = rois[:, 0].type(torch.int64)
    for ind in inds:
        self.all_metadata.append(img_metas[ind]['img_key'])
    self.all_features += list(features)

    # Return the input directly; this head does not modify it.
    return x
Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The extracted RoI feature.
        rois (torch.Tensor): The regions of interest.
        img_metas (List[dict]): The meta information of the data.

    Returns:
        torch.Tensor: The input `x`, returned unchanged after the RoI
            features have been recorded into the long-term feature bank.
forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/lfb_infer_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/lfb_infer_head.py
Apache-2.0
def forward(self, feat: Union[Tensor, Tuple[Tensor]],
            rois: Tensor) -> tuple:
    """Forward function for extract roi features.

    Args:
        feat (Tensor or Tuple[Tensor]): The image features extracted by
            the upstream network. The shape of feat is N, C, T, H, W.
        rois (Tensor): Input RoIs, shape (k, 5).

    Returns:
        tuple: A tuple of roi features and global features.

        - roi_feats (Tensor): Extracted bbox RoI features.
        - feat (Tensor): Global features of the video clip.
    """
    if not isinstance(feat, tuple):
        feat = (feat, )

    if len(feat) >= 2:
        maxT = max([x.shape[2] for x in feat])
        max_shape = (maxT, ) + feat[0].shape[3:]
        # resize each feat to the largest shape (w. nearest)
        feat = [F.interpolate(x, max_shape).contiguous() for x in feat]

    if self.with_temporal_pool:
        if self.temporal_pool_mode == 'avg':
            feat = [torch.mean(x, 2, keepdim=True) for x in feat]
        elif self.temporal_pool_mode == 'max':
            feat = [torch.max(x, 2, keepdim=True)[0] for x in feat]
        else:
            raise NotImplementedError

    feat = torch.cat(feat, axis=1).contiguous()

    roi_feats = []
    for t in range(feat.size(2)):
        frame_feat = feat[:, :, t].contiguous()
        roi_feat = self.roi_layer(frame_feat, rois)
        if self.with_global:
            global_feat = self.global_pool(frame_feat.contiguous())
            inds = rois[:, 0].type(torch.int64)
            global_feat = global_feat[inds]
            roi_feat = torch.cat([roi_feat, global_feat], dim=1)
            roi_feat = roi_feat.contiguous()
        roi_feats.append(roi_feat)

    roi_feats = torch.stack(roi_feats, dim=2)
    return roi_feats, feat
Forward function for extract roi features.

    Args:
        feat (Tensor or Tuple[Tensor]): The image features extracted by
            the upstream network. The shape of feat is N, C, T, H, W.
        rois (Tensor): Input RoIs, shape (k, 5).

    Returns:
        tuple: A tuple of roi features and global features.

        - roi_feats (Tensor): Extracted bbox RoI features.
        - feat (Tensor): Global features of the video clip.
forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_extractors/single_straight3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_extractors/single_straight3d.py
Apache-2.0
def init_weights(self) -> None:
    """Initialize the classification head."""
    for m in self.modules():
        if isinstance(m, nn.Linear):
            nn.init.xavier_normal_(m.weight)
            nn.init.constant_(m.bias, 0)
Initialize the classification head.
init_weights
python
open-mmlab/mmaction2
mmaction/models/roi_heads/bbox_heads/bbox_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py
Apache-2.0
def forward(self, x: Tensor) -> Tensor:
    """Computes the classification logits given ROI features."""
    if self.dropout_before_pool and self.dropout_ratio > 0:
        x = self.dropout(x)

    x = self.temporal_pool(x)
    x = self.spatial_pool(x)

    if not self.dropout_before_pool and self.dropout_ratio > 0:
        x = self.dropout(x)

    x = x.view(x.size(0), -1)
    cls_score = self.fc_cls(x)
    return cls_score
Computes the classification logits given ROI features.
forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/bbox_heads/bbox_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py
Apache-2.0
def get_recall_prec(pred_vec: Tensor, target_vec: Tensor) -> tuple:
    """Computes the Recall/Precision for both multi-label and single
    label scenarios.

    Note that the computation calculates the micro average.

    Note that in both cases, the concept of correct/incorrect is the
    same.

    Args:
        pred_vec (tensor[N x C]): each element is either 0 or 1
        target_vec (tensor[N x C]): each element is either 0 or 1 - for
            single label it is expected that only one element is on (1)
            although this is not enforced.
    """
    correct = pred_vec & target_vec
    recall = correct.sum(1) / target_vec.sum(1).float()  # Enforce Float
    prec = correct.sum(1) / (pred_vec.sum(1) + 1e-6)
    return recall.mean(), prec.mean()
Computes the Recall/Precision for both multi-label and single label
scenarios.

    Note that the computation calculates the micro average.

    Note that in both cases, the concept of correct/incorrect is the
    same.

    Args:
        pred_vec (tensor[N x C]): each element is either 0 or 1
        target_vec (tensor[N x C]): each element is either 0 or 1 - for
            single label it is expected that only one element is on (1)
            although this is not enforced.
get_recall_prec
python
open-mmlab/mmaction2
mmaction/models/roi_heads/bbox_heads/bbox_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py
Apache-2.0
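A hedged example of `get_recall_prec` with two samples and three classes:

import torch

pred = torch.tensor([[1, 0, 1], [0, 1, 0]], dtype=torch.bool)
target = torch.tensor([[1, 1, 0], [0, 1, 0]], dtype=torch.bool)
recall, prec = get_recall_prec(pred, target)
# sample 0: recall 1/2, precision 1/2; sample 1: recall 1, precision 1
assert abs(recall.item() - 0.75) < 1e-4
assert abs(prec.item() - 0.75) < 1e-3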
def topk_to_matrix(probs: Tensor, k: int) -> Tensor:
    """Converts top-k to binary matrix."""
    topk_labels = probs.topk(k, 1, True, True)[1]
    topk_matrix = probs.new_full(probs.size(), 0, dtype=torch.bool)
    for i in range(probs.shape[0]):
        topk_matrix[i, topk_labels[i]] = 1
    return topk_matrix
Converts top-k to binary matrix.
topk_to_matrix
python
open-mmlab/mmaction2
mmaction/models/roi_heads/bbox_heads/bbox_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py
Apache-2.0
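A hedged example of `topk_to_matrix` keeping the top-2 entries per row as a boolean mask:

import torch

probs = torch.tensor([[0.1, 0.7, 0.2],
                      [0.5, 0.4, 0.1]])
mask = topk_to_matrix(probs, k=2)
assert mask.tolist() == [[False, True, True], [True, True, False]]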
def topk_accuracy(self,
                  pred: Tensor,
                  target: Tensor,
                  thr: float = 0.5) -> tuple:
    """Computes the Top-K Accuracies for both single and multi-label
    scenarios."""
    # Define Target vector:
    target_bool = target > 0.5

    # Branch on Multilabel for computing output classification
    if self.multilabel:
        pred = pred.sigmoid()
    else:
        pred = pred.softmax(dim=1)

    # Compute at threshold (K=1 for single)
    if self.multilabel:
        pred_bool = pred > thr
    else:
        pred_bool = self.topk_to_matrix(pred, 1)
    recall_thr, prec_thr = self.get_recall_prec(pred_bool, target_bool)

    # Compute at various K
    recalls_k, precs_k = [], []
    for k in self.topk:
        pred_bool = self.topk_to_matrix(pred, k)
        recall, prec = self.get_recall_prec(pred_bool, target_bool)
        recalls_k.append(recall)
        precs_k.append(prec)

    # Return all
    return recall_thr, prec_thr, recalls_k, precs_k
Computes the Top-K Accuracies for both single and multi-label scenarios.
topk_accuracy
python
open-mmlab/mmaction2
mmaction/models/roi_heads/bbox_heads/bbox_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py
Apache-2.0
def loss_and_target(self, cls_score: Tensor, rois: Tensor,
                    sampling_results: List[SamplingResult],
                    rcnn_train_cfg: ConfigDict, **kwargs) -> dict:
    """Calculate the loss based on the features extracted by the bbox
    head.

    Args:
        cls_score (Tensor): Classification prediction results of all
            classes, has shape
            (batch_size * num_proposals_single_image, num_classes).
        rois (Tensor): RoIs with the shape
            (batch_size * num_proposals_single_image, 5) where the
            first column indicates batch id of each RoI.
        sampling_results (List[obj:SamplingResult]): Assign results of
            all images in a batch after sampling.
        rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.

    Returns:
        dict: A dictionary of loss components.
    """
    cls_targets = self.get_targets(sampling_results, rcnn_train_cfg)
    labels, _ = cls_targets

    losses = dict()
    # Only use the cls_score
    if cls_score is not None:
        if self.background_class:
            labels = labels[:, 1:]  # Get valid labels (ignore first one)
            cls_score = cls_score[:, 1:]
        pos_inds = torch.sum(labels, dim=-1) > 0
        cls_score = cls_score[pos_inds]
        labels = labels[pos_inds]

        # Compute First Recall/Precisions
        # This has to be done first before normalising the label-space.
        recall_thr, prec_thr, recall_k, prec_k = self.topk_accuracy(
            cls_score, labels, thr=0.5)
        losses['recall@thr=0.5'] = recall_thr
        losses['prec@thr=0.5'] = prec_thr
        for i, k in enumerate(self.topk):
            losses[f'recall@top{k}'] = recall_k[i]
            losses[f'prec@top{k}'] = prec_k[i]

        # If Single-label, need to ensure that target labels sum to 1:
        # ie that they are valid probabilities.
        if not self.multilabel and self.background_class:
            labels = labels / labels.sum(dim=1, keepdim=True)

        # Select Loss function based on single/multi-label
        # NB. Both losses auto-compute sigmoid/softmax on prediction
        if self.multilabel:
            loss_func = F.binary_cross_entropy_with_logits
        else:
            loss_func = cross_entropy_loss

        # Compute loss
        loss = loss_func(cls_score, labels, reduction='none')
        pt = torch.exp(-loss)
        F_loss = self.focal_alpha * (1 - pt)**self.focal_gamma * loss
        losses['loss_action_cls'] = torch.mean(F_loss)

    return dict(loss_bbox=losses, bbox_targets=cls_targets)
Calculate the loss based on the features extracted by the bbox head.

    Args:
        cls_score (Tensor): Classification prediction results of all
            classes, has shape
            (batch_size * num_proposals_single_image, num_classes).
        rois (Tensor): RoIs with the shape
            (batch_size * num_proposals_single_image, 5) where the
            first column indicates batch id of each RoI.
        sampling_results (List[obj:SamplingResult]): Assign results of
            all images in a batch after sampling.
        rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.

    Returns:
        dict: A dictionary of loss components.
loss_and_target
python
open-mmlab/mmaction2
mmaction/models/roi_heads/bbox_heads/bbox_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py
Apache-2.0