# mmaction/models/multimodal/vindlu/vindlu.py (open-mmlab/mmaction2, Apache-2.0)

def loss(self, inputs: torch.Tensor, data_samples: SampleList,
**kwargs) -> dict:
"""Calculate losses from a batch of inputs and data samples.""" | Calculate losses from a batch of inputs and data samples. | loss | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/vindlu.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu.py | Apache-2.0 |
def forward(self, inputs, data_samples, mode: str = 'loss'):
"""The unified entry for a forward process in both training and test.
The method should accept three modes:
- ``tensor``: Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- ``predict``: Forward and return the predictions, which are fully
processed to a list of :obj:`ActionDataSample`.
- ``loss``: Forward and return a dict of losses according to the given
inputs and data samples.
    Note that this method doesn't handle either back propagation or
    optimizer updating, which are done in the :meth:`train_step`.
Args:
inputs (torch.Tensor): The input tensor with shape
(N, C, ...) in general.
        data_samples (List[:obj:`ActionDataSample`], optional): The
            annotation data of every sample. Defaults to None.
        mode (str): Return what kind of value. Defaults to ``loss``.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of ``ActionDataSample``.
- If ``mode="loss"``, return a dict of tensor.
"""
if mode == 'tensor':
return self.extract_feat(inputs, data_samples)
elif mode == 'loss':
return self.loss(inputs, data_samples)
elif mode == 'predict':
return self.predict(inputs, data_samples)
else:
        raise RuntimeError(f'Invalid mode "{mode}".')
def encode_vision(self, image):
"""encode image / videos as features.
Args:
image (torch.Tensor): The input images.
Returns: tuple.
- vision_embeds (torch.Tensor): The features of all patches.
Shape: [B,T,L,C].
- pooled_vision_embeds (torch.Tensor): The pooled features.
Shape: [B,T,C].
"""
output_dict = self.vision_encoder(image)
vision_embeds = self.vision_layernorm(output_dict.last_hidden_state)
pooled_vision_embeds = output_dict.pooler_output
    return vision_embeds, pooled_vision_embeds
def encode_text(self, text):
"""encode text.
Args:
text (dict): The output of huggingface's `PreTrainedTokenizer`.
contains keys:
- input_ids (torch.Tensor): Token ids to be fed to a model.
Shape: [B,L].
- attention_mask (torch.Tensor): The mask indicate padded tokens.
Shape: [B,L]. 0 is padded token.
- other keys refer to "https://huggingface.co/docs/transformers/v4.21.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizer.__call__". # noqa: E501
Returns: tuple.
- text_embeds (torch.Tensor): The features of all tokens. Shape: [B,L,C].
- pooled_text_embeds (torch.Tensor): The pooled features. Shape: [B,C].
"""
text_output = self.text_encoder(
text.input_ids,
attention_mask=text.attention_mask,
return_dict=True,
mode='text',
)
text_embeds = text_output.last_hidden_state
pooled_text_embeds = text_embeds[:, 0]
    return text_embeds, pooled_text_embeds
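# A hedged sketch of how the `text` argument is typically produced with
# HuggingFace `transformers` (the checkpoint name is illustrative):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
#     text = tokenizer(['a person dancing'], padding=True, return_tensors='pt')
#     # text.input_ids: [B, L]; text.attention_mask: [B, L], 0 marks padding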
def clip_contrastive_temperature(self, min_val=0.001, max_val=0.5):
"""Seems only used during pre-training."""
self.temp.clamp_(min_val, max_val) | Seems only used during pre-training. | clip_contrastive_temperature | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/vindlu.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu.py | Apache-2.0 |
def preprocess_state_dict(self, state_dict):
"""Preprocess pretrained checkpoint for text_encoder."""
for key in list(state_dict.keys()):
if 'bert' in key:
encoder_key = key.replace('bert.', '')
state_dict[encoder_key] = state_dict[key]
del state_dict[key]
    return state_dict
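# Worked example of the key rewrite above (`model` and the weight `w` are
# placeholders):
#
#     sd = {'bert.encoder.layer.0.output.dense.weight': w}
#     sd = model.preprocess_state_dict(sd)
#     # -> {'encoder.layer.0.output.dense.weight': w}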

# mmaction/models/common/transformer.py (open-mmlab/mmaction2, Apache-2.0)

def init_weights(self):
"""Initialize weights."""
    constant_init(self.temporal_fc, val=0, bias=0)
def forward(self, query, key=None, value=None, residual=None, **kwargs):
"""Defines the computation performed at every call."""
assert residual is None, (
'Always adding the shortcut in the forward function')
init_cls_token = query[:, 0, :].unsqueeze(1)
identity = query_t = query[:, 1:, :]
# query_t [batch_size, num_patches * num_frames, embed_dims]
b, pt, m = query_t.size()
p, t = pt // self.num_frames, self.num_frames
# res_temporal [batch_size * num_patches, num_frames, embed_dims]
query_t = self.norm(query_t.reshape(b * p, t, m)).permute(1, 0, 2)
res_temporal = self.attn(query_t, query_t, query_t)[0].permute(1, 0, 2)
res_temporal = self.dropout_layer(
self.proj_drop(res_temporal.contiguous()))
res_temporal = self.temporal_fc(res_temporal)
# res_temporal [batch_size, num_patches * num_frames, embed_dims]
res_temporal = res_temporal.reshape(b, p * t, m)
# ret_value [batch_size, num_patches * num_frames + 1, embed_dims]
new_query_t = identity + res_temporal
new_query = torch.cat((init_cls_token, new_query_t), 1)
    return new_query
def init_weights(self):
"""init DividedSpatialAttentionWithNorm by default."""
    pass
def forward(self, query, key=None, value=None, residual=None, **kwargs):
"""Defines the computation performed at every call."""
assert residual is None, (
'Always adding the shortcut in the forward function')
identity = query
init_cls_token = query[:, 0, :].unsqueeze(1)
query_s = query[:, 1:, :]
# query_s [batch_size, num_patches * num_frames, embed_dims]
b, pt, m = query_s.size()
p, t = pt // self.num_frames, self.num_frames
# cls_token [batch_size * num_frames, 1, embed_dims]
cls_token = init_cls_token.repeat(1, t, 1).reshape(b * t,
m).unsqueeze(1)
# query_s [batch_size * num_frames, num_patches + 1, embed_dims]
query_s = rearrange(query_s, 'b (p t) m -> (b t) p m', p=p, t=t)
query_s = torch.cat((cls_token, query_s), 1)
# res_spatial [batch_size * num_frames, num_patches + 1, embed_dims]
query_s = self.norm(query_s).permute(1, 0, 2)
res_spatial = self.attn(query_s, query_s, query_s)[0].permute(1, 0, 2)
res_spatial = self.dropout_layer(
self.proj_drop(res_spatial.contiguous()))
# cls_token [batch_size, 1, embed_dims]
cls_token = res_spatial[:, 0, :].reshape(b, t, m)
cls_token = torch.mean(cls_token, 1, True)
# res_spatial [batch_size * num_frames, num_patches + 1, embed_dims]
res_spatial = rearrange(
res_spatial[:, 1:, :], '(b t) p m -> b (p t) m', p=p, t=t)
res_spatial = torch.cat((cls_token, res_spatial), 1)
new_query = identity + res_spatial
    return new_query
def forward(self, x, residual=None):
"""Defines the computation performed at every call."""
assert residual is None, ('Cannot apply pre-norm with FFNWithNorm')
    return super().forward(self.norm(x), x)

# mmaction/models/common/conv2plus1d.py (open-mmlab/mmaction2, Apache-2.0)

def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = self.conv_s(x)
x = self.bn_s(x)
x = self.relu(x)
x = self.conv_t(x)
    return x
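# A hedged shape sketch of the (2+1)D factorisation, assuming a 3x3x3 kernel
# with padding 1: the spatial conv uses a (1, 3, 3) kernel and the temporal
# conv a (3, 1, 1) kernel, approximating one 3D conv with two cheaper ones.
#
#     x = torch.randn(2, 3, 8, 56, 56)                  # [N, C, T, H, W]
#     conv = Conv2plus1d(3, 64, (3, 3, 3), padding=(1, 1, 1))
#     conv(x).shape                                     # [2, 64, 8, 56, 56]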
def init_weights(self) -> None:
"""Initiate the parameters from scratch."""
kaiming_init(self.conv_s)
kaiming_init(self.conv_t)
    constant_init(self.bn_s, 1, bias=0)

# mmaction/models/common/tam.py (open-mmlab/mmaction2, Apache-2.0)

def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# [n, c, h, w]
n, c, h, w = x.size()
num_segments = self.num_segments
num_batches = n // num_segments
assert c == self.in_channels
# [num_batches, c, num_segments, h, w]
x = x.view(num_batches, num_segments, c, h, w)
x = x.permute(0, 2, 1, 3, 4).contiguous()
# [num_batches * c, num_segments, 1, 1]
theta_out = F.adaptive_avg_pool2d(
x.view(-1, num_segments, h, w), (1, 1))
# [num_batches * c, 1, adaptive_kernel_size, 1]
conv_kernel = self.G(theta_out.view(-1, num_segments)).view(
num_batches * c, 1, -1, 1)
# [num_batches, c, num_segments, 1, 1]
local_activation = self.L(theta_out.view(-1, c, num_segments)).view(
num_batches, c, num_segments, 1, 1)
# [num_batches, c, num_segments, h, w]
new_x = x * local_activation
# [1, num_batches * c, num_segments, h * w]
y = F.conv2d(
new_x.view(1, num_batches * c, num_segments, h * w),
conv_kernel,
bias=None,
stride=(self.adaptive_convolution_stride, 1),
padding=(self.adaptive_convolution_padding, 0),
groups=num_batches * c)
# [n, c, h, w]
y = y.view(num_batches, c, num_segments, h, w)
y = y.permute(0, 2, 1, 3, 4).contiguous().view(n, c, h, w)
    return y
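# The grouped-conv trick above in isolation (a self-contained sketch with
# made-up sizes): with groups = num_batches * c, every (batch, channel) pair
# is filtered by its own learnt temporal kernel along the segment axis.
#
#     nb, c, t, hw, k = 2, 4, 8, 49, 3
#     x = torch.randn(1, nb * c, t, hw)
#     kernels = torch.randn(nb * c, 1, k, 1)        # one kernel per group
#     y = F.conv2d(x, kernels, padding=(k // 2, 0), groups=nb * c)
#     y.shape                                       # (1, nb * c, t, hw)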

# mmaction/models/common/sub_batchnorm3d.py (open-mmlab/mmaction2, Apache-2.0)

def init_weights(self, cfg):
"""Initialize weights."""
if cfg.get('affine', True):
self.weight = torch.nn.Parameter(torch.ones(self.num_features))
self.bias = torch.nn.Parameter(torch.zeros(self.num_features))
self.affine = True
else:
        self.affine = False
def _get_aggregated_mean_std(self, means, stds, n):
"""Calculate aggregated mean and std."""
mean = means.view(n, -1).sum(0) / n
std = stds.view(n, -1).sum(0) / n + (
(means.view(n, -1) - mean)**2).view(n, -1).sum(0) / n
    return mean.detach(), std.detach()
def aggregate_stats(self):
"""Synchronize running_mean, and running_var to self.bn.
Call this before eval, then call model.eval(); When eval, forward
function will call self.bn instead of self.split_bn, During this time
the running_mean, and running_var of self.bn has been obtained from
self.split_bn.
"""
if self.split_bn.track_running_stats:
aggre_func = self._get_aggregated_mean_std
self.bn.running_mean.data, self.bn.running_var.data = aggre_func(
self.split_bn.running_mean, self.split_bn.running_var,
self.num_splits)
        self.bn.num_batches_tracked = (
            self.split_bn.num_batches_tracked.detach())
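# Typical evaluation protocol, as a sketch (assuming `model` contains
# SubBatchNorm3D layers): fold the per-split statistics into the plain BN
# before switching to eval mode.
#
#     for m in model.modules():
#         if isinstance(m, SubBatchNorm3D):
#             m.aggregate_stats()
#     model.eval()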
def forward(self, x):
"""Defines the computation performed at every call."""
if self.training:
n, c, t, h, w = x.shape
assert n % self.num_splits == 0
x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)
x = self.split_bn(x)
x = x.view(n, c, t, h, w)
else:
x = self.bn(x)
if self.affine:
x = x * self.weight.view(-1, 1, 1, 1)
x = x + self.bias.view(-1, 1, 1, 1)
    return x

# mmaction/models/common/conv_audio.py (open-mmlab/mmaction2, Apache-2.0)

def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x_1 = self.conv_1(x)
x_2 = self.conv_2(x)
if self.op == 'concat':
out = torch.cat([x_1, x_2], 1)
else:
out = x_1 + x_2
    return out
def init_weights(self) -> None:
"""Initiate the parameters from scratch."""
kaiming_init(self.conv_1.conv)
kaiming_init(self.conv_2.conv)
constant_init(self.conv_1.bn, 1, bias=0)
    constant_init(self.conv_2.bn, 1, bias=0)

# mmaction/models/heads/trn_head.py (open-mmlab/mmaction2, Apache-2.0)

def init_weights(self):
"""Use the default kaiming_uniform for all nn.linear layers."""
    pass
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
"""
# [N, num_segs * hidden_dim]
x = x.view(x.size(0), -1)
x = self.classifier(x)
    return x
def init_weights(self):
"""Use the default kaiming_uniform for all nn.linear layers."""
    pass
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
    self.consensus.init_weights()
def forward(self, x, num_segs, **kwargs):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int): Useless in TRNHead. By default, `num_segs`
is equal to `clip_len * num_clips * num_crops`, which is
automatically generated in Recognizer forward phase and
useless in TRN models. The `self.num_segments` we need is a
            hyperparameter to build TRN models.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N * num_segs, in_channels, 1, 1]
x = torch.flatten(x, 1)
# [N * num_segs, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N, num_segs, hidden_dim]
cls_score = self.fc_cls(x)
cls_score = cls_score.view((-1, self.num_segments) +
cls_score.size()[1:])
# [N, num_classes]
cls_score = self.consensus(cls_score)
    return cls_score
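# Hedged shape walk through the head above, assuming num_segments = 8 and a
# 7x7 feature map from a 2D backbone:
#
#     x: [N * 8, in_channels, 7, 7]
#        -> avg_pool + flatten -> [N * 8, in_channels]
#        -> fc_cls             -> [N * 8, hidden_dim]
#        -> view               -> [N, 8, hidden_dim]
#        -> consensus (TRN relation module) -> [N, num_classes]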

# mmaction/models/heads/i3d_head.py (open-mmlab/mmaction2, Apache-2.0)

def init_weights(self) -> None:
"""Initiate the parameters from scratch."""
    normal_init(self.fc_cls, std=self.init_std)
def forward(self, x: Tensor, **kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
"""
# [N, in_channels, 4, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N, in_channels, 1, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1, 1]
x = x.view(x.shape[0], -1)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
    return cls_score

# mmaction/models/heads/uniformer_head.py (open-mmlab/mmaction2, Apache-2.0)

def init_weights(self) -> None:
"""Initiate the parameters from scratch."""
if get_str_type(self.init_cfg['type']) == 'Pretrained':
assert self.channel_map is not None, \
'load cls_head weights needs to specify the channel map file'
logger = MMLogger.get_current_instance()
pretrained = self.init_cfg['checkpoint']
logger.info(f'load pretrained model from {pretrained}')
state_dict = _load_checkpoint_with_prefix(
'cls_head.', pretrained, map_location='cpu')
self._select_channels(state_dict)
msg = self.load_state_dict(state_dict, strict=False)
logger.info(msg)
else:
        super().init_weights()
def forward(self, x: Tensor, **kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
"""
# [N, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
    return cls_score

# mmaction/models/heads/tsn_audio_head.py (open-mmlab/mmaction2, Apache-2.0)

def init_weights(self) -> None:
"""Initiate the parameters from scratch."""
    normal_init(self.fc_cls, std=self.init_std)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, h, w]
x = self.avg_pool(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
    return cls_score

# mmaction/models/heads/tpn_head.py (open-mmlab/mmaction2, Apache-2.0)

def forward(self,
x,
num_segs: Optional[int] = None,
fcn_test: bool = False,
**kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
num_segs (int, optional): Number of segments into which a video
is divided. Defaults to None.
fcn_test (bool): Whether to apply full convolution (fcn) testing.
Defaults to False.
Returns:
Tensor: The classification scores for input samples.
"""
if fcn_test:
if self.avg_pool3d:
x = self.avg_pool3d(x)
if self.new_cls is None:
self._init_new_cls()
x = self.new_cls(x)
cls_score_feat_map = x.view(x.size(0), -1)
return cls_score_feat_map
if self.avg_pool2d is None:
kernel_size = (1, x.shape[-2], x.shape[-1])
self.avg_pool2d = nn.AvgPool3d(kernel_size, stride=1, padding=0)
if num_segs is None:
# [N, in_channels, 3, 7, 7]
x = self.avg_pool3d(x)
else:
# [N * num_segs, in_channels, 7, 7]
x = self.avg_pool2d(x)
# [N * num_segs, in_channels, 1, 1]
x = x.reshape((-1, num_segs) + x.shape[1:])
# [N, num_segs, in_channels, 1, 1]
x = self.consensus(x)
# [N, 1, in_channels, 1, 1]
x = x.squeeze(1)
# [N, in_channels, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
    return cls_score

# mmaction/models/heads/mvit_head.py (open-mmlab/mmaction2, Apache-2.0)

def init_weights(self) -> None:
"""Initiate the parameters from scratch."""
trunc_normal_init(self.fc_cls.weight, std=self.init_std)
constant_init(self.fc_cls.bias, 0.02)
self.fc_cls.weight.data.mul_(self.init_scale)
    self.fc_cls.bias.data.mul_(self.init_scale)
def pre_logits(self, feats: Tuple[List[Tensor]]) -> Tensor:
"""The process before the final classification head.
The input ``feats`` is a tuple of list of tensor, and each tensor is
the feature of a backbone stage.
"""
if self.with_cls_token:
_, cls_token = feats[-1]
return cls_token
else:
patch_token = feats[-1]
        return patch_token.mean(dim=(2, 3, 4))
def forward(self, x: Tuple[List[Tensor]], **kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tuple[List[Tensor]]): The input data.
Returns:
Tensor: The classification scores for input samples.
"""
x = self.pre_logits(x)
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
    return cls_score

# mmaction/models/heads/slowfast_head.py (open-mmlab/mmaction2, Apache-2.0)

def init_weights(self) -> None:
"""Initiate the parameters from scratch."""
    normal_init(self.fc_cls, std=self.init_std)
def forward(self, x: Tuple[Tensor], **kwargs) -> None:
"""Defines the computation performed at every call.
Args:
x (tuple[torch.Tensor]): The input data.
Returns:
Tensor: The classification scores for input samples.
"""
# ([N, channel_slow, T1, H, W], [(N, channel_fast, T2, H, W)])
x_slow, x_fast = x
# ([N, channel_slow, 1, 1, 1], [N, channel_fast, 1, 1, 1])
x_slow = self.avg_pool(x_slow)
x_fast = self.avg_pool(x_fast)
# [N, channel_fast + channel_slow, 1, 1, 1]
x = torch.cat((x_fast, x_slow), dim=1)
if self.dropout is not None:
x = self.dropout(x)
# [N x C]
x = x.view(x.size(0), -1)
# [N x num_classes]
cls_score = self.fc_cls(x)
    return cls_score

# mmaction/models/heads/feature_head.py (open-mmlab/mmaction2, Apache-2.0)

def forward(self,
x: Tensor,
num_segs: Optional[int] = None,
**kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
num_segs (int): For 2D backbone. Number of segments into which
a video is divided. Defaults to None.
Returns:
Tensor: The output features after pooling.
"""
if isinstance(x, Tensor):
n_dims = x.ndim
elif isinstance(x, tuple):
n_dims = x[0].ndim
assert self.backbone_name == 'slowfast', \
'Only support SlowFast backbone to input tuple'
else:
raise NotImplementedError(f'Unsupported feature type: {type(x)}')
# For 2D backbone with spatial dimension
if n_dims == 4:
assert num_segs is not None
if self.backbone_name == 'tsm':
assert self.num_segments is not None, \
'Please Specify num_segments for TSM'
num_segs = self.num_segments
# [N, T, channels, H, W]
x = x.view((-1, num_segs) + x.shape[1:])
feat = self.pool1d(self.pool2d(x, dim=[-2, -1]), dim=1)
elif n_dims == 5:
if self.backbone_name == 'slowfast':
x_slow, x_fast = x
assert self.temporal_type is not None, \
'slowfast backbone has to pool temporal dimension'
x_fast = self.pool1d(self.pool2d(x_fast, dim=[-2, -1]), dim=2)
x_slow = self.pool1d(self.pool2d(x_slow, dim=[-2, -1]), dim=2)
feat = torch.cat((x_slow, x_fast), dim=1)
# For GCN-based backbone
elif self.backbone_name == 'gcn':
# N, M, C, T, V
feat = self.pool1d(self.pool2d(x, dim=[-2, -1]), dim=1)
# For 3D backbone with spatial dimension
else:
# [N, channels, T, H, W]
feat = self.pool1d(self.pool2d(x, dim=[-2, -1]), dim=2)
# For backbone output feature without spatial and temporal dimension
elif n_dims == 2:
# [N, channels]
feat = x
    return feat
def predict_by_feat(self, feats: Union[Tensor, Tuple[Tensor]],
data_samples) -> Tensor:
"""Integrate multi-view features into one tensor.
Args:
feats (torch.Tensor | tuple[torch.Tensor]): Features from
upstream network.
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
Tensor: The integrated multi-view features.
"""
num_segs = feats.shape[0] // len(data_samples)
feats = self.average_clip(feats, num_segs=num_segs)
    return feats

# mmaction/models/heads/tsm_head.py (open-mmlab/mmaction2, Apache-2.0)

def init_weights(self) -> None:
"""Initiate the parameters from scratch."""
    normal_init(self.fc_cls, std=self.init_std)
def forward(self, x: Tensor, num_segs: int, **kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
num_segs (int): Useless in TSMHead. By default, `num_segs`
is equal to `clip_len * num_clips * num_crops`, which is
automatically generated in Recognizer forward phase and
useless in TSM models. The `self.num_segments` we need is a
            hyperparameter to build TSM models.
Returns:
Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N * num_segs, in_channels, 1, 1]
x = torch.flatten(x, 1)
# [N * num_segs, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N * num_segs, num_classes]
cls_score = self.fc_cls(x)
if self.is_shift and self.temporal_pool:
# [2 * N, num_segs // 2, num_classes]
cls_score = cls_score.view((-1, self.num_segments // 2) +
cls_score.size()[1:])
else:
# [N, num_segs, num_classes]
cls_score = cls_score.view((-1, self.num_segments) +
cls_score.size()[1:])
# [N, 1, num_classes]
cls_score = self.consensus(cls_score)
# [N, num_classes]
    return cls_score.squeeze(1)

# mmaction/models/heads/gcn_head.py (open-mmlab/mmaction2, Apache-2.0)

def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
"""Forward features from the upstream network.
Args:
x (torch.Tensor): Features from the upstream network.
Returns:
torch.Tensor: Classification scores with shape (B, num_classes).
"""
N, M, C, T, V = x.shape
x = x.view(N * M, C, T, V)
x = self.pool(x)
x = x.view(N, M, C)
x = x.mean(dim=1)
assert x.shape[1] == self.in_channels
if self.dropout is not None:
x = self.dropout(x)
cls_scores = self.fc(x)
    return cls_scores

# mmaction/models/heads/base.py (open-mmlab/mmaction2, Apache-2.0)

def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call."""
    return x.mean(dim=self.dim, keepdim=True)
def forward(self, x, **kwargs) -> ForwardResults:
"""Defines the computation performed at every call."""
    raise NotImplementedError
def loss(self, feats: Union[torch.Tensor, Tuple[torch.Tensor]],
data_samples: SampleList, **kwargs) -> Dict:
"""Perform forward propagation of head and loss calculation on the
features of the upstream network.
Args:
feats (torch.Tensor | tuple[torch.Tensor]): Features from
upstream network.
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
dict: A dictionary of loss components.
"""
cls_scores = self(feats, **kwargs)
    return self.loss_by_feat(cls_scores, data_samples)
def loss_by_feat(self, cls_scores: torch.Tensor,
data_samples: SampleList) -> Dict:
"""Calculate the loss based on the features extracted by the head.
Args:
cls_scores (torch.Tensor): Classification prediction results of
all class, has shape (batch_size, num_classes).
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
dict: A dictionary of loss components.
"""
labels = [x.gt_label for x in data_samples]
labels = torch.stack(labels).to(cls_scores.device)
labels = labels.squeeze()
losses = dict()
if labels.shape == torch.Size([]):
labels = labels.unsqueeze(0)
elif labels.dim() == 1 and labels.size()[0] == self.num_classes \
and cls_scores.size()[0] == 1:
# Fix a bug when training with soft labels and batch size is 1.
# When using soft labels, `labels` and `cls_score` share the same
# shape.
labels = labels.unsqueeze(0)
if cls_scores.size() != labels.size():
top_k_acc = top_k_accuracy(cls_scores.detach().cpu().numpy(),
labels.detach().cpu().numpy(),
self.topk)
for k, a in zip(self.topk, top_k_acc):
losses[f'top{k}_acc'] = torch.tensor(
a, device=cls_scores.device)
if self.label_smooth_eps != 0:
if cls_scores.size() != labels.size():
labels = F.one_hot(labels, num_classes=self.num_classes)
labels = ((1 - self.label_smooth_eps) * labels +
self.label_smooth_eps / self.num_classes)
loss_cls = self.loss_cls(cls_scores, labels)
# loss_cls may be dictionary or single tensor
if isinstance(loss_cls, dict):
losses.update(loss_cls)
else:
losses['loss_cls'] = loss_cls
    return losses
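# The label smoothing above, worked numerically: with num_classes = 5 and
# label_smooth_eps = 0.1, a hard label 2 becomes
#
#     one_hot  = [0, 0, 1, 0, 0]
#     smoothed = 0.9 * one_hot + 0.1 / 5
#              = [0.02, 0.02, 0.92, 0.02, 0.02]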
def predict(self, feats: Union[torch.Tensor, Tuple[torch.Tensor]],
data_samples: SampleList, **kwargs) -> SampleList:
"""Perform forward propagation of head and predict recognition results
on the features of the upstream network.
Args:
feats (torch.Tensor | tuple[torch.Tensor]): Features from
upstream network.
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
list[:obj:`ActionDataSample`]: Recognition results wrapped
by :obj:`ActionDataSample`.
"""
cls_scores = self(feats, **kwargs)
    return self.predict_by_feat(cls_scores, data_samples)
def predict_by_feat(self, cls_scores: torch.Tensor,
data_samples: SampleList) -> SampleList:
"""Transform a batch of output features extracted from the head into
prediction results.
Args:
cls_scores (torch.Tensor): Classification scores, has a shape
(B*num_segs, num_classes)
data_samples (list[:obj:`ActionDataSample`]): The
annotation data of every samples. It usually includes
information such as `gt_label`.
Returns:
List[:obj:`ActionDataSample`]: Recognition results wrapped
by :obj:`ActionDataSample`.
"""
num_segs = cls_scores.shape[0] // len(data_samples)
cls_scores = self.average_clip(cls_scores, num_segs=num_segs)
pred_labels = cls_scores.argmax(dim=-1, keepdim=True).detach()
for data_sample, score, pred_label in zip(data_samples, cls_scores,
pred_labels):
data_sample.set_pred_score(score)
data_sample.set_pred_label(pred_label)
    return data_samples
def average_clip(self,
cls_scores: torch.Tensor,
num_segs: int = 1) -> torch.Tensor:
"""Averaging class scores over multiple clips.
    Using different averaging types ('score' or 'prob' or None,
    which is defined in test_cfg) to compute the final averaged
    class score. Only called in test mode.
Args:
cls_scores (torch.Tensor): Class scores to be averaged.
num_segs (int): Number of clips for each input sample.
Returns:
torch.Tensor: Averaged class scores.
"""
if self.average_clips not in ['score', 'prob', None]:
raise ValueError(f'{self.average_clips} is not supported. '
f'Currently supported ones are '
f'["score", "prob", None]')
batch_size = cls_scores.shape[0]
cls_scores = cls_scores.view((batch_size // num_segs, num_segs) +
cls_scores.shape[1:])
if self.average_clips is None:
return cls_scores
elif self.average_clips == 'prob':
cls_scores = F.softmax(cls_scores, dim=2).mean(dim=1)
elif self.average_clips == 'score':
cls_scores = cls_scores.mean(dim=1)
    return cls_scores
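# Numeric sketch of the two averaging modes for one sample with two clips
# (illustrative logits):
#
#     scores = torch.tensor([[2.0, 0.0], [0.0, 2.0]]).view(1, 2, 2)
#     scores.mean(dim=1)                    # 'score' -> [[1.0, 1.0]]
#     F.softmax(scores, dim=2).mean(dim=1)  # 'prob'  -> [[0.5, 0.5]]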

# mmaction/models/heads/timesformer_head.py (open-mmlab/mmaction2, Apache-2.0)

def init_weights(self) -> None:
"""Initiate the parameters from scratch."""
    trunc_normal_init(self.fc_cls, std=self.init_std)
def forward(self, x: Tensor, **kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
"""
# [N, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
    return cls_score

# mmaction/models/heads/tsn_head.py (open-mmlab/mmaction2, Apache-2.0)

def init_weights(self) -> None:
"""Initiate the parameters from scratch."""
    normal_init(self.fc_cls, std=self.init_std)
def forward(self, x: Tensor, num_segs: int, **kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
num_segs (int): Number of segments into which a video
is divided.
Returns:
Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, 7, 7]
if self.avg_pool is not None:
if isinstance(x, tuple):
            shapes = [y.shape for y in x]
            raise TypeError(f'x should not be a tuple; got shapes {shapes}')
x = self.avg_pool(x)
# [N * num_segs, in_channels, 1, 1]
x = x.reshape((-1, num_segs) + x.shape[1:])
# [N, num_segs, in_channels, 1, 1]
x = self.consensus(x)
# [N, 1, in_channels, 1, 1]
x = x.squeeze(1)
# [N, in_channels, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
    return cls_score

# mmaction/models/heads/rgbpose_head.py (open-mmlab/mmaction2, Apache-2.0)

def init_weights(self) -> None:
"""Initiate the parameters from scratch."""
normal_init(self.fc_rgb, std=self.init_std)
    normal_init(self.fc_pose, std=self.init_std)
def forward(self, x: Tuple[torch.Tensor]) -> Dict:
"""Defines the computation performed at every call."""
x_rgb, x_pose = self.avg_pool(x[0]), self.avg_pool(x[1])
x_rgb = x_rgb.view(x_rgb.size(0), -1)
x_pose = x_pose.view(x_pose.size(0), -1)
x_rgb = self.dropout_rgb(x_rgb)
x_pose = self.dropout_pose(x_pose)
cls_scores = dict()
cls_scores['rgb'] = self.fc_rgb(x_rgb)
cls_scores['pose'] = self.fc_pose(x_pose)
    return cls_scores
def loss(self, feats: Tuple[torch.Tensor], data_samples: SampleList,
**kwargs) -> Dict:
"""Perform forward propagation of head and loss calculation on the
features of the upstream network.
Args:
feats (tuple[torch.Tensor]): Features from upstream network.
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
dict: A dictionary of loss components.
"""
cls_scores = self(feats, **kwargs)
    return self.loss_by_feat(cls_scores, data_samples)
def loss_by_feat(self, cls_scores: Dict[str, torch.Tensor],
data_samples: SampleList) -> Dict:
"""Calculate the loss based on the features extracted by the head.
Args:
cls_scores (dict[str, torch.Tensor]): The dict of
classification scores,
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
dict: A dictionary of loss components.
"""
labels = torch.stack([x.gt_label for x in data_samples])
labels = labels.squeeze()
if labels.shape == torch.Size([]):
labels = labels.unsqueeze(0)
elif labels.dim() == 1 and labels.size()[0] == self.num_classes \
and cls_scores.size()[0] == 1:
# Fix a bug when training with soft labels and batch size is 1.
# When using soft labels, `labels` and `cls_score` share the same
# shape.
labels = labels.unsqueeze(0)
losses = dict()
for loss_name, weight in zip(self.loss_components, self.loss_weights):
cls_score = cls_scores[loss_name]
loss_cls = self.loss_by_scores(cls_score, labels)
loss_cls = {loss_name + '_' + k: v for k, v in loss_cls.items()}
loss_cls[f'{loss_name}_loss_cls'] *= weight
losses.update(loss_cls)
    return losses
def loss_by_scores(self, cls_scores: torch.Tensor,
labels: torch.Tensor) -> Dict:
"""Calculate the loss based on the features extracted by the head.
Args:
cls_scores (torch.Tensor): Classification prediction
results of all class, has shape (batch_size, num_classes).
labels (torch.Tensor): The labels used to calculate the loss.
Returns:
dict: A dictionary of loss components.
"""
losses = dict()
if cls_scores.size() != labels.size():
top_k_acc = top_k_accuracy(cls_scores.detach().cpu().numpy(),
labels.detach().cpu().numpy(),
self.topk)
for k, a in zip(self.topk, top_k_acc):
losses[f'top{k}_acc'] = torch.tensor(
a, device=cls_scores.device)
if self.label_smooth_eps != 0:
if cls_scores.size() != labels.size():
labels = F.one_hot(labels, num_classes=self.num_classes)
labels = ((1 - self.label_smooth_eps) * labels +
self.label_smooth_eps / self.num_classes)
loss_cls = self.loss_cls(cls_scores, labels)
# loss_cls may be dictionary or single tensor
if isinstance(loss_cls, dict):
losses.update(loss_cls)
else:
losses['loss_cls'] = loss_cls
    return losses
def predict(self, feats: Tuple[torch.Tensor], data_samples: SampleList,
**kwargs) -> SampleList:
"""Perform forward propagation of head and predict recognition results
on the features of the upstream network.
Args:
feats (tuple[torch.Tensor]): Features from upstream network.
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
list[:obj:`ActionDataSample`]: Recognition results wrapped
by :obj:`ActionDataSample`.
"""
cls_scores = self(feats, **kwargs)
    return self.predict_by_feat(cls_scores, data_samples)
def predict_by_feat(self, cls_scores: Dict[str, torch.Tensor],
data_samples: SampleList) -> SampleList:
"""Transform a batch of output features extracted from the head into
prediction results.
Args:
cls_scores (dict[str, torch.Tensor]): The dict of
classification scores,
data_samples (list[:obj:`ActionDataSample`]): The
annotation data of every samples. It usually includes
information such as `gt_label`.
Returns:
list[:obj:`ActionDataSample`]: Recognition results wrapped
by :obj:`ActionDataSample`.
"""
pred_scores = [dict() for _ in range(len(data_samples))]
for name in self.loss_components:
cls_score = cls_scores[name]
cls_score = self.predict_by_scores(cls_score, data_samples)
for pred_score, score in zip(pred_scores, cls_score):
pred_score[f'{name}'] = score
for data_sample, pred_score, in zip(data_samples, pred_scores):
data_sample.set_pred_score(pred_score)
    return data_samples
def predict_by_scores(self, cls_scores: torch.Tensor,
data_samples: SampleList) -> torch.Tensor:
"""Transform a batch of output features extracted from the head into
prediction results.
Args:
cls_scores (torch.Tensor): Classification scores, has a shape
(B*num_segs, num_classes)
data_samples (list[:obj:`ActionDataSample`]): The annotation
data of every samples.
Returns:
torch.Tensor: The averaged classification scores.
"""
num_segs = cls_scores.shape[0] // len(data_samples)
cls_scores = self.average_clip(cls_scores, num_segs=num_segs)
    return cls_scores

# mmaction/models/heads/omni_head.py (open-mmlab/mmaction2, Apache-2.0)

def forward(self, x: Tensor, **kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
"""
if len(x.shape) == 4:
cls_score = self.fc2d(x)
else:
cls_score = self.fc3d(x)
    return cls_score
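# Dispatch rule above, spelled out: a 4-D input [N, C, H, W] (an image
# batch) is routed through the 2D classifier branch, while a 5-D input
# [N, C, T, H, W] (a video batch) goes through the 3D branch.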
def loss_by_feat(self, cls_scores: Union[Tensor, Tuple[Tensor]],
data_samples: SampleList) -> dict:
"""Calculate the loss based on the features extracted by the head.
Args:
cls_scores (Tensor): Classification prediction results of
all class, has shape (batch_size, num_classes).
data_samples (List[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
dict: A dictionary of loss components.
"""
labels = [x.gt_label for x in data_samples]
labels = torch.stack(labels).to(cls_scores.device)
labels = labels.squeeze()
losses = dict()
if labels.shape == torch.Size([]):
labels = labels.unsqueeze(0)
elif labels.dim() == 1 and cls_scores.size()[0] == 1:
# Fix a bug when training with soft labels and batch size is 1.
        # When using soft labels, `labels` and `cls_score` share the same
# shape.
labels = labels.unsqueeze(0)
if cls_scores.size() != labels.size():
top_k_acc = top_k_accuracy(cls_scores.detach().cpu().numpy(),
labels.detach().cpu().numpy(),
self.topk)
for k, a in zip(self.topk, top_k_acc):
losses[f'top{k}_acc'] = torch.tensor(
a, device=cls_scores.device)
if self.label_smooth_eps != 0:
if cls_scores.size() != labels.size():
labels = F.one_hot(labels, num_classes=self.num_classes)
labels = ((1 - self.label_smooth_eps) * labels +
self.label_smooth_eps / self.num_classes)
loss_cls = self.loss_cls(cls_scores, labels)
# loss_cls may be dictionary or single tensor
if isinstance(loss_cls, dict):
losses.update(loss_cls)
else:
losses['loss_cls'] = loss_cls
    return losses
def init_weights(self) -> None:
"""Initiate the parameters from scratch."""
normal_init(self.fc1, std=self.init_std)
normal_init(self.fc2, std=self.init_std) | Initiate the parameters from scratch. | init_weights | python | open-mmlab/mmaction2 | mmaction/models/heads/x3d_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/x3d_head.py | Apache-2.0 |
def forward(self, x: Tensor, **kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
"""
# [N, in_channels, T, H, W]
assert self.pool is not None
x = self.pool(x)
# [N, in_channels, 1, 1, 1]
x = x.view(x.shape[0], -1)
# [N, in_channels]
x = self.fc1(x)
# [N, 2048]
x = self.relu(x)
if self.dropout is not None:
x = self.dropout(x)
cls_score = self.fc2(x)
# [N, num_classes]
return cls_score | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples. | forward | python | open-mmlab/mmaction2 | mmaction/models/heads/x3d_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/x3d_head.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call."""
return self.relu(self.tcn(self.gcn(x)) + self.residual(x)) | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/aagcn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/aagcn.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call."""
N, M, T, V, C = x.size()
x = x.permute(0, 1, 3, 4, 2).contiguous()
if self.data_bn_type == 'MVC':
x = self.data_bn(x.view(N, M * V * C, T))
else:
x = self.data_bn(x.view(N * M, V * C, T))
x = x.view(N, M, V, C, T).permute(0, 1, 3, 4,
2).contiguous().view(N * M, C, T, V)
for i in range(self.num_stages):
x = self.gcn[i](x)
x = x.reshape((N, M) + x.shape[1:])
return x | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/aagcn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/aagcn.py | Apache-2.0 |
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1.eval()
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False | Prevent all the parameters from being optimized before
``self.frozen_stages``. | _freeze_stages | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet2plus1d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet2plus1d.py | Apache-2.0 |
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
x = self.conv1(x)
x = self.maxpool(x)
for layer_name in self.res_layers:
res_layer = getattr(self, layer_name)
# no pool2 in R(2+1)d
x = res_layer(x)
return x | Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet2plus1d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet2plus1d.py | Apache-2.0 |
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.block.conv1(x)
out = self.tam(out)
out = self.block.conv2(out)
out = self.block.conv3(out)
if self.block.downsample is not None:
identity = self.block.downsample(x)
out = out + identity
return out | Forward wrapper for utilizing checkpoint. | forward._inner_forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/tanet.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/tanet.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call."""
assert isinstance(self.block, Bottleneck)
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.block.conv1(x)
out = self.tam(out)
out = self.block.conv2(out)
out = self.block.conv3(out)
if self.block.downsample is not None:
identity = self.block.downsample(x)
out = out + identity
return out
if self.block.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.block.relu(out)
return out | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/tanet.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/tanet.py | Apache-2.0 |
def init_weights(self):
"""Initialize weights."""
pass | Initialize weights. | init_weights | python | open-mmlab/mmaction2 | mmaction/models/backbones/tanet.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/tanet.py | Apache-2.0 |
def make_tam_modeling(self):
"""Replace ResNet-Block with TA-Block."""
def make_tam_block(stage, num_segments, tam_cfg=dict()):
blocks = list(stage.children())
for i, block in enumerate(blocks):
blocks[i] = TABlock(block, num_segments, deepcopy(tam_cfg))
return nn.Sequential(*blocks)
for i in range(self.num_stages):
layer_name = f'layer{i + 1}'
res_layer = getattr(self, layer_name)
setattr(self, layer_name,
make_tam_block(res_layer, self.num_segments, self.tam_cfg)) | Replace ResNet-Block with TA-Block. | make_tam_modeling | python | open-mmlab/mmaction2 | mmaction/models/backbones/tanet.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/tanet.py | Apache-2.0 |
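The rewrite pattern above (unpack a stage, wrap every block, rebuild the `nn.Sequential`) is generic. A minimal sketch with a hypothetical pass-through wrapper standing in for `TABlock`:

import torch.nn as nn

class Wrapper(nn.Module):          # stand-in for TABlock
    def __init__(self, block):
        super().__init__()
        self.block = block

    def forward(self, x):
        return self.block(x)       # a temporal module would hook in here

stage = nn.Sequential(nn.Conv2d(8, 8, 3, padding=1), nn.Conv2d(8, 8, 3, padding=1))
stage = nn.Sequential(*[Wrapper(b) for b in stage.children()])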
def linear_sampler(data, offset):
"""Differentiable Temporal-wise Frame Sampling, which is essentially a
linear interpolation process.
It gets the feature map which has been split into several groups
and shifts them by different offsets according to their groups.
Then it computes the weighted sum along the temporal dimension.
Args:
data (torch.Tensor): Split data for certain group in shape
[N, num_segments, C, H, W].
offset (torch.Tensor): Data offsets for this group data in shape
[N, num_segments].
"""
# [N, num_segments, C, H, W]
n, t, c, h, w = data.shape
# offset0, offset1: [N, num_segments]
offset0 = torch.floor(offset).int()
offset1 = offset0 + 1
# data, data0, data1: [N, num_segments, C, H * W]
data = data.view(n, t, c, h * w).contiguous()
try:
from mmcv.ops import tin_shift
except (ImportError, ModuleNotFoundError):
raise ImportError('Failed to import `tin_shift` from `mmcv.ops`. You '
'will be unable to use TIN. ')
data0 = tin_shift(data, offset0)
data1 = tin_shift(data, offset1)
# weight0, weight1: [N, num_segments]
weight0 = 1 - (offset - offset0.float())
weight1 = 1 - weight0
# weight0, weight1:
# [N, num_segments] -> [N, num_segments, C // num_segments] -> [N, C]
group_size = offset.shape[1]
weight0 = weight0[:, :, None].repeat(1, 1, c // group_size)
weight0 = weight0.view(weight0.size(0), -1)
weight1 = weight1[:, :, None].repeat(1, 1, c // group_size)
weight1 = weight1.view(weight1.size(0), -1)
# weight0, weight1: [N, C] -> [N, 1, C, 1]
weight0 = weight0[:, None, :, None]
weight1 = weight1[:, None, :, None]
# output: [N, num_segments, C, H * W] -> [N, num_segments, C, H, W]
output = weight0 * data0 + weight1 * data1
output = output.view(n, t, c, h, w)
return output | Differentiable Temporal-wise Frame Sampling, which is essentially a
linear interpolation process.
It gets the feature map which has been split into several groups
and shifts them by different offsets according to their groups.
Then it computes the weighted sum along the temporal dimension.
Args:
data (torch.Tensor): Split data for certain group in shape
[N, num_segments, C, H, W].
offset (torch.Tensor): Data offsets for this group data in shape
[N, num_segments]. | linear_sampler | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet_tin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tin.py | Apache-2.0 |
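The key step in `linear_sampler` is splitting a fractional offset into its two neighbouring integer shifts and blending them with weights `1 - frac` and `frac`. A toy 1-D sketch of that weighting, with `torch.roll` standing in for `tin_shift` (an assumption: `roll` wraps around while `tin_shift` zero-pads, so only interior positions match the real op):

import torch

data = torch.arange(8, dtype=torch.float32)
offset = torch.tensor(1.3)
o0 = torch.floor(offset)                 # integer part: 1.0
w0 = 1 - (offset - o0)                   # 0.7 on the floor shift
w1 = 1 - w0                              # 0.3 on the ceil shift
out = w0 * torch.roll(data, 1) + w1 * torch.roll(data, 2)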
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# input shape: [num_batches * num_segments, C, H, W]
# output x shape: [num_batches * num_segments, C, H, W]
x = self.net1(x)
# [num_batches * num_segments, C, H, W]
x = self.net2(x)
return x | Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet_tin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tin.py | Apache-2.0 |
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
# we set the initial bias of the convolution
# layer to 0, and the final initial output will be 1.0
self.conv.bias.data[...] = 0 | Initiate the parameters either from an existing checkpoint or from
scratch. | init_weights | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet_tin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tin.py | Apache-2.0 |
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# calculate weight
# [N, C, T]
n, _, t = x.shape
# [N, groups, T]
x = self.conv(x)
x = x.view(n, self.groups, t)
# [N, T, groups]
x = x.permute(0, 2, 1)
# scale the output to range (0, 2)
x = 2 * self.sigmoid(x)
# [N, T, groups]
return x | Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet_tin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tin.py | Apache-2.0 |
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
# The bias of the last fc layer is initialized to
# make the post-sigmoid output start from 1
self.fc2.bias.data[...] = 0.5108 | Initiate the parameters either from an existing checkpoint or from
scratch. | init_weights | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet_tin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tin.py | Apache-2.0 |
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# calculate offset
# [N, C, T]
n, _, t = x.shape
# [N, 1, T]
x = self.conv(x)
# [N, T]
x = x.view(n, t)
# [N, T]
x = self.relu(self.fc1(x))
# [N, groups]
x = self.fc2(x)
# [N, 1, groups]
x = x.view(n, 1, -1)
# to make sure the output is in (-t/2, t/2)
# where t = num_segments = 8
x = 4 * (self.sigmoid(x) - 0.5)
# [N, 1, groups]
return x | Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet_tin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tin.py | Apache-2.0 |
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# x: [N, C, H, W],
# where N = num_batches x num_segments, C = shift_div * num_folds
n, c, h, w = x.size()
num_batches = n // self.num_segments
num_folds = c // self.shift_div
# x_out: [num_batches x num_segments, C, H, W]
x_out = torch.zeros((n, c, h, w), device=x.device)
# x_descriptor: [num_batches, num_segments, num_folds, H, W]
x_descriptor = x[:, :num_folds, :, :].view(num_batches,
self.num_segments,
num_folds, h, w)
# x should only obtain information on temporal and channel dimensions
# x_pooled: [num_batches, num_segments, num_folds, W]
x_pooled = torch.mean(x_descriptor, 3)
# x_pooled: [num_batches, num_segments, num_folds]
x_pooled = torch.mean(x_pooled, 3)
# x_pooled: [num_batches, num_folds, num_segments]
x_pooled = x_pooled.permute(0, 2, 1).contiguous()
# Calculate weight and bias, here groups = 2
# x_offset: [num_batches, groups]
x_offset = self.offset_net(x_pooled).view(num_batches, -1)
# x_weight: [num_batches, num_segments, groups]
x_weight = self.weight_net(x_pooled)
# x_offset: [num_batches, 2 * groups]
x_offset = torch.cat([x_offset, -x_offset], 1)
# x_shift: [num_batches, num_segments, num_folds, H, W]
x_shift = linear_sampler(x_descriptor, x_offset)
# x_weight: [num_batches, num_segments, groups, 1]
x_weight = x_weight[:, :, :, None]
# x_weight:
# [num_batches, num_segments, groups * 2, c // self.shift_div // 4]
x_weight = x_weight.repeat(1, 1, 2, num_folds // 2 // 2)
# x_weight:
# [num_batches, num_segments, c // self.shift_div = num_folds]
x_weight = x_weight.view(x_weight.size(0), x_weight.size(1), -1)
# x_weight: [num_batches, num_segments, num_folds, 1, 1]
x_weight = x_weight[:, :, :, None, None]
# x_shift: [num_batches, num_segments, num_folds, H, W]
x_shift = x_shift * x_weight
# x_shift: [num_batches, num_segments, num_folds, H, W]
x_shift = x_shift.contiguous().view(n, num_folds, h, w)
# x_out: [num_batches x num_segments, C, H, W]
x_out[:, :num_folds, :] = x_shift
x_out[:, num_folds:, :] = x[:, num_folds:, :]
return x_out | Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet_tin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tin.py | Apache-2.0 |
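Only the first `C // shift_div` channels of the input are temporally mixed; the rest pass through unchanged. A shape-level sketch of that split, with a fixed one-frame roll standing in for the learned fractional shift (sizes are toy assumptions):

import torch

num_batches, num_segments, c, h, w = 2, 4, 16, 7, 7
shift_div = 4
n = num_batches * num_segments
x = torch.randn(n, c, h, w)
num_folds = c // shift_div                                 # 4 shifted channels
x_out = x.clone()
fold = x[:, :num_folds].view(num_batches, num_segments, num_folds, h, w)
fold = torch.roll(fold, shifts=1, dims=1)                  # stand-in temporal shift
x_out[:, :num_folds] = fold.reshape(n, num_folds, h, w)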
def make_block_interlace(stage, num_segments, shift_div):
"""Apply Deformable shift for a ResNet layer module.
Args:
stage (nn.module): A ResNet layer to be deformed.
num_segments (int): Number of frame segments.
shift_div (int): Number of division parts for shift.
Returns:
nn.Sequential: A Sequential container consisting of
deformed Interlace blocks.
"""
blocks = list(stage.children())
for i, b in enumerate(blocks):
if i % n_round == 0:
tds = TemporalInterlace(
b.conv1.in_channels,
num_segments=num_segments,
shift_div=shift_div)
blocks[i].conv1.conv = CombineNet(tds,
blocks[i].conv1.conv)
return nn.Sequential(*blocks) | Apply Deformable shift for a ResNet layer module.
Args:
stage (nn.module): A ResNet layer to be deformed.
num_segments (int): Number of frame segments.
shift_div (int): Number of division parts for shift.
Returns:
nn.Sequential: A Sequential container consisting of
deformed Interlace blocks. | make_temporal_interlace.make_block_interlace | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet_tin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tin.py | Apache-2.0 |
def make_temporal_interlace(self):
"""Make temporal interlace for some layers."""
num_segment_list = [self.num_segments] * 4
assert num_segment_list[-1] > 0
n_round = 1
if len(list(self.layer3.children())) >= 23:
print(f'=> Using n_round {n_round} to insert temporal shift.')
def make_block_interlace(stage, num_segments, shift_div):
"""Apply Deformable shift for a ResNet layer module.
Args:
stage (nn.module): A ResNet layer to be deformed.
num_segments (int): Number of frame segments.
shift_div (int): Number of division parts for shift.
Returns:
nn.Sequential: A Sequential container consisting of
deformed Interlace blocks.
"""
blocks = list(stage.children())
for i, b in enumerate(blocks):
if i % n_round == 0:
tds = TemporalInterlace(
b.conv1.in_channels,
num_segments=num_segments,
shift_div=shift_div)
blocks[i].conv1.conv = CombineNet(tds,
blocks[i].conv1.conv)
return nn.Sequential(*blocks)
self.layer1 = make_block_interlace(self.layer1, num_segment_list[0],
self.shift_div)
self.layer2 = make_block_interlace(self.layer2, num_segment_list[1],
self.shift_div)
self.layer3 = make_block_interlace(self.layer3, num_segment_list[2],
self.shift_div)
self.layer4 = make_block_interlace(self.layer4, num_segment_list[3],
self.shift_div) | Make temporal interlace for some layers. | make_temporal_interlace | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet_tin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tin.py | Apache-2.0 |
def batch_norm(inputs: torch.Tensor,
module: nn.modules.batchnorm,
training: Optional[bool] = None) -> torch.Tensor:
"""Applies Batch Normalization for each channel across a batch of data
using params from the given batch normalization module.
Args:
inputs (Tensor): The input data.
module (nn.modules.batchnorm): a batch normalization module. Will use
params from this batch normalization module to do the operation.
training (bool, optional): if true, apply the train mode batch
normalization. Defaults to None and will use the training mode of
the module.
"""
if training is None:
training = module.training
return F.batch_norm(
input=inputs,
running_mean=None if training else module.running_mean,
running_var=None if training else module.running_var,
weight=module.weight,
bias=module.bias,
training=training,
momentum=module.momentum,
eps=module.eps) | Applies Batch Normalization for each channel across a batch of data
using params from the given batch normalization module.
Args:
inputs (Tensor): The input data.
module (nn.modules.batchnorm): a batch normalization module. Will use
params from this batch normalization module to do the operation.
training (bool, optional): if true, apply the train mode batch
normalization. Defaults to None and will use the training mode of
the module. | batch_norm | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet_omni.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_omni.py | Apache-2.0 |
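The wrapper makes one module's statistics and affine parameters reusable on inputs of a different layout, since `F.batch_norm` accepts any (N, C, ...) shape. A quick usage sketch, assuming the `batch_norm` helper above is in scope:

import torch
import torch.nn as nn

bn3d = nn.BatchNorm3d(8)
bn3d.eval()
x2d = torch.randn(2, 8, 14, 14)            # BCHW image features
# eval mode: reuse bn3d's running stats and affine params on a 2D input
y = batch_norm(x2d, bn3d, training=False)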
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call.
Accept both 3D (BCTHW for videos) and 2D (BCHW for images) tensors.
"""
if x.ndim == 4:
return self.forward_2d(x)
# Forward call for 3D tensors.
out = self.conv1(x)
out = self.bn1(out).relu_()
out = self.conv2(out)
out = self.bn2(out).relu_()
out = self.conv3(out)
out = self.bn3(out)
if hasattr(self, 'downsample'):
x = self.downsample(x)
return out.add_(x).relu_() | Defines the computation performed at every call.
Accept both 3D (BCTHW for videos) and 2D (BCHW for images) tensors. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet_omni.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_omni.py | Apache-2.0 |
def forward_2d(self, x: torch.Tensor) -> torch.Tensor:
"""Forward call for 2D tensors."""
out = F.conv2d(x, self.conv1.weight.sum(2))
out = batch_norm(out, self.bn1).relu_()
out = F.conv2d(
out,
self.conv2.weight.squeeze(2),
stride=self.conv2.stride[-1],
padding=1)
out = batch_norm(out, self.bn2).relu_()
out = F.conv2d(out, self.conv3.weight.squeeze(2))
out = batch_norm(out, self.bn3)
if hasattr(self, 'downsample'):
x = F.conv2d(
x,
self.downsample[0].weight.squeeze(2),
stride=self.downsample[0].stride[-1])
x = batch_norm(x, self.downsample[1])
return out.add_(x).relu_() | Forward call for 2D tensors. | forward_2d | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet_omni.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_omni.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call.
Accept both 3D (BCTHW for videos) and 2D (BCHW for images) tensors.
"""
if x.ndim == 4:
return self.forward_2d(x)
# Forward call for 3D tensors.
x = self.conv1(x)
x = self.bn1(x).relu_()
x = self.pool3d(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x | Defines the computation performed at every call.
Accept both 3D (BCTHW for videos) and 2D (BCHW for images) tensors. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet_omni.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_omni.py | Apache-2.0 |
def forward_2d(self, x: torch.Tensor) -> torch.Tensor:
"""Forward call for 2D tensors."""
x = F.conv2d(
x,
self.conv1.weight.squeeze(2),
stride=self.conv1.stride[-1],
padding=self.conv1.padding[-1])
x = batch_norm(x, self.bn1).relu_()
x = self.pool2d(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x | Forward call for 2D tensors. | forward_2d | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet_omni.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_omni.py | Apache-2.0 |
def train(self, mode=True):
"""Set the optimization status when training."""
super(ResNet3d, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
if self.bn_frozen:
for param in m.parameters():
param.requires_grad = False | Set the optimization status when training. | train | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet3d_csn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d_csn.py | Apache-2.0 |
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = MMLogger.get_current_instance()
logger.info(f'load model from: {self.pretrained}')
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, nn.Linear):
normal_init(m, std=self.init_std)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None') | Initiate the parameters either from an existing checkpoint or from
scratch. | init_weights | python | open-mmlab/mmaction2 | mmaction/models/backbones/c3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/c3d.py | Apache-2.0 |
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
the size of x is (num_batches, 3, 16, 112, 112).
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
x = self.conv1a(x)
x = self.pool1(x)
x = self.conv2a(x)
x = self.pool2(x)
x = self.conv3a(x)
x = self.conv3b(x)
x = self.pool3(x)
x = self.conv4a(x)
x = self.conv4b(x)
x = self.pool4(x)
x = self.conv5a(x)
x = self.conv5b(x)
x = self.pool5(x)
x = x.flatten(start_dim=1)
x = self.relu(self.fc6(x))
x = self.dropout(x)
x = self.relu(self.fc7(x))
return x | Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
the size of x is (num_batches, 3, 16, 112, 112).
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/c3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/c3d.py | Apache-2.0 |
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
"""Make divisible function.
This function rounds the channel number to the nearest value that is
divisible by the divisor.
Args:
value (int): The original channel number.
divisor (int): The divisor to fully divide the channel number.
min_value (int, optional): The minimum value of the output channel.
Defaults to None, which means the minimum value equals the
divisor.
min_ratio (float, optional): The minimum ratio of the rounded channel
number to the original channel number. Defaults to 0.9.
Returns:
int: The modified output channel number
"""
if min_value is None:
min_value = divisor
new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than (1-min_ratio).
if new_value < min_ratio * value:
new_value += divisor
return new_value | Make divisible function.
This function rounds the channel number to the nearest value that is
divisible by the divisor.
Args:
value (int): The original channel number.
divisor (int): The divisor to fully divide the channel number.
min_value (int, optional): The minimum value of the output channel.
Defaults to None, which means the minimum value equals the
divisor.
min_ratio (float, optional): The minimum ratio of the rounded channel
number to the original channel number. Defaults to 0.9.
Returns:
int: The modified output channel number | make_divisible | python | open-mmlab/mmaction2 | mmaction/models/backbones/mobilenet_v2.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobilenet_v2.py | Apache-2.0 |
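Two worked cases of the rounding above with divisor 8 and the default `min_ratio=0.9` (assuming the `make_divisible` above is in scope):

# value=30: int(30 + 4) // 8 * 8 = 32, and 32 >= 0.9 * 30, so 32 is returned
# value=27: the nearest multiple is 24, but 24 < 0.9 * 27 = 24.3, so bump to 32
assert make_divisible(30, 8) == 32
assert make_divisible(27, 8) == 32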
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The output of the module.
"""
def _inner_forward(x):
if self.use_res_connect:
return x + self.conv(x)
return self.conv(x)
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The output of the module. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/mobilenet_v2.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobilenet_v2.py | Apache-2.0 |
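The `with_cp` branch trades memory for compute: intermediate activations of the block are dropped in the forward pass and recomputed during backward. A minimal standalone sketch of `torch.utils.checkpoint` doing the same thing:

import torch
import torch.utils.checkpoint as cp

def branch(x):
    return torch.relu(x) * 2               # stand-in for the expensive conv branch

x = torch.randn(4, 8, requires_grad=True)
out = cp.checkpoint(branch, x)             # intermediates recomputed in backward
out.sum().backward()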
def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
"""Stack InvertedResidual blocks to build a layer for MobileNetV2.
Args:
out_channels (int): out_channels of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Defaults to 1.
expand_ratio (int): Expand the number of channels of the
hidden layer in InvertedResidual by this ratio. Defaults to 6.
"""
layers = []
for i in range(num_blocks):
if i >= 1:
stride = 1
layers.append(
InvertedResidual(
self.in_channels,
out_channels,
stride,
expand_ratio=expand_ratio,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
with_cp=self.with_cp))
self.in_channels = out_channels
return nn.Sequential(*layers) | Stack InvertedResidual blocks to build a layer for MobileNetV2.
Args:
out_channels (int): out_channels of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Defaults to 1.
expand_ratio (int): Expand the number of channels of the
hidden layer in InvertedResidual by this ratio. Defaults to 6. | make_layer | python | open-mmlab/mmaction2 | mmaction/models/backbones/mobilenet_v2.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobilenet_v2.py | Apache-2.0 |
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor or Tuple[Tensor]: The feature of the input samples extracted
by the backbone.
"""
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
return tuple(outs) | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor or Tuple[Tensor]: The feature of the input samples extracted
by the backbone. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/mobilenet_v2.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobilenet_v2.py | Apache-2.0 |
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1.eval()
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
layer_name = self.layers[i - 1]
layer = getattr(self, layer_name)
layer.eval()
for param in layer.parameters():
param.requires_grad = False | Prevent all the parameters from being optimized before
``self.frozen_stages``. | _freeze_stages | python | open-mmlab/mmaction2 | mmaction/models/backbones/mobilenet_v2.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobilenet_v2.py | Apache-2.0 |
def train(self, mode=True):
"""Set the optimization status when training."""
super(MobileNetV2, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval() | Set the optimization status when training. | train | python | open-mmlab/mmaction2 | mmaction/models/backbones/mobilenet_v2.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobilenet_v2.py | Apache-2.0 |
def forward(self, x: Tensor) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data with size of (B, N, C).
Returns:
Tensor: The output of the attention block, same size as inputs.
"""
B, N, C = x.shape
if hasattr(self, 'q_bias'):
k_bias = torch.zeros_like(self.v_bias, requires_grad=False)
qkv_bias = torch.cat((self.q_bias, k_bias, self.v_bias))
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
else:
qkv = self.qkv(x)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x | Defines the computation performed at every call.
Args:
x (Tensor): The input data with size of (B, N, C).
Returns:
Tensor: The output of the attention block, same size as inputs. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/vit_mae.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/vit_mae.py | Apache-2.0 |
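The zero `k_bias` above reflects a common ViT/MAE choice: the q and v projections carry learnable biases while k does not, yet all three still run as a single packed linear. A sketch of that packing with random stand-ins for the learned weights:

import torch
import torch.nn.functional as F

dim = 8
qkv_weight = torch.randn(3 * dim, dim)
q_bias, v_bias = torch.randn(dim), torch.randn(dim)
k_bias = torch.zeros(dim)                          # key bias stays zero
qkv_bias = torch.cat((q_bias, k_bias, v_bias))
x = torch.randn(2, 4, dim)
q, k, v = F.linear(x, qkv_weight, qkv_bias).chunk(3, dim=-1)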
def forward(self, x: Tensor) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data with size of (B, N, C).
Returns:
Tensor: The output of the transformer block, same size as inputs.
"""
if hasattr(self, 'gamma_1'):
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x)))
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x | Defines the computation performed at every call.
Args:
x (Tensor): The input data with size of (B, N, C).
Returns:
Tensor: The output of the transformer block, same size as inputs. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/vit_mae.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/vit_mae.py | Apache-2.0 |
def get_sinusoid_encoding(n_position: int, embed_dims: int) -> Tensor:
"""Generate sinusoid encoding table.
Sinusoid encoding is a kind of relative position encoding method that comes from
`Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
Args:
n_position (int): The length of the input token.
embed_dims (int): The position embedding dimension.
Returns:
:obj:`torch.FloatTensor`: The sinusoid encoding table of size
(1, n_position, embed_dims)
"""
vec = torch.arange(embed_dims, dtype=torch.float64)
vec = (vec - vec % 2) / embed_dims
vec = torch.pow(10000, -vec).view(1, -1)
sinusoid_table = torch.arange(n_position).view(-1, 1) * vec
sinusoid_table[:, 0::2].sin_() # dim 2i
sinusoid_table[:, 1::2].cos_() # dim 2i+1
sinusoid_table = sinusoid_table.to(torch.float32)
return sinusoid_table.unsqueeze(0) | Generate sinusoid encoding table.
Sinusoid encoding is a kind of relative position encoding method that comes from
`Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
Args:
n_position (int): The length of the input token.
embed_dims (int): The position embedding dimension.
Returns:
:obj:`torch.FloatTensor`: The sinusoid encoding table of size
(1, n_position, embed_dims) | get_sinusoid_encoding | python | open-mmlab/mmaction2 | mmaction/models/backbones/vit_mae.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/vit_mae.py | Apache-2.0 |
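The table implements the standard encoding PE(pos, 2i) = sin(pos / 10000^(2i/d)) and PE(pos, 2i+1) = cos(pos / 10000^(2i/d)). A quick spot check of one entry, assuming the `get_sinusoid_encoding` above is in scope:

import math
import torch

table = get_sinusoid_encoding(n_position=16, embed_dims=8)
pos, i = 3, 1                                      # check dims 2i=2 and 2i+1=3
angle = pos / 10000 ** (2 * i / 8)
assert torch.isclose(table[0, pos, 2], torch.tensor(math.sin(angle)))
assert torch.isclose(table[0, pos, 3], torch.tensor(math.cos(angle)))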
def forward(self, x: Tensor) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The feature of the input
samples extracted by the backbone.
"""
b, _, _, h, w = x.shape
h //= self.patch_size
w //= self.patch_size
x = self.patch_embed(x)[0]
if (h, w) != self.grid_size:
pos_embed = self.pos_embed.reshape(-1, *self.grid_size,
self.embed_dims)
pos_embed = pos_embed.permute(0, 3, 1, 2)
pos_embed = F.interpolate(
pos_embed, size=(h, w), mode='bicubic', align_corners=False)
pos_embed = pos_embed.permute(0, 2, 3, 1).flatten(1, 2)
pos_embed = pos_embed.reshape(1, -1, self.embed_dims)
else:
pos_embed = self.pos_embed
x = x + pos_embed
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
if self.return_feat_map:
x = x.reshape(b, -1, h, w, self.embed_dims)
x = x.permute(0, 4, 1, 2, 3)
return x
if self.fc_norm is not None:
return self.fc_norm(x.mean(1))
return x[:, 0] | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The feature of the input
samples extracted by the backbone. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/vit_mae.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/vit_mae.py | Apache-2.0 |
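The bicubic branch in the forward above is the usual recipe for running a ViT at a grid size it was not trained on: reshape the token sequence back to 2-D, resize, and flatten again. A standalone sketch with assumed sizes:

import torch
import torch.nn.functional as F

embed_dims, grid = 64, (14, 14)
pos_embed = torch.randn(1, grid[0] * grid[1], embed_dims)
new_h, new_w = 16, 16
p = pos_embed.reshape(1, *grid, embed_dims).permute(0, 3, 1, 2)    # (1, C, H, W)
p = F.interpolate(p, size=(new_h, new_w), mode='bicubic', align_corners=False)
pos_embed = p.permute(0, 2, 3, 1).reshape(1, new_h * new_w, embed_dims)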