Dataset schema:
- code: string, length 26 to 870k
- docstring: string, length 1 to 65.6k
- func_name: string, length 1 to 194
- language: string, 1 class
- repo: string, length 8 to 68
- path: string, length 5 to 194
- url: string, length 46 to 254
- license: string, 4 classes
def extract_feat(self,
                 inputs: Dict[str, torch.Tensor],
                 stage: str = 'backbone',
                 data_samples: OptSampleList = None,
                 test_mode: bool = False) -> Tuple:
    """Extract features.

    Args:
        inputs (dict[str, torch.Tensor]): The multi-modal input data.
        stage (str): Which stage to output the feature.
            Defaults to ``'backbone'``.
        data_samples (list[:obj:`ActionDataSample`], optional): Action data
            samples, which are only needed in training. Defaults to None.
        test_mode (bool): Whether in test mode. Defaults to False.

    Returns:
        tuple[torch.Tensor]: The extracted features.
        dict: A dict recording the kwargs for downstream pipeline.
    """
    # [N, num_views, C, T, H, W] -> [N * num_views, C, T, H, W]
    for m, m_data in inputs.items():
        m_data = m_data.reshape((-1, ) + m_data.shape[2:])
        inputs[m] = m_data

    # Record the kwargs required by `loss` and `predict`
    loss_predict_kwargs = dict()

    x = self.backbone(**inputs)
    if stage == 'backbone':
        return x, loss_predict_kwargs

    if self.with_cls_head and stage == 'head':
        x = self.cls_head(x, **loss_predict_kwargs)
        return x, loss_predict_kwargs
Extract features. Args: inputs (dict[str, torch.Tensor]): The multi-modal input data. stage (str): Which stage to output the feature. Defaults to ``'backbone'``. data_samples (list[:obj:`ActionDataSample`], optional): Action data samples, which are only needed in training. Defaults to None. test_mode (bool): Whether in test mode. Defaults to False. Returns: tuple[torch.Tensor]: The extracted features. dict: A dict recording the kwargs for downstream pipeline.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer3d_mm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer3d_mm.py
Apache-2.0
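A minimal, self-contained sketch of the per-modality flattening that `extract_feat` above performs; the modality names and tensor shapes here are hypothetical:

import torch

# Hypothetical multi-modal batch: 2 clips, 3 views each.
inputs = {
    'imgs': torch.randn(2, 3, 3, 8, 32, 32),          # [N, num_views, C, T, H, W]
    'heatmap_imgs': torch.randn(2, 3, 17, 8, 16, 16),
}
for m, m_data in inputs.items():
    # fold the view dimension into the batch dimension
    inputs[m] = m_data.reshape((-1, ) + m_data.shape[2:])

print(inputs['imgs'].shape)          # torch.Size([6, 3, 8, 32, 32])
print(inputs['heatmap_imgs'].shape)  # torch.Size([6, 17, 8, 16, 16])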
def extract_feat(self, inputs: torch.Tensor, **kwargs) -> ForwardResults:
    """Extract features from raw inputs."""
Extract features from raw inputs.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py
Apache-2.0
@property
def with_neck(self) -> bool:
    """bool: whether the recognizer has a neck"""
    return hasattr(self, 'neck') and self.neck is not None
bool: whether the recognizer has a neck
with_neck
python
open-mmlab/mmaction2
mmaction/models/recognizers/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py
Apache-2.0
@property
def with_cls_head(self) -> bool:
    """bool: whether the recognizer has a cls_head"""
    return hasattr(self, 'cls_head') and self.cls_head is not None
bool: whether the recognizer has a cls_head
with_cls_head
python
open-mmlab/mmaction2
mmaction/models/recognizers/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py
Apache-2.0
def init_weights(self) -> None:
    """Initialize the model network weights."""
    if self.backbone_from in ['torchvision', 'timm']:
        warnings.warn('We do not initialize weights for backbones in '
                      f'{self.backbone_from}, since the weights for '
                      f'backbones in {self.backbone_from} are initialized '
                      'in their __init__ functions.')

        def fake_init():
            pass

        # avoid repeated initialization
        self.backbone.init_weights = fake_init
    super().init_weights()
Initialize the model network weights.
init_weights
python
open-mmlab/mmaction2
mmaction/models/recognizers/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py
Apache-2.0
def loss(self, inputs: torch.Tensor, data_samples: SampleList,
         **kwargs) -> dict:
    """Calculate losses from a batch of inputs and data samples.

    Args:
        inputs (torch.Tensor): Raw Inputs of the recognizer.
            These should usually be mean centered and std scaled.
        data_samples (List[``ActionDataSample``]): The batch
            data samples. It usually includes information such
            as ``gt_label``.

    Returns:
        dict: A dictionary of loss components.
    """
    feats, loss_kwargs = \
        self.extract_feat(inputs, data_samples=data_samples)

    # loss_aux will be an empty dict if `self.with_neck` is False.
    loss_aux = loss_kwargs.get('loss_aux', dict())
    loss_cls = self.cls_head.loss(feats, data_samples, **loss_kwargs)
    losses = merge_dict(loss_cls, loss_aux)
    return losses
Calculate losses from a batch of inputs and data samples. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. These should usually be mean centered and std scaled. data_samples (List[``ActionDataSample``]): The batch data samples. It usually includes information such as ``gt_label``. Returns: dict: A dictionary of loss components.
loss
python
open-mmlab/mmaction2
mmaction/models/recognizers/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py
Apache-2.0
def predict(self, inputs: torch.Tensor, data_samples: SampleList,
            **kwargs) -> SampleList:
    """Predict results from a batch of inputs and data samples with
    post-processing.

    Args:
        inputs (torch.Tensor): Raw Inputs of the recognizer.
            These should usually be mean centered and std scaled.
        data_samples (List[``ActionDataSample``]): The batch
            data samples. It usually includes information such
            as ``gt_label``.

    Returns:
        List[``ActionDataSample``]: The recognition results.
        The returned value is ``ActionDataSample``, which usually contains
        ``pred_scores``. And the ``pred_scores`` usually contains the
        following keys.

            - item (torch.Tensor): Classification scores, has a shape
                (num_classes, )
    """
    feats, predict_kwargs = self.extract_feat(inputs, test_mode=True)
    predictions = self.cls_head.predict(feats, data_samples,
                                        **predict_kwargs)
    return predictions
Predict results from a batch of inputs and data samples with post-processing. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. These should usually be mean centered and std scaled. data_samples (List[``ActionDataSample``]): The batch data samples. It usually includes information such as ``gt_label``. Returns: List[``ActionDataSample``]: The recognition results. The returned value is ``ActionDataSample``, which usually contains ``pred_scores``. And the ``pred_scores`` usually contains the following keys. - item (torch.Tensor): Classification scores, has a shape (num_classes, )
predict
python
open-mmlab/mmaction2
mmaction/models/recognizers/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py
Apache-2.0
def _forward(self,
             inputs: torch.Tensor,
             stage: str = 'backbone',
             **kwargs) -> ForwardResults:
    """Network forward process. Usually includes backbone, neck and head
    forward without any post-processing.

    Args:
        inputs (torch.Tensor): Raw Inputs of the recognizer.
        stage (str): Which stage to output the features.

    Returns:
        Union[tuple, torch.Tensor]: Features from ``backbone`` or ``neck``
        or ``head`` forward.
    """
    feats, _ = self.extract_feat(inputs, stage=stage)
    return feats
Network forward process. Usually includes backbone, neck and head forward without any post-processing. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. stage (str): Which stage to output the features. Returns: Union[tuple, torch.Tensor]: Features from ``backbone`` or ``neck`` or ``head`` forward.
_forward
python
open-mmlab/mmaction2
mmaction/models/recognizers/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py
Apache-2.0
def forward(self,
            inputs: torch.Tensor,
            data_samples: OptSampleList = None,
            mode: str = 'tensor',
            **kwargs) -> ForwardResults:
    """The unified entry for a forward process in both training and test.

    The method should accept three modes:

    - ``tensor``: Forward the whole network and return tensor or tuple of
        tensor without any post-processing, same as a common nn.Module.
    - ``predict``: Forward and return the predictions, which are fully
        processed to a list of :obj:`ActionDataSample`.
    - ``loss``: Forward and return a dict of losses according to the given
        inputs and data samples.

    Note that this method handles neither back propagation nor
    optimizer updating, which are done in the :meth:`train_step`.

    Args:
        inputs (torch.Tensor): The input tensor with shape
            (N, C, ...) in general.
        data_samples (List[``ActionDataSample``], optional): The
            annotation data of every sample. Defaults to None.
        mode (str): Return what kind of value. Defaults to ``tensor``.

    Returns:
        The return type depends on ``mode``.

        - If ``mode="tensor"``, return a tensor or a tuple of tensor.
        - If ``mode="predict"``, return a list of ``ActionDataSample``.
        - If ``mode="loss"``, return a dict of tensor.
    """
    if mode == 'tensor':
        return self._forward(inputs, **kwargs)
    if mode == 'predict':
        return self.predict(inputs, data_samples, **kwargs)
    elif mode == 'loss':
        return self.loss(inputs, data_samples, **kwargs)
    else:
        raise RuntimeError(f'Invalid mode "{mode}". '
                           'Only supports loss, predict and tensor mode')
The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictions, which are fully processed to a list of :obj:`ActionDataSample`. - ``loss``: Forward and return a dict of losses according to the given inputs and data samples. Note that this method handles neither back propagation nor optimizer updating, which are done in the :meth:`train_step`. Args: inputs (torch.Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[``ActionDataSample``], optional): The annotation data of every sample. Defaults to None. mode (str): Return what kind of value. Defaults to ``tensor``. Returns: The return type depends on ``mode``. - If ``mode="tensor"``, return a tensor or a tuple of tensor. - If ``mode="predict"``, return a list of ``ActionDataSample``. - If ``mode="loss"``, return a dict of tensor.
forward
python
open-mmlab/mmaction2
mmaction/models/recognizers/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py
Apache-2.0
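The three-mode dispatch described above can be illustrated with a stripped-down stand-in; this is not the real `BaseRecognizer`, just a toy class mimicking the same contract with fabricated features and losses:

import torch

# Toy stand-in (hypothetical): same tensor/loss/predict dispatch contract.
class ToyRecognizer:

    def _forward(self, inputs, **kwargs):
        return inputs.mean(dim=(-3, -2, -1))  # stand-in "features"

    def loss(self, inputs, data_samples, **kwargs):
        return dict(loss_cls=self._forward(inputs).abs().mean())

    def predict(self, inputs, data_samples, **kwargs):
        return self._forward(inputs).argmax(dim=-1).tolist()

    def forward(self, inputs, data_samples=None, mode='tensor', **kwargs):
        if mode == 'tensor':
            return self._forward(inputs, **kwargs)
        if mode == 'predict':
            return self.predict(inputs, data_samples, **kwargs)
        elif mode == 'loss':
            return self.loss(inputs, data_samples, **kwargs)
        raise RuntimeError(f'Invalid mode "{mode}". '
                           'Only supports loss, predict and tensor mode')

model = ToyRecognizer()
x = torch.randn(2, 3, 8, 16, 16)  # hypothetical [N, C, T, H, W] batch
print(model.forward(x, mode='tensor').shape)   # torch.Size([2, 3])
print(model.forward(x, None, mode='loss'))     # {'loss_cls': tensor(...)}
print(model.forward(x, None, mode='predict'))  # e.g. [1, 0]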
def extract_feat(self,
                 inputs: Tensor,
                 stage: str = 'neck',
                 data_samples: OptSampleList = None,
                 test_mode: bool = False) -> tuple:
    """Extract features of different stages.

    Args:
        inputs (torch.Tensor): The input data.
        stage (str): Which stage to output the feature.
            Defaults to ``'neck'``.
        data_samples (list[:obj:`ActionDataSample`], optional): Action data
            samples, which are only needed in training. Defaults to None.
        test_mode (bool): Whether in test mode. Defaults to False.

    Returns:
        torch.Tensor: The extracted features.
        dict: A dict recording the kwargs for downstream pipeline. These
            keys are usually included: ``loss_aux``.
    """
    # Record the kwargs required by `loss` and `predict`
    loss_predict_kwargs = dict()

    num_segs = inputs.shape[1]
    # [N, num_crops, C, T, H, W] -> [N * num_crops, C, T, H, W]
    # `num_crops` is calculated by:
    #   1) `twice_sample` in `SampleFrames`
    #   2) `num_sample_positions` in `DenseSampleFrames`
    #   3) `ThreeCrop/TenCrop` in `test_pipeline`
    #   4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1`
    inputs = inputs.view((-1, ) + inputs.shape[2:])

    # Check settings of test
    if test_mode:
        if self.test_cfg is not None:
            loss_predict_kwargs['fcn_test'] = self.test_cfg.get(
                'fcn_test', False)
        if self.test_cfg is not None and self.test_cfg.get(
                'max_testing_views', False):
            max_testing_views = self.test_cfg.get('max_testing_views')
            assert isinstance(max_testing_views, int)

            total_views = inputs.shape[0]
            assert num_segs == total_views, (
                'max_testing_views is only compatible '
                'with batch_size == 1')
            view_ptr = 0
            feats = []
            while view_ptr < total_views:
                batch_imgs = inputs[view_ptr:view_ptr + max_testing_views]
                feat = self.backbone(batch_imgs)
                if self.with_neck:
                    feat, _ = self.neck(feat)
                feats.append(feat)
                view_ptr += max_testing_views

            def recursively_cat(feats):
                # recursively traverse feats until it's a tensor,
                # then concat
                out_feats = []
                for e_idx, elem in enumerate(feats[0]):
                    batch_elem = [feat[e_idx] for feat in feats]
                    if not isinstance(elem, torch.Tensor):
                        batch_elem = recursively_cat(batch_elem)
                    else:
                        batch_elem = torch.cat(batch_elem)
                    out_feats.append(batch_elem)
                return tuple(out_feats)

            if isinstance(feats[0], tuple):
                x = recursively_cat(feats)
            else:
                x = torch.cat(feats)
        else:
            x = self.backbone(inputs)
            if self.with_neck:
                x, _ = self.neck(x)

        return x, loss_predict_kwargs
    else:
        # Return features extracted through backbone
        x = self.backbone(inputs)
        if stage == 'backbone':
            return x, loss_predict_kwargs

        loss_aux = dict()
        if self.with_neck:
            x, loss_aux = self.neck(x, data_samples=data_samples)

        # Return features extracted through neck
        loss_predict_kwargs['loss_aux'] = loss_aux
        if stage == 'neck':
            return x, loss_predict_kwargs

        # Return raw logits through head.
        if self.with_cls_head and stage == 'head':
            x = self.cls_head(x, **loss_predict_kwargs)
            return x, loss_predict_kwargs
Extract features of different stages. Args: inputs (torch.Tensor): The input data. stage (str): Which stage to output the feature. Defaults to ``'neck'``. data_samples (list[:obj:`ActionDataSample`], optional): Action data samples, which are only needed in training. Defaults to None. test_mode (bool): Whether in test mode. Defaults to False. Returns: torch.Tensor: The extracted features. dict: A dict recording the kwargs for downstream pipeline. These keys are usually included: ``loss_aux``.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer3d.py
Apache-2.0
def init_weights(self) -> None:
    """Initiate the parameters from scratch."""
    pass
Initiate the parameters from scratch.
init_weights
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def forward(self, inputs, data_samples, mode, **kwargs):
    """The unified entry for a forward process in both training and test.

    The method should accept three modes:

    - ``tensor``: Forward the whole network and return tensor or tuple of
        tensor without any post-processing, same as a common nn.Module.
    - ``predict``: Forward and return the predictions, which are fully
        processed to a list of :obj:`ActionDataSample`.
    - ``loss``: Forward and return a dict of losses according to the given
        inputs and data samples.

    Note that this method handles neither back propagation nor
    optimizer updating, which are done in the :meth:`train_step`.

    Args:
        inputs (Tensor): The input tensor with shape (N, C, ...) in
            general.
        data_samples (List[:obj:`ActionDataSample`], optional): The
            annotation data of every sample. Defaults to None.
        mode (str): Return what kind of value. Defaults to ``tensor``.

    Returns:
        The return type depends on ``mode``.

        - If ``mode="tensor"``, return a tensor or a tuple of tensor.
        - If ``mode="predict"``, return a list of ``ActionDataSample``.
        - If ``mode="loss"``, return a dict of tensor.
    """
    inputs = torch.stack(inputs)
    if mode == 'tensor':
        return self._forward(inputs, **kwargs)
    if mode == 'predict':
        return self.predict(inputs, data_samples, **kwargs)
    elif mode == 'loss':
        return self.loss(inputs, data_samples, **kwargs)
    else:
        raise RuntimeError(f'Invalid mode "{mode}". '
                           'Only supports loss, predict and tensor mode')
The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictions, which are fully processed to a list of :obj:`ActionDataSample`. - ``loss``: Forward and return a dict of losses according to the given inputs and data samples. Note that this method handles neither back propagation nor optimizer updating, which are done in the :meth:`train_step`. Args: inputs (Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[:obj:`ActionDataSample`], optional): The annotation data of every sample. Defaults to None. mode (str): Return what kind of value. Defaults to ``tensor``. Returns: The return type depends on ``mode``. - If ``mode="tensor"``, return a tensor or a tuple of tensor. - If ``mode="predict"``, return a list of ``ActionDataSample``. - If ``mode="loss"``, return a dict of tensor.
forward
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def loss(self, batch_inputs, batch_data_samples, **kwargs):
    """Calculate losses from a batch of inputs and data samples.

    Args:
        batch_inputs (Tensor): Raw Inputs of the recognizer.
            These should usually be mean centered and std scaled.
        batch_data_samples (List[:obj:`ActionDataSample`]): The batch
            data samples. It usually includes information such
            as ``gt_labels``.

    Returns:
        dict: A dictionary of loss components.
    """
    gt_bbox = [
        sample.gt_instances['gt_bbox'] for sample in batch_data_samples
    ]
    label_confidence, label_start, label_end = self.generate_labels(
        gt_bbox)

    device = batch_inputs.device
    label_confidence = label_confidence.to(device)
    label_start = label_start.to(device)
    label_end = label_end.to(device)

    confidence_map, start, end = self._forward(batch_inputs)

    loss = self.loss_cls(confidence_map, start, end, label_confidence,
                         label_start, label_end, self.bm_mask)
    loss_dict = dict(loss=loss[0])

    return loss_dict
Calculate losses from a batch of inputs and data samples. Args: batch_inputs (Tensor): Raw Inputs of the recognizer. These should usually be mean centered and std scaled. batch_data_samples (List[:obj:`ActionDataSample`]): The batch data samples. It usually includes information such as ``gt_labels``. Returns: dict: A dictionary of loss components.
loss
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def predict(self, batch_inputs, batch_data_samples, **kwargs):
    """Define the computation performed at every call when testing."""
    confidence_map, start, end = self._forward(batch_inputs)
    start_scores = start[0].cpu().numpy()
    end_scores = end[0].cpu().numpy()
    cls_confidence = (confidence_map[0][1]).cpu().numpy()
    reg_confidence = (confidence_map[0][0]).cpu().numpy()

    max_start = max(start_scores)
    max_end = max(end_scores)

    # generate the set of start points and end points
    start_bins = np.zeros(len(start_scores))
    start_bins[0] = 1  # [1,0,0...,0,0]
    end_bins = np.zeros(len(end_scores))
    end_bins[-1] = 1  # [0,0,0...,0,1]
    for idx in range(1, self.tscale - 1):
        if start_scores[idx] > start_scores[
                idx + 1] and start_scores[idx] > start_scores[idx - 1]:
            start_bins[idx] = 1
        elif start_scores[idx] > (0.5 * max_start):
            start_bins[idx] = 1
        if end_scores[idx] > end_scores[
                idx + 1] and end_scores[idx] > end_scores[idx - 1]:
            end_bins[idx] = 1
        elif end_scores[idx] > (0.5 * max_end):
            end_bins[idx] = 1

    # iterate through all combinations of start_index and end_index
    new_proposals = []
    for idx in range(self.tscale):
        for jdx in range(self.tscale):
            start_index = jdx
            end_index = start_index + idx + 1
            if end_index < self.tscale and start_bins[
                    start_index] == 1 and end_bins[end_index] == 1:
                tmin = start_index / self.tscale
                tmax = end_index / self.tscale
                tmin_score = start_scores[start_index]
                tmax_score = end_scores[end_index]
                cls_score = cls_confidence[idx, jdx]
                reg_score = reg_confidence[idx, jdx]
                score = tmin_score * tmax_score * cls_score * reg_score
                new_proposals.append([
                    tmin, tmax, tmin_score, tmax_score, cls_score,
                    reg_score, score
                ])
    new_proposals = np.stack(new_proposals)
    video_info = batch_data_samples[0].metainfo
    proposal_list = post_processing(new_proposals, video_info,
                                    self.soft_nms_alpha,
                                    self.soft_nms_low_threshold,
                                    self.soft_nms_high_threshold,
                                    self.post_process_top_k,
                                    self.feature_extraction_interval)
    output = [
        dict(
            video_name=video_info['video_name'],
            proposal_list=proposal_list)
    ]
    return output
Define the computation performed at every call when testing.
predict
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def _get_interp1d_bin_mask(seg_tmin, seg_tmax, tscale, num_samples,
                           num_samples_per_bin):
    """Generate sample mask for a boundary-matching pair."""
    plen = float(seg_tmax - seg_tmin)
    plen_sample = plen / (num_samples * num_samples_per_bin - 1.0)
    total_samples = [
        seg_tmin + plen_sample * i
        for i in range(num_samples * num_samples_per_bin)
    ]
    p_mask = []
    for idx in range(num_samples):
        bin_samples = total_samples[idx * num_samples_per_bin:(idx + 1) *
                                    num_samples_per_bin]
        bin_vector = np.zeros(tscale)
        for sample in bin_samples:
            sample_upper = math.ceil(sample)
            sample_decimal, sample_down = math.modf(sample)
            if 0 <= int(sample_down) <= (tscale - 1):
                bin_vector[int(sample_down)] += 1 - sample_decimal
            if 0 <= int(sample_upper) <= (tscale - 1):
                bin_vector[int(sample_upper)] += sample_decimal
        bin_vector = 1.0 / num_samples_per_bin * bin_vector
        p_mask.append(bin_vector)
    p_mask = np.stack(p_mask, axis=1)
    return p_mask
Generate sample mask for a boundary-matching pair.
_get_interp1d_bin_mask
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
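A standalone call to the `_get_interp1d_bin_mask` helper above, treated here as a free function; the segment boundaries, `tscale` and sample counts are toy values chosen only for illustration:

import math
import numpy as np

# Each of the 4 samples becomes a tscale-long linear-interpolation weight
# vector; weights within a column sum to ~1.
p_mask = _get_interp1d_bin_mask(
    seg_tmin=1.0, seg_tmax=5.0, tscale=8, num_samples=4,
    num_samples_per_bin=3)
print(p_mask.shape)        # (8, 4)
print(p_mask.sum(axis=0))  # [1. 1. 1. 1.]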
def _get_interp1d_mask(self):
    """Generate sample mask for each point in Boundary-Matching Map."""
    mask_mat = []
    for start_index in range(self.tscale):
        mask_mat_vector = []
        for duration_index in range(self.tscale):
            if start_index + duration_index < self.tscale:
                p_tmin = start_index
                p_tmax = start_index + duration_index
                center_len = float(p_tmax - p_tmin) + 1
                sample_tmin = p_tmin - (center_len * self.boundary_ratio)
                sample_tmax = p_tmax + (center_len * self.boundary_ratio)
                p_mask = self._get_interp1d_bin_mask(
                    sample_tmin, sample_tmax, self.tscale,
                    self.num_samples, self.num_samples_per_bin)
            else:
                p_mask = np.zeros([self.tscale, self.num_samples])
            mask_mat_vector.append(p_mask)
        mask_mat_vector = np.stack(mask_mat_vector, axis=2)
        mask_mat.append(mask_mat_vector)
    mask_mat = np.stack(mask_mat, axis=3)
    mask_mat = mask_mat.astype(np.float32)
    self.sample_mask = nn.Parameter(
        torch.tensor(mask_mat).view(self.tscale, -1), requires_grad=False)
Generate sample mask for each point in Boundary-Matching Map.
_get_interp1d_mask
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def _get_bm_mask(self):
    """Generate Boundary-Matching Mask."""
    bm_mask = []
    for idx in range(self.tscale):
        mask_vector = [1] * (self.tscale - idx) + [0] * idx
        bm_mask.append(mask_vector)
    bm_mask = torch.tensor(bm_mask, dtype=torch.float)
    return bm_mask
Generate Boundary-Matching Mask.
_get_bm_mask
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
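The same construction as `_get_bm_mask` above, inlined for a hypothetical `tscale` of 4: row `idx` (a duration index) masks out start positions whose proposal would run past the end of the sequence.

import torch

tscale = 4  # toy value
bm_mask = torch.tensor(
    [[1] * (tscale - idx) + [0] * idx for idx in range(tscale)],
    dtype=torch.float)
print(bm_mask)
# tensor([[1., 1., 1., 1.],
#         [1., 1., 1., 0.],
#         [1., 1., 0., 0.],
#         [1., 0., 0., 0.]])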
def _match_map(self):
    """Generate match map."""
    temporal_gap = 1. / self.tscale
    match_map = []
    for idx in range(self.tscale):
        match_window = []
        tmin = temporal_gap * idx
        for jdx in range(1, self.tscale + 1):
            tmax = tmin + temporal_gap * jdx
            match_window.append([tmin, tmax])
        match_map.append(match_window)
    match_map = np.array(match_map)
    match_map = np.transpose(match_map, [1, 0, 2])
    match_map = np.reshape(match_map, [-1, 2])
    return match_map
Generate match map.
_match_map
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.):
    """Generate temporal anchors.

    Args:
        tmin_offset (float): Offset for the minimum value of temporal
            anchor. Default: 0.
        tmax_offset (float): Offset for the maximum value of temporal
            anchor. Default: 1.

    Returns:
        tuple[Sequence[float]]: The minimum and maximum values of temporal
            anchors.
    """
    temporal_gap = 1. / self.tscale
    anchors_tmins = []
    anchors_tmaxs = []
    for i in range(self.tscale):
        anchors_tmins.append(temporal_gap * (i + tmin_offset))
        anchors_tmaxs.append(temporal_gap * (i + tmax_offset))
    return anchors_tmins, anchors_tmaxs
Generate temporal anchors. Args: tmin_offset (float): Offset for the minimum value of temporal anchor. Default: 0. tmax_offset (float): Offset for the maximum value of temporal anchor. Default: 1. Returns: tuple[Sequence[float]]: The minimum and maximum values of temporal anchors.
_temporal_anchors
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
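What `_temporal_anchors` above produces for a hypothetical `tscale` of 4; each anchor spans one `temporal_gap` of the normalized timeline:

tscale = 4  # toy value
temporal_gap = 1. / tscale
anchors_tmins = [temporal_gap * (i + 0.) for i in range(tscale)]
anchors_tmaxs = [temporal_gap * (i + 1.) for i in range(tscale)]
print(list(zip(anchors_tmins, anchors_tmaxs)))
# [(0.0, 0.25), (0.25, 0.5), (0.5, 0.75), (0.75, 1.0)]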
def _forward(self, x):
    """Define the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        torch.Tensor: The output of the module.
    """
    # x.shape [batch_size, self.feat_dim, self.tscale]
    base_feature = self.x_1d_b(x)
    # base_feature.shape [batch_size, self.hidden_dim_1d, self.tscale]
    start = self.x_1d_s(base_feature).squeeze(1)
    # start.shape [batch_size, self.tscale]
    end = self.x_1d_e(base_feature).squeeze(1)
    # end.shape [batch_size, self.tscale]
    confidence_map = self.x_1d_p(base_feature)
    # [batch_size, self.hidden_dim_1d, self.tscale]
    confidence_map = self._boundary_matching_layer(confidence_map)
    # [batch_size, self.hidden_dim_1d, self.num_samples, self.tscale, self.tscale]  # noqa
    confidence_map = self.x_3d_p(confidence_map).squeeze(2)
    # [batch_size, self.hidden_dim_3d, self.tscale, self.tscale]
    confidence_map = self.x_2d_p(confidence_map)
    # [batch_size, 2, self.tscale, self.tscale]
    return confidence_map, start, end
Define the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The output of the module.
_forward
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def _boundary_matching_layer(self, x):
    """Generate matching layer."""
    input_size = x.size()
    out = torch.matmul(x, self.sample_mask).reshape(
        input_size[0], input_size[1], self.num_samples, self.tscale,
        self.tscale)
    return out
Generate matching layer.
_boundary_matching_layer
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def generate_labels(self, gt_bbox):
    """Generate training labels."""
    # TODO: do this without numpy
    match_score_confidence_list = []
    match_score_start_list = []
    match_score_end_list = []
    for every_gt_bbox in gt_bbox:
        gt_iou_map = []
        every_gt_bbox = every_gt_bbox.cpu()
        for start, end in every_gt_bbox:
            if isinstance(start, torch.Tensor):
                start = start.numpy()
            if isinstance(end, torch.Tensor):
                end = end.numpy()
            current_gt_iou_map = temporal_iou(self.match_map[:, 0],
                                              self.match_map[:, 1], start,
                                              end)
            current_gt_iou_map = np.reshape(current_gt_iou_map,
                                            [self.tscale, self.tscale])
            gt_iou_map.append(current_gt_iou_map)
        gt_iou_map = np.array(gt_iou_map).astype(np.float32)
        gt_iou_map = np.max(gt_iou_map, axis=0)

        gt_tmins = every_gt_bbox[:, 0]
        gt_tmaxs = every_gt_bbox[:, 1]

        gt_len_pad = 3 * (1. / self.tscale)

        gt_start_bboxs = np.stack(
            (gt_tmins - gt_len_pad / 2, gt_tmins + gt_len_pad / 2), axis=1)
        gt_end_bboxs = np.stack(
            (gt_tmaxs - gt_len_pad / 2, gt_tmaxs + gt_len_pad / 2), axis=1)

        match_score_start = []
        match_score_end = []

        for anchor_tmin, anchor_tmax in zip(self.anchors_tmins,
                                            self.anchors_tmaxs):
            match_score_start.append(
                np.max(
                    temporal_iop(anchor_tmin, anchor_tmax,
                                 gt_start_bboxs[:, 0],
                                 gt_start_bboxs[:, 1])))
            match_score_end.append(
                np.max(
                    temporal_iop(anchor_tmin, anchor_tmax,
                                 gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
        match_score_confidence_list.append(gt_iou_map)
        match_score_start_list.append(match_score_start)
        match_score_end_list.append(match_score_end)

    def to_tensor(x):
        return torch.Tensor(np.array(x))

    match_score_confidence_list = to_tensor(match_score_confidence_list)
    match_score_start_list = to_tensor(match_score_start_list)
    match_score_end_list = to_tensor(match_score_end_list)
    return (match_score_confidence_list, match_score_start_list,
            match_score_end_list)
Generate training labels.
generate_labels
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def forward(self, x: Tensor) -> Tensor:
    """Forward call for LGTE.

    Args:
        x (torch.Tensor): The input tensor with shape (B, C, L).
    """
    x = x.permute(2, 0, 1)
    mask = self.mask.repeat(x.size(1), 1, 1, 1)
    L = x.shape[0]
    x = self.atten(x, attn_mask=mask.reshape(-1, L, L))
    x = self.norm1(x)
    x = self.ffn(x)
    x = self.norm2(x)
    x = x.permute(1, 2, 0)
    return x
Forward call for LGTE. Args: x (torch.Tensor): The input tensor with shape (B, C, L)
forward
python
open-mmlab/mmaction2
mmaction/models/localizers/tcanet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/tcanet.py
Apache-2.0
def StartEndRegressor(sample_num: int, feat_dim: int) -> nn.Module:
    """Start and End Regressor in the Temporal Boundary Regressor.

    Args:
        sample_num (int): number of samples for the start & end.
        feat_dim (int): feature dimension.

    Returns:
        A pytorch module that works as the start and end regressor. The
        input of the module should have a shape of (B, feat_dim * 2,
        sample_num).
    """
    hidden_dim = 128
    regressor = nn.Sequential(
        nn.Conv1d(
            feat_dim * 2,
            hidden_dim * 2,
            kernel_size=3,
            padding=1,
            groups=8,
            stride=2), nn.ReLU(inplace=True),
        nn.Conv1d(
            hidden_dim * 2,
            hidden_dim * 2,
            kernel_size=3,
            padding=1,
            groups=8,
            stride=2), nn.ReLU(inplace=True),
        nn.Conv1d(hidden_dim * 2, 2, kernel_size=sample_num // 4, groups=2),
        nn.Flatten())
    return regressor
Start and End Regressor in the Temporal Boundary Regressor. Args: sample_num (int): number of samples for the start & end. feat_dim (int): feature dimension. Returns: A pytorch module that works as the start and end regressor. The input of the module should have a shape of (B, feat_dim * 2, sample_num).
StartEndRegressor
python
open-mmlab/mmaction2
mmaction/models/localizers/tcanet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/tcanet.py
Apache-2.0
def CenterWidthRegressor(temporal_len: int, feat_dim: int) -> nn.Module:
    """Center Width Regressor in the Temporal Boundary Regressor.

    Args:
        temporal_len (int): temporal dimension of the inputs.
        feat_dim (int): feature dimension.

    Returns:
        A pytorch module that works as the center and width regressor. The
        input of the module should have a shape of (B, feat_dim,
        temporal_len).
    """
    hidden_dim = 512
    regressor = nn.Sequential(
        nn.Conv1d(
            feat_dim,
            hidden_dim,
            kernel_size=3,
            padding=1,
            groups=4,
            stride=2), nn.ReLU(inplace=True),
        nn.Conv1d(
            hidden_dim,
            hidden_dim,
            kernel_size=3,
            padding=1,
            groups=4,
            stride=2), nn.ReLU(inplace=True),
        nn.Conv1d(
            hidden_dim, hidden_dim, kernel_size=temporal_len // 4,
            groups=4), nn.ReLU(inplace=True),
        nn.Conv1d(hidden_dim, 3, kernel_size=1))
    return regressor
Center Width Regressor in the Temporal Boundary Regressor. Args: temporal_len (int): temporal dimension of the inputs. feat_dim (int): feature dimension. Returns: A pytorch module that works as the center and width regressor. The input of the module should have a shape of (B, feat_dim, temporal_len).
CenterWidthRegressor
python
open-mmlab/mmaction2
mmaction/models/localizers/tcanet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/tcanet.py
Apache-2.0
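A quick shape check for the two regressor factories above; the `sample_num`, `temporal_len` and `feat_dim` values are arbitrary illustration choices, and the factories plus `torch`/`torch.nn` are assumed to be in scope:

import torch
import torch.nn as nn

# Two strided convs halve the length twice (32 -> 16 -> 8), then a
# kernel covering the remaining length collapses it to 1.
se_reg = StartEndRegressor(sample_num=32, feat_dim=512)
cw_reg = CenterWidthRegressor(temporal_len=32, feat_dim=512)

print(se_reg(torch.randn(4, 512 * 2, 32)).shape)  # torch.Size([4, 2])
print(cw_reg(torch.randn(4, 512, 32)).shape)      # torch.Size([4, 3, 1])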
def forward(self, inputs, data_samples, mode, **kwargs):
    """The unified entry for a forward process in both training and test.

    The method should accept three modes:

    - ``tensor``: Forward the whole network and return tensor or tuple of
        tensor without any post-processing, same as a common nn.Module.
    - ``predict``: Forward and return the predictions, which are fully
        processed to a list of :obj:`ActionDataSample`.
    - ``loss``: Forward and return a dict of losses according to the given
        inputs and data samples.

    Note that this method handles neither back propagation nor
    optimizer updating, which are done in the :meth:`train_step`.

    Args:
        inputs (Tensor): The input tensor with shape (N, C, ...) in
            general.
        data_samples (List[:obj:`ActionDataSample`], optional): The
            annotation data of every sample. Defaults to None.
        mode (str): Return what kind of value. Defaults to ``tensor``.

    Returns:
        The return type depends on ``mode``.

        - If ``mode="tensor"``, return a tensor or a tuple of tensor.
        - If ``mode="predict"``, return a list of ``ActionDataSample``.
        - If ``mode="loss"``, return a dict of tensor.
    """
    if not isinstance(inputs, Tensor):
        inputs = torch.stack(inputs)
    if mode == 'tensor':
        return self._forward(inputs, **kwargs)
    if mode == 'predict':
        return self.predict(inputs, data_samples, **kwargs)
    elif mode == 'loss':
        return self.loss(inputs, data_samples, **kwargs)
    else:
        raise RuntimeError(f'Invalid mode "{mode}". '
                           'Only supports loss, predict and tensor mode')
The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictions, which are fully processed to a list of :obj:`ActionDataSample`. - ``loss``: Forward and return a dict of losses according to the given inputs and data samples. Note that this method handles neither back propagation nor optimizer updating, which are done in the :meth:`train_step`. Args: inputs (Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[:obj:`ActionDataSample`], optional): The annotation data of every sample. Defaults to None. mode (str): Return what kind of value. Defaults to ``tensor``. Returns: The return type depends on ``mode``. - If ``mode="tensor"``, return a tensor or a tuple of tensor. - If ``mode="predict"``, return a list of ``ActionDataSample``. - If ``mode="loss"``, return a dict of tensor.
forward
python
open-mmlab/mmaction2
mmaction/models/localizers/tcanet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/tcanet.py
Apache-2.0
def _forward(self, x):
    """Define the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        torch.Tensor: The output of the module.
    """
    x = self.x_1d_b_f(x)
    for layer in self.lgtes:
        x = layer(x)
    return x
Define the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The output of the module.
_forward
python
open-mmlab/mmaction2
mmaction/models/localizers/tcanet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/tcanet.py
Apache-2.0
def init_weights(self) -> None:
    """Initiate the parameters either from existing checkpoint or from
    scratch."""
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            kaiming_init(m)
        elif isinstance(m, nn.BatchNorm2d):
            constant_init(m, 1)
Initiate the parameters either from existing checkpoint or from scratch.
init_weights
python
open-mmlab/mmaction2
mmaction/models/localizers/bsn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py
Apache-2.0
def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.):
    """Generate temporal anchors.

    Args:
        tmin_offset (float): Offset for the minimum value of temporal
            anchor. Default: 0.
        tmax_offset (float): Offset for the maximum value of temporal
            anchor. Default: 1.

    Returns:
        tuple[Sequence[float]]: The minimum and maximum values of temporal
            anchors.
    """
    temporal_gap = 1. / self.temporal_dim
    anchors_tmins = []
    anchors_tmaxs = []
    for i in range(self.temporal_dim):
        anchors_tmins.append(temporal_gap * (i + tmin_offset))
        anchors_tmaxs.append(temporal_gap * (i + tmax_offset))
    return anchors_tmins, anchors_tmaxs
Generate temporal anchors. Args: tmin_offset (float): Offset for the minimum value of temporal anchor. Default: 0. tmax_offset (float): Offset for the maximum value of temporal anchor. Default: 1. Returns: tuple[Sequence[float]]: The minimum and maximum values of temporal anchors.
_temporal_anchors
python
open-mmlab/mmaction2
mmaction/models/localizers/bsn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py
Apache-2.0
def _forward(self, x):
    """Define the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        torch.Tensor: The output of the module.
    """
    x = F.relu(self.conv1_ratio * self.conv1(x))
    x = F.relu(self.conv2_ratio * self.conv2(x))
    x = torch.sigmoid(self.conv3_ratio * self.conv3(x))
    return x
Define the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The output of the module.
_forward
python
open-mmlab/mmaction2
mmaction/models/localizers/bsn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py
Apache-2.0
def loss(self, batch_inputs, batch_data_samples, **kwargs):
    """Calculate losses from a batch of inputs and data samples.

    Args:
        batch_inputs (Tensor): Raw Inputs of the recognizer.
            These should usually be mean centered and std scaled.
        batch_data_samples (List[:obj:`ActionDataSample`]): The batch
            data samples. It usually includes information such
            as ``gt_labels``.

    Returns:
        dict: A dictionary of loss components.
    """
    tem_output = self._forward(batch_inputs)
    score_action = tem_output[:, 0, :]
    score_start = tem_output[:, 1, :]
    score_end = tem_output[:, 2, :]

    gt_bbox = [
        sample.gt_instances['gt_bbox'] for sample in batch_data_samples
    ]
    label_action, label_start, label_end = self.generate_labels(gt_bbox)
    device = batch_inputs.device
    label_action = label_action.to(device)
    label_start = label_start.to(device)
    label_end = label_end.to(device)

    loss_action = self.loss_cls(score_action, label_action,
                                self.match_threshold)
    loss_start = self.loss_cls(score_start, label_start,
                               self.match_threshold)
    loss_end = self.loss_cls(score_end, label_end, self.match_threshold)

    loss_dict = {
        'loss_action': loss_action * self.loss_weight,
        'loss_start': loss_start,
        'loss_end': loss_end
    }

    return loss_dict
Calculate losses from a batch of inputs and data samples. Args: batch_inputs (Tensor): Raw Inputs of the recognizer. These should usually be mean centered and std scaled. batch_data_samples (List[:obj:`ActionDataSample`]): The batch data samples. It usually includes information such as ``gt_labels``. Returns: dict: A dictionary of loss components.
loss
python
open-mmlab/mmaction2
mmaction/models/localizers/bsn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py
Apache-2.0
def predict(self, batch_inputs, batch_data_samples, **kwargs):
    """Define the computation performed at every call when testing."""
    tem_output = self._forward(batch_inputs).cpu().numpy()
    batch_action = tem_output[:, 0, :]
    batch_start = tem_output[:, 1, :]
    batch_end = tem_output[:, 2, :]

    video_results = []
    for batch_idx, _ in enumerate(batch_action):
        video_name = batch_data_samples[batch_idx].metainfo['video_name']
        video_action = batch_action[batch_idx]
        video_start = batch_start[batch_idx]
        video_end = batch_end[batch_idx]
        video_result = np.stack((video_action, video_start, video_end,
                                 self.anchors_tmins, self.anchors_tmaxs),
                                axis=1)
        video_results.append((video_name, video_result))
    return video_results
Define the computation performed at every call when testing.
predict
python
open-mmlab/mmaction2
mmaction/models/localizers/bsn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py
Apache-2.0
def generate_labels(self, gt_bbox):
    """Generate training labels."""
    # TODO: do this without numpy
    match_score_action_list = []
    match_score_start_list = []
    match_score_end_list = []
    for every_gt_bbox in gt_bbox:
        gt_tmins = every_gt_bbox[:, 0].cpu().numpy()
        gt_tmaxs = every_gt_bbox[:, 1].cpu().numpy()

        gt_lens = gt_tmaxs - gt_tmins
        gt_len_pad = np.maximum(1. / self.temporal_dim,
                                self.boundary_ratio * gt_lens)

        gt_start_bboxs = np.stack(
            (gt_tmins - gt_len_pad / 2, gt_tmins + gt_len_pad / 2), axis=1)
        gt_end_bboxs = np.stack(
            (gt_tmaxs - gt_len_pad / 2, gt_tmaxs + gt_len_pad / 2), axis=1)

        match_score_action = []
        match_score_start = []
        match_score_end = []

        for anchor_tmin, anchor_tmax in zip(self.anchors_tmins,
                                            self.anchors_tmaxs):
            match_score_action.append(
                np.max(
                    temporal_iop(anchor_tmin, anchor_tmax, gt_tmins,
                                 gt_tmaxs)))
            match_score_start.append(
                np.max(
                    temporal_iop(anchor_tmin, anchor_tmax,
                                 gt_start_bboxs[:, 0],
                                 gt_start_bboxs[:, 1])))
            match_score_end.append(
                np.max(
                    temporal_iop(anchor_tmin, anchor_tmax,
                                 gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
        match_score_action_list.append(match_score_action)
        match_score_start_list.append(match_score_start)
        match_score_end_list.append(match_score_end)
    match_score_action_list = torch.Tensor(match_score_action_list)
    match_score_start_list = torch.Tensor(match_score_start_list)
    match_score_end_list = torch.Tensor(match_score_end_list)
    return (match_score_action_list, match_score_start_list,
            match_score_end_list)
Generate training labels.
generate_labels
python
open-mmlab/mmaction2
mmaction/models/localizers/bsn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py
Apache-2.0
def forward(self, inputs, data_samples, mode, **kwargs):
    """The unified entry for a forward process in both training and test.

    The method should accept three modes:

    - ``tensor``: Forward the whole network and return tensor or tuple of
        tensor without any post-processing, same as a common nn.Module.
    - ``predict``: Forward and return the predictions, which are fully
        processed to a list of :obj:`ActionDataSample`.
    - ``loss``: Forward and return a dict of losses according to the given
        inputs and data samples.

    Note that this method handles neither back propagation nor
    optimizer updating, which are done in the :meth:`train_step`.

    Args:
        inputs (Tensor): The input tensor with shape (N, C, ...) in
            general.
        data_samples (List[:obj:`ActionDataSample`], optional): The
            annotation data of every sample. Defaults to None.
        mode (str): Return what kind of value. Defaults to ``tensor``.

    Returns:
        The return type depends on ``mode``.

        - If ``mode="tensor"``, return a tensor or a tuple of tensor.
        - If ``mode="predict"``, return a list of ``ActionDataSample``.
        - If ``mode="loss"``, return a dict of tensor.
    """
    if type(inputs) is not torch.Tensor:
        inputs = torch.stack(inputs)
    if mode == 'tensor':
        return self._forward(inputs, **kwargs)
    if mode == 'predict':
        return self.predict(inputs, data_samples, **kwargs)
    elif mode == 'loss':
        return self.loss(inputs, data_samples, **kwargs)
    else:
        raise RuntimeError(f'Invalid mode "{mode}". '
                           'Only supports loss, predict and tensor mode')
The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictions, which are fully processed to a list of :obj:`ActionDataSample`. - ``loss``: Forward and return a dict of losses according to the given inputs and data samples. Note that this method handles neither back propagation nor optimizer updating, which are done in the :meth:`train_step`. Args: inputs (Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[:obj:`ActionDataSample`], optional): The annotation data of every sample. Defaults to None. mode (str): Return what kind of value. Defaults to ``tensor``. Returns: The return type depends on ``mode``. - If ``mode="tensor"``, return a tensor or a tuple of tensor. - If ``mode="predict"``, return a list of ``ActionDataSample``. - If ``mode="loss"``, return a dict of tensor.
forward
python
open-mmlab/mmaction2
mmaction/models/localizers/bsn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py
Apache-2.0
def init_weights(self) -> None:
    """Initiate the parameters either from existing checkpoint or from
    scratch."""
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            kaiming_init(m)
        elif isinstance(m, nn.BatchNorm2d):
            constant_init(m, 1)
Initiate the parameters either from existing checkpoint or from scratch.
init_weights
python
open-mmlab/mmaction2
mmaction/models/localizers/bsn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py
Apache-2.0
def _forward(self, x):
    """Define the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        torch.Tensor: The output of the module.
    """
    x = F.relu(self.fc1_ratio * self.fc1(x))
    x = torch.sigmoid(self.fc2_ratio * self.fc2(x))
    return x
Define the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The output of the module.
_forward
python
open-mmlab/mmaction2
mmaction/models/localizers/bsn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py
Apache-2.0
def loss(self, batch_inputs, batch_data_samples, **kwargs):
    """Calculate losses from a batch of inputs and data samples.

    Args:
        batch_inputs (Tensor): Raw Inputs of the recognizer.
            These should usually be mean centered and std scaled.
        batch_data_samples (List[:obj:`ActionDataSample`]): The batch
            data samples. It usually includes information such
            as ``gt_labels``.

    Returns:
        dict: A dictionary of loss components.
    """
    device = self.fc1.weight.device
    bsp_feature = torch.cat([
        sample.gt_instances['bsp_feature'] for sample in batch_data_samples
    ]).to(device)

    reference_temporal_iou = torch.cat([
        sample.gt_instances['reference_temporal_iou']
        for sample in batch_data_samples
    ]).to(device)

    pem_output = self._forward(bsp_feature)
    anchors_temporal_iou = pem_output.view(-1)
    u_hmask = (reference_temporal_iou >
               self.pem_high_temporal_iou_threshold).float()
    u_mmask = (
        (reference_temporal_iou <= self.pem_high_temporal_iou_threshold)
        & (reference_temporal_iou > self.pem_low_temporal_iou_threshold)
    ).float()
    u_lmask = (reference_temporal_iou <=
               self.pem_low_temporal_iou_threshold).float()

    num_h = torch.sum(u_hmask)
    num_m = torch.sum(u_mmask)
    num_l = torch.sum(u_lmask)

    r_m = self.u_ratio_m * num_h / (num_m)
    r_m = torch.min(r_m, torch.Tensor([1.0]).to(device))[0]
    u_smmask = torch.rand(u_hmask.size()[0], device=device)
    u_smmask = u_smmask * u_mmask
    u_smmask = (u_smmask > (1. - r_m)).float()

    r_l = self.u_ratio_l * num_h / (num_l)
    r_l = torch.min(r_l, torch.Tensor([1.0]).to(device))[0]
    u_slmask = torch.rand(u_hmask.size()[0], device=device)
    u_slmask = u_slmask * u_lmask
    u_slmask = (u_slmask > (1. - r_l)).float()

    temporal_iou_weights = u_hmask + u_smmask + u_slmask
    # elementwise loss, so the sampled masks above weight each entry
    temporal_iou_loss = F.smooth_l1_loss(
        anchors_temporal_iou, reference_temporal_iou, reduction='none')
    temporal_iou_loss = torch.sum(
        temporal_iou_loss *
        temporal_iou_weights) / torch.sum(temporal_iou_weights)
    loss_dict = dict(temporal_iou_loss=temporal_iou_loss)

    return loss_dict
Calculate losses from a batch of inputs and data samples. Args: batch_inputs (Tensor): Raw Inputs of the recognizer. These should usually be mean centered and std scaled. batch_data_samples (List[:obj:`ActionDataSample`]): The batch data samples. It usually includes information such as ``gt_labels``. Returns: dict: A dictionary of loss components.
loss
python
open-mmlab/mmaction2
mmaction/models/localizers/bsn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py
Apache-2.0
def predict(self, batch_inputs, batch_data_samples, **kwargs):
    """Define the computation performed at every call when testing."""
    device = self.fc1.weight.device
    bsp_feature = torch.cat([
        sample.gt_instances['bsp_feature'] for sample in batch_data_samples
    ]).to(device)

    pem_output = self._forward(bsp_feature).view(-1).cpu().numpy()
    pem_output = pem_output.reshape(-1, 1)

    gt_instances = [sample.gt_instances for sample in batch_data_samples]
    tmin = self._parse(gt_instances, 'tmin')
    tmax = self._parse(gt_instances, 'tmax')
    tmin_score = self._parse(gt_instances, 'tmin_score')
    tmax_score = self._parse(gt_instances, 'tmax_score')

    score = np.array(pem_output * tmin_score * tmax_score).reshape(-1, 1)
    result = np.concatenate(
        (tmin, tmax, tmin_score, tmax_score, pem_output, score), axis=1)
    result = result.reshape(-1, 6)
    video_info = batch_data_samples[0].metainfo
    proposal_list = post_processing(result, video_info,
                                    self.soft_nms_alpha,
                                    self.soft_nms_low_threshold,
                                    self.soft_nms_high_threshold,
                                    self.post_process_top_k,
                                    self.feature_extraction_interval)
    output = [
        dict(
            video_name=video_info['video_name'],
            proposal_list=proposal_list)
    ]
    return output
Define the computation performed at every call when testing.
predict
python
open-mmlab/mmaction2
mmaction/models/localizers/bsn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py
Apache-2.0
def forward(self, inputs, data_samples, mode, **kwargs):
    """The unified entry for a forward process in both training and test.

    The method should accept three modes:

    - ``tensor``: Forward the whole network and return tensor or tuple of
        tensor without any post-processing, same as a common nn.Module.
    - ``predict``: Forward and return the predictions, which are fully
        processed to a list of :obj:`ActionDataSample`.
    - ``loss``: Forward and return a dict of losses according to the given
        inputs and data samples.

    Note that this method handles neither back propagation nor
    optimizer updating, which are done in the :meth:`train_step`.

    Args:
        inputs (Tensor): The input tensor with shape (N, C, ...) in
            general.
        data_samples (List[:obj:`ActionDataSample`], optional): The
            annotation data of every sample. Defaults to None.
        mode (str): Return what kind of value. Defaults to ``tensor``.

    Returns:
        The return type depends on ``mode``.

        - If ``mode="tensor"``, return a tensor or a tuple of tensor.
        - If ``mode="predict"``, return a list of ``ActionDataSample``.
        - If ``mode="loss"``, return a dict of tensor.
    """
    inputs = torch.stack(inputs)
    if mode == 'tensor':
        return self._forward(inputs, **kwargs)
    if mode == 'predict':
        return self.predict(inputs, data_samples, **kwargs)
    elif mode == 'loss':
        return self.loss(inputs, data_samples, **kwargs)
    else:
        raise RuntimeError(f'Invalid mode "{mode}". '
                           'Only supports loss, predict and tensor mode')
The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictions, which are fully processed to a list of :obj:`ActionDataSample`. - ``loss``: Forward and return a dict of losses according to the given inputs and data samples. Note that this method handles neither back propagation nor optimizer updating, which are done in the :meth:`train_step`. Args: inputs (Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[:obj:`ActionDataSample`], optional): The annotation data of every sample. Defaults to None. mode (str): Return what kind of value. Defaults to ``tensor``. Returns: The return type depends on ``mode``. - If ``mode="tensor"``, return a tensor or a tuple of tensor. - If ``mode="predict"``, return a list of ``ActionDataSample``. - If ``mode="loss"``, return a dict of tensor.
forward
python
open-mmlab/mmaction2
mmaction/models/localizers/bsn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py
Apache-2.0
def __init__(self, pre_nms_thresh, pre_nms_top_n, nms_thresh,
             fpn_post_nms_top_n, min_size, num_classes, is_first_stage):
    """
    Arguments:
        pre_nms_thresh (float)
        pre_nms_top_n (int)
        nms_thresh (float)
        fpn_post_nms_top_n (int)
        min_size (int)
        num_classes (int)
        is_first_stage (bool)
    """
    super(FCOSPostProcessor, self).__init__()
    self.pre_nms_thresh = pre_nms_thresh
    self.pre_nms_top_n = pre_nms_top_n
    self.nms_thresh = nms_thresh
    self.fpn_post_nms_top_n = fpn_post_nms_top_n
    self.min_size = min_size
    self.num_classes = num_classes
    self.innerness_threshold = 0.15
    self.downsample_scale = 32
    self.is_first_stage = is_first_stage
Arguments: pre_nms_thresh (float) pre_nms_top_n (int) nms_thresh (float) fpn_post_nms_top_n (int) min_size (int) num_classes (int) is_first_stage (bool)
__init__
python
open-mmlab/mmaction2
mmaction/models/localizers/drn/drn_utils/inference.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/drn/drn_utils/inference.py
Apache-2.0
def forward_for_single_feature_map(self, locations, box_cls,
                                   box_regression, level, iou_scores):
    """
    Arguments:
        locations: tensor of size T
        box_cls: tensor of size N, C, T
        box_regression: tensor of size N, 2, T
        level: int, index of the feature level
        iou_scores: tensor of size N, 1, T
    """
    N, C, T = box_cls.shape

    # put in the same format as locations
    box_cls = box_cls.permute(0, 2, 1).contiguous().sigmoid()
    iou_scores = iou_scores.permute(0, 2, 1).contiguous().sigmoid()
    box_regression = box_regression.permute(0, 2, 1)

    # centerness = centerness.permute(0, 2, 1)
    # centerness = centerness.reshape(N, -1).sigmoid()
    # inner = inner.squeeze().sigmoid()

    candidate_inds = (box_cls > self.pre_nms_thresh)
    pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
    pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)

    # multiply the classification scores with centerness scores
    # box_cls = box_cls * centerness[:, :, None]
    # box_cls = box_cls + centerness[:, :, None]
    if not self.is_first_stage:
        box_cls = box_cls * iou_scores

    results = []
    for i in range(N):
        # per_centerness = centerness[i]
        per_box_cls = box_cls[i]
        per_candidate_inds = candidate_inds[i]
        per_box_cls = per_box_cls[per_candidate_inds]

        per_candidate_nonzeros = per_candidate_inds.nonzero()
        per_box_loc = per_candidate_nonzeros[:, 0]
        per_class = per_candidate_nonzeros[:, 1] + 1

        per_box_regression = box_regression[i]
        per_box_regression = per_box_regression[per_box_loc]
        per_locations = locations[per_box_loc]

        # per_centerness = per_centerness[per_box_loc]
        per_pre_nms_top_n = pre_nms_top_n[i]

        if per_candidate_inds.sum().item() > per_pre_nms_top_n.item():
            per_box_cls, top_k_indices = \
                per_box_cls.topk(per_pre_nms_top_n, sorted=False)
            per_class = per_class[top_k_indices]
            per_box_regression = per_box_regression[top_k_indices]
            per_locations = per_locations[top_k_indices]
            # per_centerness = per_centerness[top_k_indices]

        detections = torch.stack([
            per_locations - per_box_regression[:, 0],
            per_locations + per_box_regression[:, 1],
        ], dim=1) / self.downsample_scale

        detections[:, 0].clamp_(min=0, max=1)
        detections[:, 1].clamp_(min=0, max=1)

        # remove small boxes
        p_start, p_end = detections.unbind(dim=1)
        duration = p_end - p_start
        keep = (duration >= self.min_size).nonzero().squeeze(1)
        detections = detections[keep]

        temp_dict = {}
        temp_dict['detections'] = detections
        temp_dict['labels'] = per_class
        temp_dict['scores'] = torch.sqrt(per_box_cls)
        temp_dict['level'] = [level]
        # temp_dict['centerness'] = per_centerness
        temp_dict['locations'] = per_locations / 32
        results.append(temp_dict)

    return results
Arguments: locations: tensor of size T box_cls: tensor of size N, C, T box_regression: tensor of size N, 2, T level: int, index of the feature level iou_scores: tensor of size N, 1, T
forward_for_single_feature_map
python
open-mmlab/mmaction2
mmaction/models/localizers/drn/drn_utils/inference.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/drn/drn_utils/inference.py
Apache-2.0
def forward(self, locations, box_cls, box_regression, iou_scores):
    """
    Arguments:
        locations: list[tensor]
        box_cls: list[tensor]
        box_regression: list[tensor]
        iou_scores: list[tensor]

    Returns:
        boxlists (list): the post-processed proposals, after applying
            box decoding and NMS
    """
    sampled_boxes = []
    for i, (l, o, b, iou_s) in enumerate(
            zip(locations, box_cls, box_regression, iou_scores)):
        sampled_boxes.append(
            self.forward_for_single_feature_map(l, o, b, i, iou_s))

    boxlists = list(zip(*sampled_boxes))
    # boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]
    boxlists = self.select_over_all_levels(boxlists)

    return boxlists
Arguments: locations: list[tensor] box_cls: list[tensor] box_regression: list[tensor] iou_scores: list[tensor] Returns: boxlists (list): the post-processed proposals, after applying box decoding and NMS
forward
python
open-mmlab/mmaction2
mmaction/models/localizers/drn/drn_utils/inference.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/drn/drn_utils/inference.py
Apache-2.0
def temporal_iou(proposal_min, proposal_max, gt_min, gt_max):
    """Compute IoU score between a groundtruth bbox and the proposals.

    Args:
        proposal_min (list[float]): List of temporal anchor min.
        proposal_max (list[float]): List of temporal anchor max.
        gt_min (float): Groundtruth temporal box min.
        gt_max (float): Groundtruth temporal box max.

    Returns:
        list[float]: List of iou scores.
    """
    len_anchors = proposal_max - proposal_min
    int_tmin = np.maximum(proposal_min, gt_min)
    int_tmax = np.minimum(proposal_max, gt_max)
    inter_len = np.maximum(int_tmax - int_tmin, 0.)
    union_len = len_anchors - inter_len + gt_max - gt_min
    jaccard = np.divide(inter_len, union_len)
    return jaccard
Compute IoU score between a groundtruth bbox and the proposals. Args: proposal_min (list[float]): List of temporal anchor min. proposal_max (list[float]): List of temporal anchor max. gt_min (float): Groundtruth temporal box min. gt_max (float): Groundtruth temporal box max. Returns: list[float]: List of iou scores.
temporal_iou
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/proposal_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/proposal_utils.py
Apache-2.0
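A quick worked example may help fix the IoU arithmetic above. This is a hypothetical usage sketch, assuming `temporal_iou` is importable from the module path listed above.

import numpy as np

from mmaction.models.localizers.utils.proposal_utils import temporal_iou

# Two proposals scored against one ground-truth segment [2.0, 6.0].
# Proposal [1, 5]: intersection 3, union 4 - 3 + 4 = 5  -> IoU 0.6
# Proposal [4, 10]: intersection 2, union 6 - 2 + 4 = 8 -> IoU 0.25
proposal_min = np.array([1.0, 4.0])
proposal_max = np.array([5.0, 10.0])
print(temporal_iou(proposal_min, proposal_max, 2.0, 6.0))  # [0.6  0.25]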
def temporal_iop(proposal_min, proposal_max, gt_min, gt_max):
    """Compute IoP score between a groundtruth bbox and the proposals.

    The IoP (Intersection over Proposal) is defined as the ratio of the
    overlap with the ground truth to the duration of the proposal.

    Args:
        proposal_min (list[float]): List of temporal anchor min.
        proposal_max (list[float]): List of temporal anchor max.
        gt_min (float): Groundtruth temporal box min.
        gt_max (float): Groundtruth temporal box max.

    Returns:
        list[float]: List of intersection over proposal scores.
    """
    len_anchors = np.array(proposal_max - proposal_min)
    int_tmin = np.maximum(proposal_min, gt_min)
    int_tmax = np.minimum(proposal_max, gt_max)
    inter_len = np.maximum(int_tmax - int_tmin, 0.)
    scores = np.divide(inter_len, len_anchors)
    return scores
Compute IoP score between a groundtruth bbox and the proposals.

The IoP (Intersection over Proposal) is defined as the ratio of the
overlap with the ground truth to the duration of the proposal.

Args:
    proposal_min (list[float]): List of temporal anchor min.
    proposal_max (list[float]): List of temporal anchor max.
    gt_min (float): Groundtruth temporal box min.
    gt_max (float): Groundtruth temporal box max.

Returns:
    list[float]: List of intersection over proposal scores.
temporal_iop
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/proposal_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/proposal_utils.py
Apache-2.0
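For contrast with IoU, the same two proposals under IoP; a hypothetical sketch under the same import assumption as before.

import numpy as np

from mmaction.models.localizers.utils.proposal_utils import temporal_iop

# Same proposals and ground truth [2.0, 6.0] as above. IoP divides the
# overlap by the proposal duration instead of the union:
# Proposal [1, 5]: 3 / 4 = 0.75; proposal [4, 10]: 2 / 6 = 0.333...
print(temporal_iop(np.array([1.0, 4.0]), np.array([5.0, 10.0]), 2.0, 6.0))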
def soft_nms(proposals, alpha, low_threshold, high_threshold, top_k): """Soft NMS for temporal proposals. Args: proposals (np.ndarray): Proposals generated by network. alpha (float): Alpha value of Gaussian decaying function. low_threshold (float): Low threshold for soft nms. high_threshold (float): High threshold for soft nms. top_k (int): Top k values to be considered. Returns: np.ndarray: The updated proposals. """ proposals = proposals[proposals[:, -1].argsort()[::-1]] tstart = list(proposals[:, 0]) tend = list(proposals[:, 1]) tscore = list(proposals[:, -1]) rstart = [] rend = [] rscore = [] while len(tscore) > 0 and len(rscore) <= top_k: max_index = np.argmax(tscore) max_width = tend[max_index] - tstart[max_index] iou_list = temporal_iou(tstart[max_index], tend[max_index], np.array(tstart), np.array(tend)) iou_exp_list = np.exp(-np.square(iou_list) / alpha) for idx, _ in enumerate(tscore): if idx != max_index: current_iou = iou_list[idx] if current_iou > low_threshold + (high_threshold - low_threshold) * max_width: tscore[idx] = tscore[idx] * iou_exp_list[idx] rstart.append(tstart[max_index]) rend.append(tend[max_index]) rscore.append(tscore[max_index]) tstart.pop(max_index) tend.pop(max_index) tscore.pop(max_index) rstart = np.array(rstart).reshape(-1, 1) rend = np.array(rend).reshape(-1, 1) rscore = np.array(rscore).reshape(-1, 1) new_proposals = np.concatenate((rstart, rend, rscore), axis=1) return new_proposals
Soft NMS for temporal proposals. Args: proposals (np.ndarray): Proposals generated by network. alpha (float): Alpha value of Gaussian decaying function. low_threshold (float): Low threshold for soft nms. high_threshold (float): High threshold for soft nms. top_k (int): Top k values to be considered. Returns: np.ndarray: The updated proposals.
soft_nms
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/proposal_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/proposal_utils.py
Apache-2.0
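Note that the decay condition above, `low_threshold + (high_threshold - low_threshold) * max_width`, scales with the width of the currently selected proposal, so wider kept proposals suppress their neighbors more aggressively. A hypothetical usage sketch follows; the parameter values are illustrative, not the project defaults.

import numpy as np

from mmaction.models.localizers.utils.proposal_utils import soft_nms

# Columns are [start, end, score] in normalized time; the second proposal
# heavily overlaps the first, so its score is decayed rather than dropped.
proposals = np.array([[0.10, 0.50, 0.90],
                      [0.12, 0.48, 0.80],
                      [0.60, 0.90, 0.70]])
kept = soft_nms(
    proposals, alpha=0.4, low_threshold=0.5, high_threshold=0.9, top_k=3)
print(kept)  # rows of (start, end, updated score) in selection order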
def post_processing(result, video_info, soft_nms_alpha, soft_nms_low_threshold, soft_nms_high_threshold, post_process_top_k, feature_extraction_interval): """Post process for temporal proposals generation. Args: result (np.ndarray): Proposals generated by network. video_info (dict): Meta data of video. Required keys are 'duration_frame', 'duration_second'. soft_nms_alpha (float): Alpha value of Gaussian decaying function. soft_nms_low_threshold (float): Low threshold for soft nms. soft_nms_high_threshold (float): High threshold for soft nms. post_process_top_k (int): Top k values to be considered. feature_extraction_interval (int): Interval used in feature extraction. Returns: list[dict]: The updated proposals, e.g. [{'score': 0.9, 'segment': [0, 1]}, {'score': 0.8, 'segment': [0, 2]}, ...]. """ if len(result) > 1: result = soft_nms(result, soft_nms_alpha, soft_nms_low_threshold, soft_nms_high_threshold, post_process_top_k) result = result[result[:, -1].argsort()[::-1]] video_duration = float( video_info['duration_frame'] // feature_extraction_interval * feature_extraction_interval ) / video_info['duration_frame'] * video_info['duration_second'] proposal_list = [] for j in range(min(post_process_top_k, len(result))): proposal = {} proposal['score'] = float(result[j, -1]) proposal['segment'] = [ max(0, result[j, 0]) * video_duration, min(1, result[j, 1]) * video_duration ] proposal_list.append(proposal) return proposal_list
Post process for temporal proposals generation. Args: result (np.ndarray): Proposals generated by network. video_info (dict): Meta data of video. Required keys are 'duration_frame', 'duration_second'. soft_nms_alpha (float): Alpha value of Gaussian decaying function. soft_nms_low_threshold (float): Low threshold for soft nms. soft_nms_high_threshold (float): High threshold for soft nms. post_process_top_k (int): Top k values to be considered. feature_extraction_interval (int): Interval used in feature extraction. Returns: list[dict]: The updated proposals, e.g. [{'score': 0.9, 'segment': [0, 1]}, {'score': 0.8, 'segment': [0, 2]}, ...].
post_processing
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/proposal_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/proposal_utils.py
Apache-2.0
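The duration correction above snaps the frame count down to a multiple of the feature extraction interval before converting normalized segments back to seconds; a small worked example of that arithmetic (numbers are hypothetical):

# duration_frame=1003, feature_extraction_interval=16, duration_second=40.0
# usable frames: 1003 // 16 * 16 = 992
# corrected duration: 992 / 1003 * 40.0 ~= 39.56 s
video_duration = float(1003 // 16 * 16) / 1003 * 40.0
print(video_duration)  # ~39.561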
def generate_candidate_proposals(video_list,
                                 video_infos,
                                 tem_results_dir,
                                 temporal_scale,
                                 peak_threshold,
                                 tem_results_ext='.csv',
                                 result_dict=None):
    """Generate Candidate Proposals with given temporal evaluation results.
    Each proposal file will contain:
    'tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa'.

    Args:
        video_list (list[int]): List of video indexes to generate proposals.
        video_infos (list[dict]): List of video_info dict that contains
            'video_name', 'duration_frame', 'duration_second',
            'feature_frame', and 'annotations'.
        tem_results_dir (str): Directory to load temporal evaluation
            results.
        temporal_scale (int): The number (scale) on temporal axis.
        peak_threshold (float): The threshold for proposal generation.
        tem_results_ext (str): File extension for temporal evaluation
            model output. Default: '.csv'.
        result_dict (dict | None): The dict to save the results.
            Default: None.

    Returns:
        dict: A dict containing video_name as keys and proposal arrays as
            values. If result_dict is not None, also save the results to it.
    """
    if tem_results_ext != '.csv':
        raise NotImplementedError('Only support csv format now.')

    tscale = temporal_scale
    tgap = 1. / tscale
    proposal_dict = {}
    for video_index in video_list:
        video_name = video_infos[video_index]['video_name']
        tem_path = osp.join(tem_results_dir, video_name + tem_results_ext)
        tem_results = np.loadtxt(
            tem_path, dtype=np.float32, delimiter=',', skiprows=1)
        start_scores = tem_results[:, 1]
        end_scores = tem_results[:, 2]

        max_start = max(start_scores)
        max_end = max(end_scores)

        start_bins = np.zeros(len(start_scores))
        start_bins[[0, -1]] = 1
        end_bins = np.zeros(len(end_scores))
        end_bins[[0, -1]] = 1
        for idx in range(1, tscale - 1):
            if start_scores[idx] > start_scores[
                    idx + 1] and start_scores[idx] > start_scores[idx - 1]:
                start_bins[idx] = 1
            elif start_scores[idx] > (peak_threshold * max_start):
                start_bins[idx] = 1
            if end_scores[idx] > end_scores[
                    idx + 1] and end_scores[idx] > end_scores[idx - 1]:
                end_bins[idx] = 1
            elif end_scores[idx] > (peak_threshold * max_end):
                end_bins[idx] = 1

        tmin_list = []
        tmin_score_list = []
        tmax_list = []
        tmax_score_list = []
        for idx in range(tscale):
            if start_bins[idx] == 1:
                tmin_list.append(tgap / 2 + tgap * idx)
                tmin_score_list.append(start_scores[idx])
            if end_bins[idx] == 1:
                tmax_list.append(tgap / 2 + tgap * idx)
                tmax_score_list.append(end_scores[idx])

        new_props = []
        for tmax, tmax_score in zip(tmax_list, tmax_score_list):
            for tmin, tmin_score in zip(tmin_list, tmin_score_list):
                if tmin >= tmax:
                    break
                new_props.append([tmin, tmax, tmin_score, tmax_score])

        new_props = np.stack(new_props)
        score = (new_props[:, 2] * new_props[:, 3]).reshape(-1, 1)
        new_props = np.concatenate((new_props, score), axis=1)
        new_props = new_props[new_props[:, -1].argsort()[::-1]]

        video_info = video_infos[video_index]
        video_frame = video_info['duration_frame']
        video_second = video_info['duration_second']
        feature_frame = video_info['feature_frame']
        corrected_second = float(feature_frame) / video_frame * video_second

        gt_tmins = []
        gt_tmaxs = []
        for annotations in video_info['annotations']:
            gt_tmins.append(annotations['segment'][0] / corrected_second)
            gt_tmaxs.append(annotations['segment'][1] / corrected_second)

        new_iou_list = []
        new_ioa_list = []
        for new_prop in new_props:
            new_iou = max(
                temporal_iou(new_prop[0], new_prop[1], gt_tmins, gt_tmaxs))
            new_ioa = max(
                temporal_iop(new_prop[0], new_prop[1], gt_tmins, gt_tmaxs))
            new_iou_list.append(new_iou)
            new_ioa_list.append(new_ioa)

        new_iou_list = np.array(new_iou_list).reshape(-1, 1)
        new_ioa_list = np.array(new_ioa_list).reshape(-1, 1)
        new_props = np.concatenate((new_props, new_iou_list), axis=1)
        new_props = np.concatenate((new_props, new_ioa_list), axis=1)

        proposal_dict[video_name] = new_props
        if result_dict is not None:
            result_dict[video_name] = new_props
    return proposal_dict
Generate Candidate Proposals with given temporal evaluation results. Each proposal file will contain: 'tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa'.

Args:
    video_list (list[int]): List of video indexes to generate proposals.
    video_infos (list[dict]): List of video_info dict that contains
        'video_name', 'duration_frame', 'duration_second',
        'feature_frame', and 'annotations'.
    tem_results_dir (str): Directory to load temporal evaluation
        results.
    temporal_scale (int): The number (scale) on temporal axis.
    peak_threshold (float): The threshold for proposal generation.
    tem_results_ext (str): File extension for temporal evaluation
        model output. Default: '.csv'.
    result_dict (dict | None): The dict to save the results.
        Default: None.

Returns:
    dict: A dict containing video_name as keys and proposal arrays as
        values. If result_dict is not None, also save the results to it.
generate_candidate_proposals
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/bsn_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/bsn_utils.py
Apache-2.0
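The boundary selection loop above keeps a temporal position if it is either a local maximum of the score curve or exceeds `peak_threshold` times the global maximum (endpoints are always kept); a toy illustration of that rule:

import numpy as np

scores = np.array([0.1, 0.5, 0.3, 0.2, 0.8, 0.4])
peak_threshold = 0.5
bins = np.zeros(len(scores))
bins[[0, -1]] = 1  # endpoints are always kept
for idx in range(1, len(scores) - 1):
    if scores[idx] > scores[idx - 1] and scores[idx] > scores[idx + 1]:
        bins[idx] = 1  # local maximum
    elif scores[idx] > peak_threshold * scores.max():
        bins[idx] = 1  # high absolute score
print(bins)  # [1. 1. 0. 0. 1. 1.]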
def generate_bsp_feature(video_list,
                         video_infos,
                         tem_results_dir,
                         pgm_proposals_dir,
                         top_k=1000,
                         bsp_boundary_ratio=0.2,
                         num_sample_start=8,
                         num_sample_end=8,
                         num_sample_action=16,
                         num_sample_interp=3,
                         tem_results_ext='.csv',
                         pgm_proposal_ext='.csv',
                         result_dict=None):
    """Generate Boundary-Sensitive Proposal Feature with given proposals.

    Args:
        video_list (list[int]): List of video indexes to generate
            bsp_feature.
        video_infos (list[dict]): List of video_info dict that contains
            'video_name'.
        tem_results_dir (str): Directory to load temporal evaluation
            results.
        pgm_proposals_dir (str): Directory to load proposals.
        top_k (int): Number of proposals to be considered. Default: 1000.
        bsp_boundary_ratio (float): Ratio for proposal boundary
            (start/end). Default: 0.2.
        num_sample_start (int): Num of samples for actionness in
            start region. Default: 8.
        num_sample_end (int): Num of samples for actionness in end region.
            Default: 8.
        num_sample_action (int): Num of samples for actionness in center
            region. Default: 16.
        num_sample_interp (int): Num of samples for interpolation for
            each sample point. Default: 3.
        tem_results_ext (str): File extension for temporal evaluation
            model output. Default: '.csv'.
        pgm_proposal_ext (str): File extension for proposals.
            Default: '.csv'.
        result_dict (dict | None): The dict to save the results.
            Default: None.

    Returns:
        bsp_feature_dict (dict): A dict containing video_name as keys and
            bsp_feature as values. If result_dict is not None, also save
            the results to it.
    """
    if tem_results_ext != '.csv' or pgm_proposal_ext != '.csv':
        raise NotImplementedError('Only support csv format now.')

    bsp_feature_dict = {}
    for video_index in video_list:
        video_name = video_infos[video_index]['video_name']

        # Load temporal evaluation results
        tem_path = osp.join(tem_results_dir, video_name + tem_results_ext)
        tem_results = np.loadtxt(
            tem_path, dtype=np.float32, delimiter=',', skiprows=1)
        score_action = tem_results[:, 0]
        seg_tmins = tem_results[:, 3]
        seg_tmaxs = tem_results[:, 4]
        video_scale = len(tem_results)
        video_gap = seg_tmaxs[0] - seg_tmins[0]
        video_extend = int(video_scale / 4 + 10)

        # Load proposals results
        proposal_path = osp.join(pgm_proposals_dir,
                                 video_name + pgm_proposal_ext)
        pgm_proposals = np.loadtxt(
            proposal_path, dtype=np.float32, delimiter=',', skiprows=1)
        pgm_proposals = pgm_proposals[:top_k]

        # Generate temporal sample points
        boundary_zeros = np.zeros([video_extend])
        score_action = np.concatenate(
            (boundary_zeros, score_action, boundary_zeros))
        begin_tp = []
        middle_tp = []
        end_tp = []
        for i in range(video_extend):
            begin_tp.append(-video_gap / 2 -
                            (video_extend - 1 - i) * video_gap)
            end_tp.append(video_gap / 2 + seg_tmaxs[-1] + i * video_gap)
        for i in range(video_scale):
            middle_tp.append(video_gap / 2 + i * video_gap)
        t_points = begin_tp + middle_tp + end_tp

        bsp_feature = []
        for pgm_proposal in pgm_proposals:
            tmin = pgm_proposal[0]
            tmax = pgm_proposal[1]
            tlen = tmax - tmin
            # Temporal range for start
            tmin_0 = tmin - tlen * bsp_boundary_ratio
            tmin_1 = tmin + tlen * bsp_boundary_ratio
            # Temporal range for end
            tmax_0 = tmax - tlen * bsp_boundary_ratio
            tmax_1 = tmax + tlen * bsp_boundary_ratio

            # Generate features at start boundary
            tlen_start = (tmin_1 - tmin_0) / (num_sample_start - 1)
            tlen_start_sample = tlen_start / num_sample_interp
            t_new = [
                tmin_0 - tlen_start / 2 + tlen_start_sample * i
                for i in range(num_sample_start * num_sample_interp + 1)
            ]
            y_new_start_action = np.interp(t_new, t_points, score_action)
            y_new_start = [
                np.mean(y_new_start_action[i * num_sample_interp:(i + 1) *
                                           num_sample_interp + 1])
                for i in range(num_sample_start)
            ]
            # Generate features at end boundary
            tlen_end = (tmax_1 - tmax_0) / (num_sample_end - 1)
            tlen_end_sample = tlen_end / num_sample_interp
            t_new = [
                tmax_0 - tlen_end / 2 + tlen_end_sample * i
                for i in range(num_sample_end * num_sample_interp + 1)
            ]
            y_new_end_action = np.interp(t_new, t_points, score_action)
            y_new_end = [
                np.mean(y_new_end_action[i * num_sample_interp:(i + 1) *
                                         num_sample_interp + 1])
                for i in range(num_sample_end)
            ]
            # Generate features for action
            tlen_action = (tmax - tmin) / (num_sample_action - 1)
            tlen_action_sample = tlen_action / num_sample_interp
            t_new = [
                tmin - tlen_action / 2 + tlen_action_sample * i
                for i in range(num_sample_action * num_sample_interp + 1)
            ]
            y_new_action = np.interp(t_new, t_points, score_action)
            y_new_action = [
                np.mean(y_new_action[i * num_sample_interp:(i + 1) *
                                     num_sample_interp + 1])
                for i in range(num_sample_action)
            ]
            feature = np.concatenate([y_new_action, y_new_start, y_new_end])
            bsp_feature.append(feature)
        bsp_feature = np.array(bsp_feature)
        bsp_feature_dict[video_name] = bsp_feature
        if result_dict is not None:
            result_dict[video_name] = bsp_feature
    return bsp_feature_dict
Generate Boundary-Sensitive Proposal Feature with given proposals.

Args:
    video_list (list[int]): List of video indexes to generate
        bsp_feature.
    video_infos (list[dict]): List of video_info dict that contains
        'video_name'.
    tem_results_dir (str): Directory to load temporal evaluation
        results.
    pgm_proposals_dir (str): Directory to load proposals.
    top_k (int): Number of proposals to be considered. Default: 1000.
    bsp_boundary_ratio (float): Ratio for proposal boundary
        (start/end). Default: 0.2.
    num_sample_start (int): Num of samples for actionness in
        start region. Default: 8.
    num_sample_end (int): Num of samples for actionness in end region.
        Default: 8.
    num_sample_action (int): Num of samples for actionness in center
        region. Default: 16.
    num_sample_interp (int): Num of samples for interpolation for
        each sample point. Default: 3.
    tem_results_ext (str): File extension for temporal evaluation
        model output. Default: '.csv'.
    pgm_proposal_ext (str): File extension for proposals. Default: '.csv'.
    result_dict (dict | None): The dict to save the results.
        Default: None.

Returns:
    bsp_feature_dict (dict): A dict containing video_name as keys and
        bsp_feature as values. If result_dict is not None, also save the
        results to it.
generate_bsp_feature
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/bsn_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/bsn_utils.py
Apache-2.0
def encode_video(self, video: torch.Tensor) -> torch.Tensor: """Encode video.""" b, n, c, h, w = video.shape video = video.view(-1, c, h, w) frames_features = self.encode_image(video) frames_features = frames_features.view(b, n, -1) video_features = self.adapter(frames_features) return video_features
Encode video.
encode_video
python
open-mmlab/mmaction2
mmaction/models/similarity/clip_similarity.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/clip_similarity.py
Apache-2.0
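A shape walk-through of `encode_video`, with stand-in tensors in place of the CLIP image encoder and the adapter (both hypothetical here, not the real modules):

import torch

b, n, c, h, w = 2, 8, 3, 224, 224
video = torch.randn(b, n, c, h, w)
frames = video.view(-1, c, h, w)                 # (16, 3, 224, 224)
frame_feats = torch.randn(frames.size(0), 512)   # stands in for encode_image
frame_feats = frame_feats.view(b, n, -1)         # (2, 8, 512)
video_feats = frame_feats.mean(dim=1)            # stands in for the adapter
print(video_feats.shape)                         # torch.Size([2, 512])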
def encode_image(self, image: torch.Tensor) -> torch.Tensor: """Encode image.""" return self.clip.encode_image(image)
Encode image.
encode_image
python
open-mmlab/mmaction2
mmaction/models/similarity/clip_similarity.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/clip_similarity.py
Apache-2.0
def encode_text(self, text: torch.Tensor) -> torch.Tensor: """Encode text.""" return self.clip.encode_text(text)
Encode text.
encode_text
python
open-mmlab/mmaction2
mmaction/models/similarity/clip_similarity.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/clip_similarity.py
Apache-2.0
def extract_feat(self, inputs: Dict[str, torch.Tensor], norm: bool = True) -> Tuple: """Extract features.""" text_inputs = inputs['text'] video_inputs = inputs['imgs'] text_features = self.encode_text(text_inputs) video_features = self.encode_video(video_inputs) if norm: text_features = text_features / text_features.norm( dim=-1, keepdim=True) video_features = video_features / video_features.norm( dim=-1, keepdim=True) return video_features, text_features
Extract features.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/similarity/clip_similarity.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/clip_similarity.py
Apache-2.0
def forward(self, inputs: Dict[str, torch.Tensor], data_samples: OptSampleList = None, mode: str = 'tensor') -> ForwardResults: """Forward function.""" if mode == 'tensor': return self.extract_feat(inputs, norm=False) elif mode == 'loss': video_features, text_features = self.extract_feat(inputs) video_features = torch.cat( GatherLayer.apply(video_features), dim=0) text_features = torch.cat(GatherLayer.apply(text_features), dim=0) logit_scale = self.clip.logit_scale.exp() logits_per_video = logit_scale * video_features @ text_features.t() logits_per_text = logits_per_video.t() labels = torch.arange(logits_per_video.shape[0]).to( logit_scale.device) sim_loss_v2t = self.loss(logits_per_video, labels) sim_loss_t2v = self.loss(logits_per_text, labels) losses = dict() losses['sim_loss_v2t'] = sim_loss_v2t losses['sim_loss_t2v'] = sim_loss_t2v return losses elif mode == 'predict': video_features, text_features = self.extract_feat(inputs) for ds, vf, tf in zip(data_samples, video_features, text_features): features = InstanceData(video_feature=vf, text_feature=tf) ds.features = features return data_samples else: raise RuntimeError(f'Invalid mode "{mode}". ' 'Only supports loss, predict and tensor mode')
Forward function.
forward
python
open-mmlab/mmaction2
mmaction/models/similarity/clip_similarity.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/clip_similarity.py
Apache-2.0
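A single-process sketch of the symmetric contrastive loss computed in 'loss' mode; `GatherLayer` only matters under distributed training, so it is omitted here:

import torch
import torch.nn.functional as F

video_features = F.normalize(torch.randn(4, 512), dim=-1)
text_features = F.normalize(torch.randn(4, 512), dim=-1)
logit_scale = torch.tensor(100.0)  # exp of a learned temperature

logits_per_video = logit_scale * video_features @ text_features.t()
logits_per_text = logits_per_video.t()
labels = torch.arange(4)  # the i-th video matches the i-th text
loss = (F.cross_entropy(logits_per_video, labels) +
        F.cross_entropy(logits_per_text, labels)) / 2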
def train(self, mode: bool = True) -> None: """Set the optimization status when training.""" super().train(mode) self._freeze_stages()
Set the optimization status when training.
train
python
open-mmlab/mmaction2
mmaction/models/similarity/clip_similarity.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/clip_similarity.py
Apache-2.0
def _freeze_stages(self) -> None: """Prevent all the parameters from being optimized before ``self.frozen_layers``.""" if self.frozen_layers >= 0: top_layers = [ 'ln_final', 'text_projection', 'logit_scale', 'visual.ln_post', 'visual.proj' ] mid_layers = [ 'visual.transformer.resblocks', 'transformer.resblocks' ] for name, param in self.clip.named_parameters(): if any(name.find(n) == 0 for n in top_layers): continue elif any(name.find(n) == 0 for n in mid_layers): layer_n = int(name.split('.resblocks.')[1].split('.')[0]) if layer_n >= self.frozen_layers: continue param.requires_grad = False
Prevent all the parameters from being optimized before ``self.frozen_layers``.
_freeze_stages
python
open-mmlab/mmaction2
mmaction/models/similarity/clip_similarity.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/clip_similarity.py
Apache-2.0
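The layer test above parses the block index out of the parameter name; a quick illustration with a hypothetical parameter name:

name = 'visual.transformer.resblocks.10.attn.in_proj_weight'
layer_n = int(name.split('.resblocks.')[1].split('.')[0])
print(layer_n)  # 10 -> stays trainable when frozen_layers <= 10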
def forward(self, x: torch.Tensor) -> torch.Tensor: """Defines the computation performed at every call.""" orig_type = x.dtype ret = super().forward(x.type(torch.float32)) return ret.type(orig_type)
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/similarity/adapters.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/adapters.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor: """Perform quick gelu.""" return x * torch.sigmoid(1.702 * x)
Perform quick gelu.
forward
python
open-mmlab/mmaction2
mmaction/models/similarity/adapters.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/adapters.py
Apache-2.0
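QuickGELU approximates GELU with a scaled sigmoid, trading a small accuracy loss for a cheaper op; the gap stays on the order of 1e-2 at worst over typical activation ranges:

import torch
import torch.nn.functional as F

x = torch.linspace(-4, 4, 101)
quick = x * torch.sigmoid(1.702 * x)
exact = F.gelu(x)
print((quick - exact).abs().max())  # roughly 0.02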
def attention(self, x: torch.Tensor) -> torch.Tensor: """Perform attention.""" self.attn_mask = self.attn_mask.to( dtype=x.dtype, device=x.device) if self.attn_mask is not None else None return self.attn( x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
Perform attention.
attention
python
open-mmlab/mmaction2
mmaction/models/similarity/adapters.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/adapters.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor: """Defines the computation performed at every call.""" x = x + self.attention(self.ln_1(x)) x = x + self.mlp(self.ln_2(x)) return x
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/similarity/adapters.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/adapters.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor: """Defines the computation performed at every call.""" return self.resblocks(x)
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/similarity/adapters.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/adapters.py
Apache-2.0
def init_weights(self) -> None: """Initialize the weights.""" nn.init.normal_(self.positional_embedding, std=0.01) proj_std = (self.transformer.width**-0.5) * ( (2 * self.transformer.layers)**-0.5) attn_std = self.transformer.width**-0.5 fc_std = (2 * self.transformer.width)**-0.5 for block in self.transformer.resblocks: nn.init.normal_(block.attn.in_proj_weight, std=attn_std) nn.init.normal_(block.attn.out_proj.weight, std=proj_std) nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
Initialize the weights.
init_weights
python
open-mmlab/mmaction2
mmaction/models/similarity/adapters.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/adapters.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor: """Defines the computation performed at every call.""" b, seq_length, c = x.size() x_original = x x = x + self.positional_embedding x = x.transpose(0, 1) # NLD -> LND x = self.transformer(x) x = x.transpose(0, 1) # LND -> NLD x = x.type(x_original.dtype) + x_original return x.mean(dim=1)
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/similarity/adapters.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/adapters.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor: """Defines the computation performed at every call.""" return x.mean(dim=self.dim)
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
mmaction/models/similarity/adapters.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/adapters.py
Apache-2.0
def forward(self, data: Union[dict, Tuple[dict]], training: bool = False) -> Union[dict, Tuple[dict]]: """Perform normalization, padding, bgr2rgb conversion and batch augmentation based on ``BaseDataPreprocessor``. Args: data (dict or Tuple[dict]): data sampled from dataloader. training (bool): Whether to enable training time augmentation. Returns: dict or Tuple[dict]: Data in the same format as the model input. """ data = self.cast_data(data) if isinstance(data, dict): return self.forward_onesample(data, training=training) elif isinstance(data, (tuple, list)): outputs = [] for data_sample in data: output = self.forward_onesample(data_sample, training=training) outputs.append(output) return tuple(outputs) else: raise TypeError(f'Unsupported data type: {type(data)}!')
Perform normalization, padding, bgr2rgb conversion and batch augmentation based on ``BaseDataPreprocessor``. Args: data (dict or Tuple[dict]): data sampled from dataloader. training (bool): Whether to enable training time augmentation. Returns: dict or Tuple[dict]: Data in the same format as the model input.
forward
python
open-mmlab/mmaction2
mmaction/models/data_preprocessors/data_preprocessor.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/data_preprocessors/data_preprocessor.py
Apache-2.0
def forward_onesample(self, data, training: bool = False) -> dict: """Perform normalization, padding, bgr2rgb conversion and batch augmentation on one data sample. Args: data (dict): data sampled from dataloader. training (bool): Whether to enable training time augmentation. Returns: dict: Data in the same format as the model input. """ inputs, data_samples = data['inputs'], data['data_samples'] inputs, data_samples = self.preprocess(inputs, data_samples, training) data['inputs'] = inputs data['data_samples'] = data_samples return data
Perform normalization, padding, bgr2rgb conversion and batch augmentation on one data sample. Args: data (dict): data sampled from dataloader. training (bool): Whether to enable training time augmentation. Returns: dict: Data in the same format as the model input.
forward_onesample
python
open-mmlab/mmaction2
mmaction/models/data_preprocessors/data_preprocessor.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/data_preprocessors/data_preprocessor.py
Apache-2.0
def forward(self, data: Dict, training: bool = False) -> Dict: """Preprocesses the data into the model input format. Args: data (dict): Data returned by dataloader. training (bool): Whether to enable training time augmentation. Returns: dict: Data in the same format as the model input. """ data = self.cast_data(data) inputs, data_samples = data['inputs'], data['data_samples'] for modality, modality_data in inputs.items(): preprocessor = self.preprocessors[modality] modality_data, data_samples = preprocessor.preprocess( modality_data, data_samples, training) inputs[modality] = modality_data data['inputs'] = inputs data['data_samples'] = data_samples return data
Preprocesses the data into the model input format. Args: data (dict): Data returned by dataloader. training (bool): Whether to enable training time augmentation. Returns: dict: Data in the same format as the model input.
forward
python
open-mmlab/mmaction2
mmaction/models/data_preprocessors/multimodal_data_preprocessor.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/data_preprocessors/multimodal_data_preprocessor.py
Apache-2.0
def assign_wrt_overlaps(self, overlaps: Tensor, gt_labels: Tensor) -> AssignResult: """Assign w.r.t. the overlaps of bboxes with gts. Args: overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes, shape(k, n). gt_labels (Tensor): Labels of k gt_bboxes, shape (k, num_classes). Returns: :obj:`AssignResult`: The assign result. """ num_gts, num_bboxes = overlaps.size(0), overlaps.size(1) # 1. assign -1 by default assigned_gt_inds = overlaps.new_full((num_bboxes, ), -1, dtype=torch.long) if num_gts == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = overlaps.new_zeros((num_bboxes, )) assigned_labels = overlaps.new_full((num_bboxes, ), -1, dtype=torch.long) if num_gts == 0: # No truth, assign everything to background assigned_gt_inds[:] = 0 return AssignResult( num_gts=num_gts, gt_inds=assigned_gt_inds, max_overlaps=max_overlaps, labels=assigned_labels) # for each anchor, which gt best overlaps with it # for each anchor, the max iou of all gts max_overlaps, argmax_overlaps = overlaps.max(dim=0) # for each gt, which anchor best overlaps with it # for each gt, the max iou of all proposals gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1) # 2. assign negative: below # the negative inds are set to be 0 if isinstance(self.neg_iou_thr, float): assigned_gt_inds[(max_overlaps >= 0) & (max_overlaps < self.neg_iou_thr)] = 0 elif isinstance(self.neg_iou_thr, tuple): assert len(self.neg_iou_thr) == 2 assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0]) & (max_overlaps < self.neg_iou_thr[1])] = 0 # 3. assign positive: above positive IoU threshold pos_inds = max_overlaps >= self.pos_iou_thr assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1 if self.match_low_quality: # Low-quality matching will overwrite the assigned_gt_inds # assigned in Step 3. Thus, the assigned gt might not be the # best one for prediction. # For example, if bbox A has 0.9 and 0.8 iou with GT bbox # 1 & 2, bbox 1 will be assigned as the best target for bbox A # in step 3. However, if GT bbox 2's gt_argmax_overlaps = A, # bbox A's assigned_gt_inds will be overwritten to be bbox B. # This might be the reason that it is not used in ROI Heads. for i in range(num_gts): if gt_max_overlaps[i] >= self.min_pos_iou: if self.gt_max_assign_all: max_iou_inds = overlaps[i, :] == gt_max_overlaps[i] assigned_gt_inds[max_iou_inds] = i + 1 else: assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 # consider multi-class case (AVA) assert len(gt_labels[0]) > 1 assigned_labels = assigned_gt_inds.new_zeros( (num_bboxes, len(gt_labels[0])), dtype=torch.float32) # If not assigned, labels will be all 0 pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] - 1] return AssignResult( num_gts=num_gts, gt_inds=assigned_gt_inds, max_overlaps=max_overlaps, labels=assigned_labels)
Assign w.r.t. the overlaps of bboxes with gts.

Args:
    overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
        shape (k, n).
    gt_labels (Tensor): Labels of k gt_bboxes, shape (k, num_classes).

Returns:
    :obj:`AssignResult`: The assign result.
assign_wrt_overlaps
python
open-mmlab/mmaction2
mmaction/models/task_modules/assigners/max_iou_assigner_ava.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/task_modules/assigners/max_iou_assigner_ava.py
Apache-2.0
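A toy run of the threshold logic above, with 2 ground truths and 3 proposals (hypothetical values, `pos_iou_thr=0.5`, `neg_iou_thr=0.3`, low-quality matching off):

import torch

overlaps = torch.tensor([[0.9, 0.2, 0.4],
                         [0.1, 0.6, 0.4]])  # (num_gts, num_bboxes)
max_overlaps, argmax_overlaps = overlaps.max(dim=0)

assigned = overlaps.new_full((3, ), -1, dtype=torch.long)
assigned[(max_overlaps >= 0) & (max_overlaps < 0.3)] = 0  # negatives
pos_inds = max_overlaps >= 0.5
assigned[pos_inds] = argmax_overlaps[pos_inds] + 1        # 1-based gt index
print(assigned)  # tensor([ 1,  2, -1]) -> last proposal stays ignored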
def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]: """Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
build_inputs_with_special_tokens
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/tokenizer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/tokenizer.py
Apache-2.0
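With the standard BERT special-token ids (`[CLS]`=101, `[SEP]`=102), the two branches produce the following (the word-piece ids below are illustrative):

ids_a, ids_b = [7592, 2088], [2129, 2024]
# single sequence: [CLS] X (note: no trailing [SEP] in this tokenizer)
print([101] + ids_a)                          # [101, 7592, 2088]
# sequence pair: [CLS] A [SEP] B [SEP]
print([101] + ids_a + [102] + ids_b + [102])  # [101, ..., 102, ..., 102]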
def all_gather_concat(data: torch.Tensor) -> torch.Tensor:
    """Gather tensors with different first-dimension size and concat to one
    tensor.

    Note:
        Only the first dimension should be different.

    Args:
        data (Tensor): Tensor to be gathered.

    Returns:
        torch.Tensor: The concatenated tensor.
    """
    if dist.get_world_size() == 1:
        return data

    data_size = torch.tensor(data.size(0), device=data.device)
    sizes_list = dist.all_gather(data_size)

    total_length = sum(sizes_list)
    max_length = max(sizes_list)
    size_diff = max_length.item() - data_size.item()
    if size_diff:
        padding = torch.zeros(
            size_diff, *data.size()[1:], device=data.device, dtype=data.dtype)
        data = torch.cat((data, padding))

    gather_list = dist.all_gather(data)

    # gather all data according to the default DDP sampler. For instance,
    # 8 samples on 2 GPUs, GPU0: [0,2,4,6], GPU1: [1,3,5,7], will be gathered
    # as [0,1,2,3,4,5,6,7]
    all_data = []
    for gather_batch in zip(*gather_list):
        all_data.extend(gather_batch)

    return torch.stack(all_data)[:total_length]
Gather tensors with different first-dimension size and concat to one tensor.

Note:
    Only the first dimension should be different.

Args:
    data (Tensor): Tensor to be gathered.

Returns:
    torch.Tensor: The concatenated tensor.
all_gather_concat
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/utils.py
Apache-2.0
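The zip-based regather exists because the default DDP sampler interleaves samples across ranks; a plain-Python stand-in shows how zipping the per-rank batches restores global order:

gather_list = [[0, 2, 4], [1, 3, 5]]  # stand-ins for per-rank tensors, 2 GPUs
all_data = []
for gather_batch in zip(*gather_list):
    all_data.extend(gather_batch)
print(all_data)  # [0, 1, 2, 3, 4, 5]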
def interpolate_pos_embed_beit(state_dict, new_model): """interpolate the positional embeddings. The spatial pe is relative and temporal pe is absolute. additional temporal pe is padded with 0. Args: state_dict (dict): The state_dict. new_model (nn.Module): The created model. Returns: dict. The state_dict with updated positional embeddings. """ state_dict = interpolate_pos_relative_bias_beit( state_dict_old=state_dict, state_dict_new=new_model.state_dict(), patch_shape_new=new_model.vision_encoder.embeddings.patch_embeddings. patch_shape, ) # absolute temporal pos bias temporal_pe_key = 'vision_encoder.embeddings.temporal_position_embeddings' if temporal_pe_key in state_dict: logger = MMLogger.get_current_instance() logger.info( f'interpolate temporal positional embeddings: {temporal_pe_key}') state_dict[temporal_pe_key] = load_temp_embed_with_mismatch( temp_embed_old=state_dict[temporal_pe_key], temp_embed_new=new_model.state_dict()[temporal_pe_key], ) return state_dict
Interpolate the positional embeddings. The spatial PE is relative and the temporal PE is absolute; additional temporal PE is padded with zeros.

Args:
    state_dict (dict): The state_dict.
    new_model (nn.Module): The created model.

Returns:
    dict: The state_dict with updated positional embeddings.
interpolate_pos_embed_beit
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/utils.py
Apache-2.0
def load_temp_embed_with_mismatch(temp_embed_old,
                                  temp_embed_new,
                                  add_zero=True):
    """Add/Remove extra temporal_embeddings as needed.
    https://arxiv.org/abs/2104.00650 shows adding zero paddings works.

    temp_embed_old: (1, num_frames_old, 1, d)
    temp_embed_new: (1, num_frames_new, 1, d)
    add_zero: bool, if True, pad with zeros; otherwise interpolate the
        trained embeddings.
    """
    num_frms_new = temp_embed_new.shape[1]
    num_frms_old = temp_embed_old.shape[1]
    logger = MMLogger.get_current_instance()
    logger.info(
        f'Load temporal_embeddings, lengths: {num_frms_old}-->{num_frms_new}')
    if num_frms_new > num_frms_old:
        if add_zero:
            # pad with zeros: untrained positions stay at zero
            temp_embed_new[:, :num_frms_old] = temp_embed_old
        else:
            temp_embed_new = interpolate_temporal_pos_embed(
                temp_embed_old, num_frms_new)
    elif num_frms_new < num_frms_old:
        # truncate the trained embeddings to the new length
        temp_embed_new = temp_embed_old[:, :num_frms_new]
    else:
        # equal lengths: reuse the trained embeddings unchanged
        temp_embed_new = temp_embed_old
    return temp_embed_new
Add/Remove extra temporal_embeddings as needed. https://arxiv.org/abs/2104.00650 shows adding zero paddings works.

temp_embed_old: (1, num_frames_old, 1, d)
temp_embed_new: (1, num_frames_new, 1, d)
add_zero: bool, if True, pad with zeros; otherwise interpolate the trained embeddings.
load_temp_embed_with_mismatch
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/utils.py
Apache-2.0
def interpolate_temporal_pos_embed(temp_embed_old, num_frames_new): """ temp_embed_old: (1, num_frames_old, 1, d) Returns: temp_embed_new: (1, num_frames_new, 1, d) """ temp_embed_old = temp_embed_old.squeeze(2).permute( 0, 2, 1) # (1, d, num_frames_old) temp_embed_new = F.interpolate( temp_embed_old, num_frames_new, mode='linear') # (1, d, num_frames_new) temp_embed_new = temp_embed_new.permute(0, 2, 1).unsqueeze( 2) # (1, num_frames_new, 1, d) return temp_embed_new
temp_embed_old: (1, num_frames_old, 1, d) Returns: temp_embed_new: (1, num_frames_new, 1, d)
interpolate_temporal_pos_embed
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/utils.py
Apache-2.0
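A runnable sketch of the same squeeze/permute/interpolate round trip, stretching a temporal position table from 4 to 8 frames:

import torch
import torch.nn.functional as F

temp_embed_old = torch.randn(1, 4, 1, 768)
x = temp_embed_old.squeeze(2).permute(0, 2, 1)    # (1, 768, 4)
x = F.interpolate(x, 8, mode='linear')            # (1, 768, 8)
temp_embed_new = x.permute(0, 2, 1).unsqueeze(2)  # (1, 8, 1, 768)
print(temp_embed_new.shape)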
def interpolate_pos_relative_bias_beit(state_dict_old, state_dict_new, patch_shape_new): """ Args: state_dict_old: loaded state dict state_dict_new: state dict for model with new image size patch_shape_new: new model patch_shape ref: https://github.com/microsoft/unilm/blob/master/beit/run_class_finetuning.py # noqa: E501 """ all_keys = list(state_dict_old.keys()) for key in all_keys: if 'relative_position_index' in key: state_dict_old.pop(key) if 'relative_position_bias_table' in key: rel_pos_bias = state_dict_old[key] src_num_pos, num_attn_heads = rel_pos_bias.size() dst_num_pos, _ = state_dict_new[key].size() dst_patch_shape = patch_shape_new if dst_patch_shape[0] != dst_patch_shape[1]: raise NotImplementedError() num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * ( dst_patch_shape[1] * 2 - 1) src_size = int((src_num_pos - num_extra_tokens)**0.5) dst_size = int((dst_num_pos - num_extra_tokens)**0.5) if src_size != dst_size: extra_tokens = rel_pos_bias[-num_extra_tokens:, :] rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :] def geometric_progression(a, r, n): return a * (1.0 - r**n) / (1.0 - r) left, right = 1.01, 1.5 while right - left > 1e-6: q = (left + right) / 2.0 gp = geometric_progression(1, q, src_size // 2) if gp > dst_size // 2: right = q else: left = q dis = [] cur = 1 for i in range(src_size // 2): dis.append(cur) cur += q**(i + 1) r_ids = [-_ for _ in reversed(dis)] x = r_ids + [0] + dis y = r_ids + [0] + dis t = dst_size // 2.0 dx = np.arange(-t, t + 0.1, 1.0) dy = np.arange(-t, t + 0.1, 1.0) all_rel_pos_bias = [] for i in range(num_attn_heads): z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy() f = interpolate.interp2d(x, y, z, kind='cubic') all_rel_pos_bias.append( torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to( rel_pos_bias.device)) rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1) new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0) state_dict_old[key] = new_rel_pos_bias return state_dict_old
Args: state_dict_old: loaded state dict state_dict_new: state dict for model with new image size patch_shape_new: new model patch_shape ref: https://github.com/microsoft/unilm/blob/master/beit/run_class_finetuning.py # noqa: E501
interpolate_pos_relative_bias_beit
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/utils.py
Apache-2.0
def load_tf_weights_in_bert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" logger = MMLogger.get_current_instance() try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( 'Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see ' 'https://www.tensorflow.org/install/ for installation instructions.' ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info('Converting TensorFlow checkpoint from {}'.format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info('Loading TF weight {} with shape {}'.format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split('/') # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any(n in [ 'adam_v', 'adam_m', 'AdamWeightDecayOptimizer', 'AdamWeightDecayOptimizer_1', 'global_step', ] for n in name): logger.info('Skipping {}'.format('/'.join(name))) continue pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+_\d+', m_name): scope_names = re.split(r'_(\d+)', m_name) else: scope_names = [m_name] if scope_names[0] == 'kernel' or scope_names[0] == 'gamma': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta': pointer = getattr(pointer, 'bias') elif scope_names[0] == 'output_weights': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'squad': pointer = getattr(pointer, 'classifier') else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info('Skipping {}'.format('/'.join(name))) continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: assert ( pointer.shape == array.shape ), f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info('Initialize PyTorch weight {}'.format(name)) pointer.data = torch.from_numpy(array) return model
Load tf checkpoints in a pytorch model.
load_tf_weights_in_bert
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/modeling_bert.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/modeling_bert.py
Apache-2.0
def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_( mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_()
Initialize the weights.
_init_weights
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/modeling_bert.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/modeling_bert.py
Apache-2.0
def _prune_heads(self, heads_to_prune): """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads)
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel
_prune_heads
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/modeling_bert.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/modeling_bert.py
Apache-2.0
def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor: """Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (:obj:`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (:obj:`Tuple[int]`): The shape of the input to the model. device: (:obj:`torch.device`): The device of the input to the model. Returns: :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. """ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] if is_decoder: batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = ( seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]) # in case past_key_values are used we need to add a prefix ones mask to the causal mask # causal and attention masks must have same type with pytorch version < 1.3 causal_mask = causal_mask.to(attention_mask.dtype) if causal_mask.shape[1] < attention_mask.shape[1]: prefix_seq_len = attention_mask.shape[ 1] - causal_mask.shape[1] causal_mask = torch.cat( [ torch.ones( (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype, ), causal_mask, ], axis=-1, ) extended_attention_mask = ( causal_mask[:, None, :, :] * attention_mask[:, None, None, :]) else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( 'Wrong shape for input_ids (shape {}) or attention_mask (shape {})' .format(input_shape, attention_mask.shape)) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to( dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

Arguments:
    attention_mask (:obj:`torch.Tensor`):
        Mask with ones indicating tokens to attend to, zeros for tokens
        to ignore.
    input_shape (:obj:`Tuple[int]`):
        The shape of the input to the model.
    device (:obj:`torch.device`):
        The device of the input to the model.

Returns:
    :obj:`torch.Tensor` The extended attention mask, with the same dtype
    as :obj:`attention_mask.dtype`.
get_extended_attention_mask
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/modeling_bert.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/modeling_bert.py
Apache-2.0
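For the common 2D padding-mask case, the transformation reduces to a broadcastable additive bias; a minimal sketch:

import torch

attention_mask = torch.tensor([[1, 1, 0]])           # (batch, seq)
extended = attention_mask[:, None, None, :].float()  # (batch, 1, 1, seq)
extended = (1.0 - extended) * -10000.0
print(extended)  # the padded position contributes -10000 before the softmax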
def tie_aux_decoder_weights(self, module, aux_modules):
    """Tie the decoder weights of all `aux_modules` to `module` (weights
    only, not bias)."""
    for m in aux_modules:
        m.predictions.decoder.weight = module.predictions.decoder.weight
Tie the decoder weights of all `aux_modules` to `module` (weights only, not bias).
tie_aux_decoder_weights
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/modeling_bert.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/modeling_bert.py
Apache-2.0
def interpolate_temporal_pos_embed(temp_embed_old, num_frames_new): """ temp_embed_old: (1, num_frames_old, 1, d) Returns: temp_embed_new: (1, num_frames_new, 1, d) """ temp_embed_old = temp_embed_old.squeeze(2).permute( 0, 2, 1) # (1, d, num_frames_old) temp_embed_new = F.interpolate( temp_embed_old, num_frames_new, mode='linear') # (1, d, num_frames_new) temp_embed_new = temp_embed_new.permute(0, 2, 1).unsqueeze( 2) # (1, num_frames_new, 1, d) return temp_embed_new
temp_embed_old: (1, num_frames_old, 1, d) Returns: temp_embed_new: (1, num_frames_new, 1, d)
interpolate_temporal_pos_embed
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/beit3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/beit3d.py
Apache-2.0
def __init__(self, config: BeitConfig):
        """Temporal attention block applied along the frame axis.

        Args:
            config (BeitConfig): The config of the model.
        """
        super().__init__()
        self.layernorm_before = nn.LayerNorm(
            config.hidden_size, eps=config.layer_norm_eps)
        self.attention = BeitAttention(config, window_size=None)
        self.scale = nn.Parameter(
            config.temporal_model_init_value * torch.ones(
                (config.hidden_size)),
            requires_grad=True,
        )
        self.drop_path = BeitDropPath(config.drop_path_rate)
Temporal attention block applied along the frame axis.

Args:
    config (BeitConfig): The config of the model.
__init__
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/beit3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/beit3d.py
Apache-2.0
def forward(self, hidden_states: torch.Tensor):
        """forward function.

        Args:
            hidden_states (torch.Tensor): The input. Shape: [b,t,l,c]

        Returns:
            torch.Tensor: The output with the same shape [b,t,l,c].
        """
        b = hidden_states.shape[0]
        output = einops.rearrange(hidden_states, 'b t l c -> (b l) t c')
        output = self.layernorm_before(output)
        # BeitAttention returns a tuple; take the attention output tensor
        output = self.attention(output)
        output = einops.rearrange(output[0], '(b l) t c -> b t l c', b=b)
        return hidden_states + self.drop_path(output) * self.scale
forward function.

Args:
    hidden_states (torch.Tensor): The input. Shape: [b,t,l,c]

Returns:
    torch.Tensor: The output with the same shape [b,t,l,c].
forward
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/beit3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/beit3d.py
Apache-2.0
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: """ Args: hidden_states (torch.Tensor): Shape: [B,T,L,C] """ if self.layernorm is not None: # Mean pool the final hidden states of the patch tokens # patch_tokens = hidden_states[:, 1 + self.num_prompts :, :] if self.num_prompts > 0: patch_tokens = hidden_states[:, :, 1:-self.num_prompts, :] else: patch_tokens = hidden_states[:, :, 1:, :] pooled_output = self.layernorm(patch_tokens.mean(2)) else: # Pool by simply taking the final hidden state of the [CLS] token pooled_output = hidden_states[:, :, 0] return pooled_output
Args: hidden_states (torch.Tensor): Shape: [B,T,L,C]
forward
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/beit3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/beit3d.py
Apache-2.0
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None ) -> torch.Tensor: """ Args: pixel_values (torch.Tensor): The input image patches. Shape: [B, T, C, H, W]. """ t = pixel_values.shape[1] pixel_values = einops.rearrange(pixel_values, 'b t c h w -> (b t) c h w') embeddings = self.patch_embeddings(pixel_values) batch_size, seq_len, _ = embeddings.size() # [(b t) l c] cls_tokens = self.cls_token.expand(batch_size, -1, -1) if bool_masked_pos is not None: mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_tokens w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1 - w) + mask_tokens * w if self.prompt_tokens is not None: prompt_tokens = self.prompt_tokens.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings, prompt_tokens), dim=1) else: embeddings = torch.cat((cls_tokens, embeddings), dim=1) # [B*T, L, C] if self.position_embeddings is not None: embeddings = embeddings + self.position_embeddings embeddings = einops.rearrange(embeddings, '(b t) l c -> b t l c', t=t) if self.temporal_position_embeddings is not None: if t <= self.temporal_position_embeddings.shape[1]: embeddings = embeddings + \ self.temporal_position_embeddings[:, :t] else: tpe = interpolate_temporal_pos_embed( self.temporal_position_embeddings, t) embeddings = embeddings + tpe embeddings = self.dropout(embeddings) return embeddings
Args: pixel_values (torch.Tensor): The input image patches. Shape: [B, T, C, H, W].
forward
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/beit3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/beit3d.py
Apache-2.0
def loss(self, inputs, data_samples): """Calculate losses from a batch of inputs and data samples. Args: inputs (dict): A batch of inputs. The input tensor with of at least one modality. For image, the value is a tensor of shape (N, C, ...) in general. For text, the value is a dict of tokenized text inputs. data_samples (Optional[List[DataSample]]): The annotation data of every samples. Defaults to None. Returns: Dict[str, torch.tensor]: a dictionary of loss components of """ questions, question_output = self.forward_encoder(inputs, data_samples) weights = torch.cat( [torch.tensor(sample.gt_answer_weight) for sample in data_samples], dim=0).to(inputs.device) raw_answers = [] for sample in data_samples: raw_answers.extend(sample.gt_answer) answer_count = torch.tensor([ len(sample.gt_answer) for sample in data_samples ]).to(inputs.device) answers = [a + ' ' + '[SEP]' for a in raw_answers] answers = self.tokenizer( answers, padding='max_length', truncation=True, max_length=self.max_answer_len, return_tensors='pt').to(inputs.device) answer_targets = answers.input_ids.masked_fill( answers.input_ids == self.tokenizer.pad_token_id, -100) question_states = [] question_atts = [] for b, n in enumerate(answer_count): question_states += [question_output.last_hidden_state[b]] * n question_atts += [questions.attention_mask[b]] * n question_states = torch.stack(question_states, 0).to(inputs.device) question_atts = torch.stack(question_atts, 0).to(inputs.device) answer_output = self.text_decoder( answers.input_ids, attention_mask=answers.attention_mask, encoder_hidden_states=question_states, encoder_attention_mask=question_atts, labels=answer_targets, return_dict=True, reduction='none', ) loss = weights * answer_output.loss loss = loss.sum() / inputs.size(0) return dict(loss=loss)
Calculate losses from a batch of inputs and data samples.

Args:
    inputs (torch.Tensor): A batch of input video frames, a tensor of
        shape (N, C, ...) in general.
    data_samples (Optional[List[DataSample]]): The annotation data of
        every sample. Defaults to None.

Returns:
    Dict[str, torch.tensor]: a dictionary of loss components.
loss
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_vqa.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_vqa.py
Apache-2.0
def rank_answer(self, question_states, question_atts, answer_ids, answer_atts, k): """ question_states: (bsz, Lq, d) answer_ids: answer input id after tokenization, (#answers, La) """ num_ques = question_states.size(0) start_ids = answer_ids[0, 0].repeat(num_ques, 1) # bos token start_output = self.text_decoder( start_ids, encoder_hidden_states=question_states, encoder_attention_mask=question_atts, return_dict=True, reduction='none', ) logits = start_output.logits[:, 0, :] # first token's logit # topk_probs: top-k probability # topk_ids: [num_question, k] answer_first_token = answer_ids[:, 1] prob_first_token = F.softmax( logits, dim=1).index_select( dim=1, index=answer_first_token) topk_probs, topk_ids = prob_first_token.topk(k, dim=1) # answer input: [num_question*k, answer_len] input_ids = [] input_atts = [] for b, topk_id in enumerate(topk_ids): input_ids.append(answer_ids.index_select(dim=0, index=topk_id)) input_atts.append(answer_atts.index_select(dim=0, index=topk_id)) input_ids = torch.cat(input_ids, dim=0) input_atts = torch.cat(input_atts, dim=0) targets_ids = input_ids.masked_fill( input_ids == self.tokenizer.pad_token_id, -100) question_states = question_states.repeat_interleave(k, dim=0) question_atts = question_atts.repeat_interleave(k, dim=0) output = self.text_decoder( input_ids, attention_mask=input_atts, encoder_hidden_states=question_states, encoder_attention_mask=question_atts, labels=targets_ids, return_dict=True, reduction='none', ) answer_loss = output.loss answer_loss = answer_loss.view(input_ids.size(0), -1) # topk_prob: first token probability topk_probs = topk_probs.view(-1, 1) log_probs = torch.cat([topk_probs.log(), -answer_loss], dim=1) # re-calculate log probabilities for the answer sequences # using chain rule log_probs_sum = log_probs.sum(1) log_probs_sum = log_probs_sum.view(num_ques, k) topk_probs = F.softmax(log_probs_sum, dim=-1) # get top-k after re-ranking topk_probs, rerank_id = topk_probs.topk(k, dim=1) topk_ids = torch.gather(topk_ids, 1, rerank_id) return topk_ids, topk_probs
question_states: (bsz, Lq, d)
        answer_ids: answer input id after tokenization, (#answers, La)
rank_answer
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_vqa.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_vqa.py
Apache-2.0
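The re-ranking step above combines the first token's probability with the decoder's per-token losses via the chain rule: log p(answer) = log p(first token) + sum of log p(later tokens), where the decoder loss is already the per-token negative log-likelihood. A minimal sketch of that arithmetic with made-up numbers:

import torch
import torch.nn.functional as F

num_ques, k, ans_len = 2, 3, 4
topk_probs = torch.rand(num_ques * k, 1)          # first-token probabilities
answer_loss = torch.rand(num_ques * k, ans_len)   # per-token NLL from decoder

# log p(sequence) = log p(first token) - sum of per-token losses
log_probs = torch.cat([topk_probs.log(), -answer_loss], dim=1)
log_probs_sum = log_probs.sum(1).view(num_ques, k)

# Softmax over the k candidates yields a normalized re-ranking score.
rerank_scores = F.softmax(log_probs_sum, dim=-1)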
def preprocess_state_dict(self, state_dict):
    """Preprocess pretrained checkpoint for text_encoder and
    text_decoder."""
    for key in list(state_dict.keys()):
        if 'bert' in key:
            encoder_key = key.replace('bert.', '')
            state_dict[encoder_key] = state_dict[key]

        # init text decoder as multimodal encoder
        # (last 6 layers of model.text_encoder)
        # only for generation tasks like VQA
        if self.text_decoder_cfg and 'text_encoder' in key:
            if 'layer' in key:
                encoder_keys = key.split('.')
                layer_num = int(encoder_keys[4])
                if layer_num < self.text_encoder_cfg.fusion_layer:
                    del state_dict[key]
                    continue
                else:
                    decoder_layer_num = layer_num - 9
                    encoder_keys[4] = str(decoder_layer_num)
                    encoder_key = '.'.join(encoder_keys)
            else:
                encoder_key = key
            decoder_key = encoder_key.replace('text_encoder',
                                              'text_decoder')
            state_dict[decoder_key] = state_dict[key]
            del state_dict[key]
    return state_dict
Preprocess pretrained checkpoint for text_encoder and text_decoder.
preprocess_state_dict
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_vqa.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_vqa.py
Apache-2.0
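Because the decoder is initialized from the last layers of the text encoder, checkpoint keys must be renamed and renumbered; the source subtracts a fixed offset of 9. A minimal sketch of the same renumbering idea on a toy state dict (the key layout here is illustrative, not the real checkpoint schema):

# Toy state dict with 12 encoder layers; assume layers >= fusion_layer
# are reused as decoder layers (offset 9, as in the source).
fusion_layer = 9
state_dict = {f'text_encoder.layer.{i}.weight': i for i in range(12)}

for key in list(state_dict.keys()):
    parts = key.split('.')
    layer_num = int(parts[2])
    if layer_num < fusion_layer:
        continue  # encoder-only layers keep their original keys
    parts[2] = str(layer_num - fusion_layer)  # renumber 9..11 -> 0..2
    decoder_key = '.'.join(parts).replace('text_encoder', 'text_decoder')
    state_dict[decoder_key] = state_dict[key]

assert 'text_decoder.layer.0.weight' in state_dict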
def loss(
    self,
    inputs: torch.Tensor,
    data_samples: Optional[List[ActionDataSample]] = None,
) -> Dict[str, torch.tensor]:
    """Calculate losses from a batch of inputs and data samples.

    Args:
        inputs (dict): A batch of inputs. The input tensor of at least
            one modality. For image, the value is a tensor of shape
            (N, C, ...) in general. For text, the value is a dict of
            tokenized text inputs.
        data_samples (Optional[List[DataSample]]): The annotation data
            of every sample. Defaults to None.

    Returns:
        Dict[str, torch.tensor]: a dictionary of loss components.
    """
    output = self.extract_feat(inputs, data_samples)

    text_embeds = output['text_embeds']
    text_attn_mask = output['text_attn_mask']
    image_embeds = output['image_embeds']
    image_feat = output['image_feat']
    text_feat = output['text_feat']

    image_atts = torch.ones(
        image_embeds.size()[:-1], dtype=torch.long).to(self.device)

    # ITC Loss
    # B*world_size, D
    image_feat_all = torch.cat(dist.all_gather(image_feat))
    # B*world_size, D
    text_feat_all = torch.cat(dist.all_gather(text_feat))

    # image to text similarity
    # B, B*world_size
    sim_i2t = torch.einsum('mld,nd->mln', image_feat,
                           text_feat_all).mean(1) / self.temp
    # text-image similarity
    # B, B*world_size
    sim_t2i = torch.einsum('md,nld->mln', text_feat,
                           image_feat_all).mean(1) / self.temp

    rank = dist.get_rank()
    bs = inputs.size(0)
    itc_targets = torch.linspace(
        rank * bs, rank * bs + bs - 1, bs, dtype=int).to(self.device)

    itc_loss = (F.cross_entropy(sim_i2t, itc_targets) +
                F.cross_entropy(sim_t2i, itc_targets)) / 2

    # prepare for itm
    output_pos = self.text_encoder(
        encoder_embeds=text_embeds,
        attention_mask=text_attn_mask,
        encoder_hidden_states=image_embeds,
        encoder_attention_mask=image_atts,
        return_dict=True,
        mode='fusion',
    )

    idx = torch.tensor([i.gt_video_id for i in data_samples]).view(-1, 1)
    bs = idx.size(0)
    if self.negative_all_rank:
        idxs = torch.cat(dist.all_gather(idx))
        image_feat_world = torch.cat(dist.all_gather(image_feat))
        text_feat_world = torch.cat(dist.all_gather(text_feat))
        att_mask_world = torch.cat(dist.all_gather(text_attn_mask))
        text_embeds_world = torch.cat(all_gather_with_grad(text_embeds))
        image_embeds_world = torch.cat(all_gather_with_grad(image_embeds))
    else:
        idxs = idx
        image_feat_world = image_feat.detach()
        text_feat_world = text_feat.detach()
        image_embeds_world = image_embeds
        text_embeds_world = text_embeds
        att_mask_world = text_attn_mask

    with torch.no_grad():
        # compute sample similarity
        sim_i2t = torch.einsum('mld,nd->mln', image_feat,
                               text_feat_world).mean(1) / self.temp
        sim_t2i = torch.einsum('md,nld->mln', text_feat,
                               image_feat_world).mean(1) / self.temp

        mask = torch.eq(idx, idxs.t()).to(self.device)
        weights_i2t = F.softmax(sim_i2t + 1e-4, dim=1)
        weights_i2t.masked_fill_(mask, 0)

        weights_t2i = F.softmax(sim_t2i + 1e-4, dim=1)
        weights_t2i.masked_fill_(mask, 0)

    # select a negative image for each text
    neg_idx = torch.multinomial(weights_t2i, 1).squeeze()
    image_embeds_neg = image_embeds_world[neg_idx]
    # select a negative text for each image
    neg_idx = torch.multinomial(weights_i2t, 1).squeeze()
    text_embeds_neg = text_embeds_world[neg_idx]
    text_atts_neg = att_mask_world[neg_idx]

    text_embeds_all = torch.cat([text_embeds, text_embeds_neg], dim=0)
    text_atts_all = torch.cat([text_attn_mask, text_atts_neg], dim=0)

    image_embeds_all = torch.cat([image_embeds_neg, image_embeds], dim=0)
    image_atts_all = torch.cat([image_atts, image_atts], dim=0)

    output_neg = self.text_encoder(
        encoder_embeds=text_embeds_all,
        attention_mask=text_atts_all,
        encoder_hidden_states=image_embeds_all,
        encoder_attention_mask=image_atts_all,
        return_dict=True,
        mode='fusion',
    )

    vl_embeddings = torch.cat(
        [
            output_pos.last_hidden_state[:, 0, :],
            output_neg.last_hidden_state[:, 0, :],
        ],
        dim=0,
    )
    itm_targets = torch.ones((3 * bs, ),
                             dtype=torch.long,
                             device=inputs.device)
    itm_targets[bs:] = 0
    itm_logit = self.itm_head(vl_embeddings)
    itm_loss = F.cross_entropy(itm_logit, itm_targets)

    return dict(itc_loss=itc_loss, itm_loss=itm_loss)
Calculate losses from a batch of inputs and data samples.

        Args:
            inputs (dict): A batch of inputs. The input tensor of at least
                one modality. For image, the value is a tensor of shape
                (N, C, ...) in general. For text, the value is a dict of
                tokenized text inputs.
            data_samples (Optional[List[DataSample]]): The annotation data
                of every sample. Defaults to None.

        Returns:
            Dict[str, torch.tensor]: a dictionary of loss components.
loss
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py
Apache-2.0
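The ITC targets above are just the diagonal indices of the similarity matrix, offset by the rank so each GPU points at its own slice of the globally gathered features. A minimal sketch of how the targets line up, assuming a 2-GPU job and local batch size 4 (made-up numbers):

import torch

bs, rank = 4, 1  # second GPU in a 2-GPU job
# Equivalent to the torch.linspace call in the source: the diagonal of
# this rank's block in the (bs, bs * world_size) similarity matrix.
itc_targets = torch.arange(rank * bs, rank * bs + bs)
print(itc_targets)  # tensor([4, 5, 6, 7]): row i matches gathered column 4 + i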
def extract_feat(
    self,
    images: torch.Tensor = None,
    data_samples: List[ActionDataSample] = None,
    return_texts=True,
) -> Dict[str, torch.Tensor]:
    """Extract features from the input dict.

    Args:
        images (tensor, optional): The images to extract features.
            Defaults to None.
        data_samples (list, optional): The data samples containing texts
            to extract features. Defaults to None.
        return_texts (bool): Whether to return the tokenized text and the
            corresponding attention masks. Defaults to True.

    Returns:
        Tuple[torch.Tensor]: The output features. If the
            multimodal_backbone does not exist, a tuple of torch.Tensor
            will be returned.
    """
    if data_samples is not None:
        texts = self.preprocess_text(data_samples)
    else:
        texts = None

    assert images is not None or texts is not None, \
        'At least single modality should be passed as inputs.'

    results = {}
    if texts is not None and return_texts:
        results.update({
            'text_ids': texts.input_ids,
            'text_attn_mask': texts.attention_mask,
        })

    # extract image features
    if images is not None:
        image_embeds, pooled_image_embeds = self.encode_vision(images)
        # concat temporal embeds
        image_embeds = rearrange(image_embeds,
                                 'b t l c -> b (t l) c').contiguous()
        results['image_embeds'] = image_embeds
        results['image_feat'] = F.normalize(
            self.vision_proj(pooled_image_embeds), dim=-1)

    # extract text features
    if texts is not None:
        texts_output = self.text_encoder(
            texts.input_ids,
            attention_mask=texts.attention_mask,
            return_dict=True,
            mode='text')

        text_embeds = texts_output.last_hidden_state
        pooled_text_feat = text_embeds[:, 0]
        results['text_embeds'] = text_embeds
        results['text_feat'] = F.normalize(
            self.text_proj(pooled_text_feat), dim=-1)

    return results
Extract features from the input dict.

        Args:
            images (tensor, optional): The images to extract features.
                Defaults to None.
            data_samples (list, optional): The data samples containing texts
                to extract features. Defaults to None.
            return_texts (bool): Whether to return the tokenized text and the
                corresponding attention masks. Defaults to True.

        Returns:
            Tuple[torch.Tensor]: The output features. If the
                multimodal_backbone does not exist, a tuple of torch.Tensor
                will be returned.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py
Apache-2.0
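The temporal flattening step in extract_feat merges the frame axis into the token axis so the fusion encoder can attend over all frames at once. A minimal sketch of that rearrange; the shapes are illustrative:

import torch
from einops import rearrange

image_embeds = torch.randn(2, 8, 197, 768)  # (b, t, l, c): 8 frames, 197 tokens
flat = rearrange(image_embeds, 'b t l c -> b (t l) c').contiguous()
assert flat.shape == (2, 8 * 197, 768)  # one long token sequence per video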
def compute_score_matrix_i2t(self, img_feats, img_embeds, text_feats,
                             text_embeds, text_atts):
    """Compute the score matrix for image-to-text retrieval. Every image
    should be compared to all the text features.

    Args:
        img_feats (torch.Tensor): The input img feats tensor with shape
            (M, C). M stands for numbers of samples on a single GPU.
        img_embeds (torch.Tensor): The input img embeds tensor with shape
            (M, C). M stands for numbers of samples on a single GPU.
        text_feats (torch.Tensor): The input text feats tensor with shape
            (N, C). N stands for numbers of all samples on all GPUs.
        text_embeds (torch.Tensor): The input tensor with shape (N, C).
        text_atts (torch.Tensor): The input tensor with shape (N, C).

    Returns:
        torch.Tensor: Score matrix of image-to-text retrieval.
    """
    # compute i2t sim matrix
    sim_matrix_i2t = torch.einsum('mld,nd->mln', img_feats,
                                  text_feats).mean(1)
    if self.fast_match:
        return sim_matrix_i2t

    score_matrix_i2t = torch.full((img_feats.size(0), text_feats.size(0)),
                                  -100.0).to(self.device)

    for i in track_on_main_process(
            range(img_feats.size(0)), 'Compute I2T scores...'):
        sims = sim_matrix_i2t[i]
        topk_sim, topk_idx = sims.topk(k=self.topk, dim=0)

        topk_bz = 32
        encoder_output = img_embeds[i].repeat(topk_bz, 1, 1)
        encoder_att = torch.ones(
            encoder_output.size()[:-1], dtype=torch.long).to(self.device)
        for j in range(0, self.topk // topk_bz):
            batch_topk = topk_idx[j * topk_bz:(j + 1) * topk_bz]
            output = self.text_encoder(
                encoder_embeds=text_embeds[batch_topk],
                attention_mask=text_atts[batch_topk],
                encoder_hidden_states=encoder_output,
                encoder_attention_mask=encoder_att,
                return_dict=True,
                mode='fusion')
            score = self.itm_head(output.last_hidden_state[:, 0, :])[:, 1]
            score_matrix_i2t[i, batch_topk] = score

    return score_matrix_i2t
Compute the score matrix for image-to-text retrieval. Every image
        should be compared to all the text features.

        Args:
            img_feats (torch.Tensor): The input img feats tensor with shape
                (M, C). M stands for numbers of samples on a single GPU.
            img_embeds (torch.Tensor): The input img embeds tensor with shape
                (M, C). M stands for numbers of samples on a single GPU.
            text_feats (torch.Tensor): The input text feats tensor with shape
                (N, C). N stands for numbers of all samples on all GPUs.
            text_embeds (torch.Tensor): The input tensor with shape (N, C).
            text_atts (torch.Tensor): The input tensor with shape (N, C).

        Returns:
            torch.Tensor: Score matrix of image-to-text retrieval.
compute_score_matrix_i2t
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py
Apache-2.0
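The rescoring loop only runs the expensive fusion encoder on the top-k candidates per query, in chunks of topk_bz, and leaves every other entry at the -100 fill value so non-candidates can never win the argmax. A minimal sketch of that sparse-fill pattern, with a random stand-in for the fusion-encoder + ITM score and made-up sizes; the t2i variant in the next record follows the same pattern with images and texts swapped:

import torch

num_imgs, num_texts, topk, topk_bz = 4, 100, 32, 8
sim = torch.randn(num_imgs, num_texts)
score_matrix = torch.full((num_imgs, num_texts), -100.0)

for i in range(num_imgs):
    topk_sim, topk_idx = sim[i].topk(k=topk, dim=0)
    for j in range(0, topk // topk_bz):
        batch_topk = topk_idx[j * topk_bz:(j + 1) * topk_bz]
        # Stand-in for the fusion encoder + ITM head score of each pair.
        score_matrix[i, batch_topk] = torch.randn(topk_bz)

# Non-candidates keep the -100 fill, so argmax can only pick rescored pairs.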
def compute_score_matrix_t2i(self, img_feats, img_embeds, text_feats,
                             text_embeds, text_atts):
    """Compute the score matrix for text-to-image retrieval. Every text
    should be compared to all the image features.

    Args:
        img_feats (torch.Tensor): The input img feats tensor with shape
            (M, C). M stands for numbers of samples on a single GPU.
        img_embeds (torch.Tensor): The input img embeds tensor with shape
            (M, C). M stands for numbers of samples on a single GPU.
        text_feats (torch.Tensor): The input text feats tensor with shape
            (N, C). N stands for numbers of all samples on all GPUs.
        text_embeds (torch.Tensor): The input tensor with shape (M, C).
        text_atts (torch.Tensor): The input tensor with shape (M, C).

    Returns:
        torch.Tensor: Score matrix of text-to-image retrieval.
    """
    # compute t2i sim matrix
    sim_matrix_t2i = torch.einsum('md,nld->mln', text_feats,
                                  img_feats).mean(1)
    if self.fast_match:
        return sim_matrix_t2i

    score_matrix_t2i = torch.full((text_feats.size(0), img_feats.size(0)),
                                  -100.0).to(self.device)

    for i in track_on_main_process(
            range(text_feats.size(0)), 'Compute T2I scores...'):
        sims = sim_matrix_t2i[i]
        topk_sim, topk_idx = sims.topk(k=self.topk, dim=0)

        topk_bz = 32
        for j in range(0, self.topk // topk_bz):
            batch_topk = topk_idx[j * topk_bz:(j + 1) * topk_bz]
            encoder_output = img_embeds[batch_topk]
            encoder_att = torch.ones(
                encoder_output.size()[:-1],
                dtype=torch.long).to(self.device)
            output = self.text_encoder(
                encoder_embeds=text_embeds[i].repeat(topk_bz, 1, 1),
                attention_mask=text_atts[i].repeat(topk_bz, 1),
                encoder_hidden_states=encoder_output,
                encoder_attention_mask=encoder_att,
                return_dict=True,
                mode='fusion')
            score = self.itm_head(output.last_hidden_state[:, 0, :])[:, 1]
            score_matrix_t2i[i, batch_topk] = score

    return score_matrix_t2i
Compute the score matrix for text-to-image retrieval. Every text
        should be compared to all the image features.

        Args:
            img_feats (torch.Tensor): The input img feats tensor with shape
                (M, C). M stands for numbers of samples on a single GPU.
            img_embeds (torch.Tensor): The input img embeds tensor with shape
                (M, C). M stands for numbers of samples on a single GPU.
            text_feats (torch.Tensor): The input text feats tensor with shape
                (N, C). N stands for numbers of all samples on all GPUs.
            text_embeds (torch.Tensor): The input tensor with shape (M, C).
            text_atts (torch.Tensor): The input tensor with shape (M, C).

        Returns:
            torch.Tensor: Score matrix of text-to-image retrieval.
compute_score_matrix_t2i
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py
Apache-2.0
def _get_predictions(self,
                     result: torch.Tensor,
                     data_samples: List[ActionDataSample],
                     mode: str = 'i2t'):
    """Post-process the output of retriever.

    Args:
        result (torch.Tensor): Score matrix of single retrieve,
            either from image or text.
        data_samples (List[ActionDataSample], optional): The annotation
            data of every sample.
        mode (str): Retrieve mode, either `i2t` for image to text, or
            `t2i` text to image. Defaults to `i2t`.

    Returns:
        List[ActionDataSample]: the raw data_samples with
            the predicted results.
    """
    # create data sample if not exists
    if data_samples is None:
        data_samples = [ActionDataSample() for _ in range(result.size(0))]
    elif mode == 't2i':
        # Process data samples to align with the num of texts.
        new_data_samples = []
        for sample in data_samples:
            if isinstance(sample.text, (list, tuple)):
                texts = sample.text
            else:
                texts = [sample.text]
            for i, text in enumerate(texts):
                new_sample = ActionDataSample(text=text)
                if 'gt_video_id' in sample:
                    new_sample.gt_label = sample.gt_video_id[i]
                new_data_samples.append(new_sample)
        assert len(new_data_samples) == result.size(0)
        data_samples = new_data_samples
    elif mode == 'i2t':
        for sample in data_samples:
            if 'gt_text_id' in sample:
                sample.gt_label = sample.gt_text_id
    else:
        raise ValueError(f'Type {mode} is not supported.')

    for data_sample, score in zip(data_samples, result):
        idx = score.argmax(keepdim=True).detach()

        data_sample.set_pred_score(score)
        data_sample.set_pred_label(idx)
    return data_samples
Post-process the output of retriever.

        Args:
            result (torch.Tensor): Score matrix of single retrieve,
                either from image or text.
            data_samples (List[ActionDataSample], optional): The annotation
                data of every sample.
            mode (str): Retrieve mode, either `i2t` for image to text, or
                `t2i` text to image. Defaults to `i2t`.

        Returns:
            List[ActionDataSample]: the raw data_samples with
                the predicted results.
_get_predictions
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py
Apache-2.0
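For t2i retrieval, a single data sample can carry several candidate texts, so it is expanded into one sample per text before scores are attached. A minimal sketch of that expansion using a stand-in sample class (the real code uses ActionDataSample):

class Sample:  # stand-in for ActionDataSample, for illustration only
    def __init__(self, text, gt_video_id=None):
        self.text = text
        self.gt_video_id = gt_video_id

samples = [Sample(['a cat jumps', 'a dog runs'], gt_video_id=[0, 0])]

new_samples = []
for sample in samples:
    texts = sample.text if isinstance(sample.text, (list, tuple)) \
        else [sample.text]
    for i, text in enumerate(texts):
        new_sample = Sample(text)
        new_sample.gt_label = sample.gt_video_id[i]
        new_samples.append(new_sample)

assert len(new_samples) == 2  # one expanded sample per candidate text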
def forward(self, x: torch.Tensor):
    """forward.

    Args:
        x (torch.Tensor): input features.
            Shape: [bs, nframes, l, c]. l = 1 + h*w

    Returns:
        features after adapter. The same shape as input.
    """
    if x.shape[1] == 1:  # for single frame, return itself.
        return x
    shortcut = x
    x = self.linear1(x)
    cls = x[:, :, :1, :]
    tokens = x[:, :, 1:, :]
    tokens = einops.rearrange(
        tokens, 'b t (h w) c -> b c t h w', h=self.h).contiguous()
    tokens = self.conv(tokens)
    tokens = einops.rearrange(tokens, 'b c t h w -> b t (h w) c')
    x = torch.cat([cls, tokens], dim=2)  # [b, t, 1+h*w, c]
    x = self.act(x)
    x = self.linear2(x)

    return shortcut + self.scale * self.droppath(x)
forward.

        Args:
            x (torch.Tensor): input features.
                Shape: [bs, nframes, l, c]. l = 1 + h*w

        Returns:
            features after adapter. The same shape as input.
forward
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/temporal_model.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py
Apache-2.0
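The adapter above has to pull the [CLS] token out before the 3D convolution, since only the h*w patch tokens form a spatial grid. A minimal sketch of the split/conv/merge pattern; the convolution is a toy stand-in and all sizes are made up:

import torch
import torch.nn as nn
import einops

b, t, h, w, c = 2, 4, 7, 7, 16
x = torch.randn(b, t, 1 + h * w, c)               # [CLS] + patch tokens
conv = nn.Conv3d(c, c, kernel_size=3, padding=1)  # toy spatio-temporal conv

cls = x[:, :, :1, :]                              # keep [CLS] aside
tokens = einops.rearrange(x[:, :, 1:, :], 'b t (h w) c -> b c t h w', h=h)
tokens = conv(tokens)                             # convolve the patch grid
tokens = einops.rearrange(tokens, 'b c t h w -> b t (h w) c')
out = torch.cat([cls, tokens], dim=2)             # reattach [CLS]
assert out.shape == x.shape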
def __init__(self, input_dim=768, droppath_rate=0.1):
    """
    Kwargs:
        input_dim (int): The input feature dimension.
    """
    super().__init__()
    self._input_dim = input_dim
    self.temporal_attn = MultiheadAttention(
        input_dim, num_heads=input_dim // 64)
    self.norm = LayerNorm(input_dim, eps=1e-12)
    self.linear = Linear(input_dim, input_dim)
    self.droppath = DropPath(droppath_rate)
    self.scale = nn.parameter.Parameter(torch.zeros([]))
Kwargs:
            input_dim (int): The input feature dimension.
__init__
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/temporal_model.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py
Apache-2.0
def forward(self, x: torch.Tensor):
    """forward.

    Args:
        x (torch.Tensor): input features.
            Shape: [bs, nframes, l, c]. l = 1 + h*w

    Returns:
        features after adapter. The same shape as input.
    """
    if x.shape[1] == 1:  # for single frame, return itself.
        return x
    shortcut = x
    x = einops.rearrange(x, 'b t l c -> t (b l) c')
    x = self.norm(x)
    x = self.temporal_attn(x, x, x)[0]
    x = einops.rearrange(x, 't (b l) c -> b t l c', b=shortcut.shape[0])
    return shortcut + self.scale * self.droppath(x)
forward.

        Args:
            x (torch.Tensor): input features.
                Shape: [bs, nframes, l, c]. l = 1 + h*w

        Returns:
            features after adapter. The same shape as input.
forward
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/temporal_model.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py
Apache-2.0
def __init__(self, input_dim=768, droppath_rate=0.1, window_size=(2, 2)):
    """
    Kwargs:
        input_dim (int): The input feature dimension.
    """
    super().__init__()
    self._input_dim = input_dim
    self.temporal_attn = MultiheadAttention(
        input_dim, num_heads=input_dim // 64)
    self.norm = LayerNorm(input_dim, eps=1e-12)
    self.droppath = DropPath(droppath_rate)
    self.scale = nn.parameter.Parameter(torch.zeros([]))
    self.wh, self.ww = window_size
Kwargs:
            input_dim (int): The input feature dimension.
__init__
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/temporal_model.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py
Apache-2.0
def forward(self, x: torch.Tensor):
    """forward.

    Args:
        x (torch.Tensor): input features.
            Shape: [bs, nframes, l, c]. l = 1 + h*w

    Returns:
        features after adapter. The same shape as input.
    """
    if x.shape[1] == 1:  # for single frame, return itself.
        return x
    shortcut = x

    h = w = int(math.sqrt(x.shape[2] - 1))
    cls_token = x[:, :, :1, :]

    x = einops.rearrange(
        x[:, :, 1:, :],
        'b t (nh wh nw ww) c -> (t wh ww) (b nh nw) c',
        nh=h // self.wh,
        wh=self.wh,
        nw=w // self.ww,
        ww=self.ww,
    )
    x = self.norm(x)
    x = self.temporal_attn(x, x, x)[0]
    x = einops.rearrange(
        x,
        '(t wh ww) (b nh nw) c -> b t (nh wh nw ww) c',
        wh=self.wh,
        ww=self.ww,
        nh=h // self.wh,
        nw=w // self.ww,
    )
    # add back cls token.
    x = torch.concat([cls_token, x], dim=2)
    return shortcut + self.scale * self.droppath(x)
forward.

        Args:
            x (torch.Tensor): input features.
                Shape: [bs, nframes, l, c]. l = 1 + h*w

        Returns:
            features after adapter. The same shape as input.
forward
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/temporal_model.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py
Apache-2.0
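The windowed variant groups each (wh, ww) spatial window across all frames into one attention sequence, so attention is temporal and local rather than global. A minimal sketch verifying that the partition and its inverse round-trip cleanly; the grid and window sizes are made up:

import torch
import einops

b, t, h, w, c = 2, 4, 4, 4, 8  # 4x4 patch grid, 4 frames
wh = ww = 2                    # 2x2 windows
x = torch.randn(b, t, h * w, c)  # patch tokens only, [CLS] excluded

windows = einops.rearrange(
    x, 'b t (nh wh nw ww) c -> (t wh ww) (b nh nw) c',
    nh=h // wh, wh=wh, nw=w // ww, ww=ww)
back = einops.rearrange(
    windows, '(t wh ww) (b nh nw) c -> b t (nh wh nw ww) c',
    wh=wh, ww=ww, nh=h // wh, nw=w // ww)
assert torch.equal(x, back)  # the partition plus its inverse is lossless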
def __init__(self, input_dim=768, droppath_rate=0.1, num_prompts=1):
    """
    Kwargs:
        input_dim (int): The input feature dimension.
    """
    super().__init__()

    d_model = input_dim

    self.message_fc = nn.Linear(d_model, d_model)
    self.message_ln = LayerNorm(d_model, eps=1e-12)
    self.message_attn = nn.MultiheadAttention(d_model, d_model // 64)
    self.num_prompts = num_prompts

    self.droppath = DropPath(droppath_rate)
Kwargs:
            input_dim (int): The input feature dimension.
__init__
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/temporal_model.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py
Apache-2.0
def forward(self, x: torch.Tensor):
    """forward.

    Args:
        x (torch.Tensor): input features.
            Shape: [bs, nframes, l, c]. l = 1 + h*w

    Returns:
        features after adapter. The same shape as input.
    """
    if x.shape[1] == 1:  # for single frame, return itself.
        return x

    msg_token = self.message_ln(self.message_fc(x[:, :, 0, :]))  # [b, t, c]
    msg_token = rearrange(msg_token, 'b t c -> t b c')
    msg_token = msg_token + self.droppath(
        self.message_attn(msg_token, msg_token, msg_token)[0])
    msg_token = rearrange(msg_token, 't b c -> b t c')
    # replace the last prompt token with msg_token.
    x = torch.cat([x[:, :, :-1, :],
                   msg_token.unsqueeze(2)], dim=2)  # [b, t, l+1, c]
    return x
forward.

        Args:
            x (torch.Tensor): input features.
                Shape: [bs, nframes, l, c]. l = 1 + h*w

        Returns:
            features after adapter. The same shape as input.
forward
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/temporal_model.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py
Apache-2.0
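This variant communicates across frames through a single message token per frame: the [CLS] token is projected, the frames' messages attend to each other, and the result overwrites the last prompt token. A minimal sketch of the token-swap step, with the attention replaced by an identity stand-in and made-up sizes:

import torch
from einops import rearrange

b, t, l, c = 2, 4, 10, 16
x = torch.randn(b, t, l, c)

msg_token = x[:, :, 0, :]                          # one message per frame
msg_token = rearrange(msg_token, 'b t c -> t b c')
# ... temporal self-attention over the t message tokens would run here ...
msg_token = rearrange(msg_token, 't b c -> b t c')

# Overwrite the last prompt token with the (attended) message token.
out = torch.cat([x[:, :, :-1, :], msg_token.unsqueeze(2)], dim=2)
assert out.shape == x.shape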
def predict(self, inputs, data_samples, **kwargs):
    """Predict captions from a batch of inputs.

    Args:
        inputs (torch.Tensor): The input images tensor with shape
            (N, C, ...) in general.
        data_samples (List[DataSample], optional): The annotation
            data of every sample. Defaults to None.
        **kwargs: Other keyword arguments accepted by the ``predict``
            method.

    Returns:
        List[ActionDataSample]: Return list of data samples.
    """
    num_options_per_q = len(data_samples[0].caption_options)
    for sample in data_samples:
        sample.text = sample.caption_options

    output = self.extract_feat(inputs, data_samples)

    text_embeds = output['text_embeds']
    text_attn_mask = output['text_attn_mask']
    image_embeds = output['image_embeds']
    image_feat = output['image_feat']
    text_feat = output['text_feat']

    # compute similarity between vision feat and caption feat
    text_feat = rearrange(
        text_feat, '(b n) c -> b c n', n=num_options_per_q)
    sim = torch.matmul(image_feat.mean(1, keepdim=True),
                       text_feat).squeeze(1) / self.temp
    sim = F.softmax(sim, dim=1).flatten()

    # cross-modal encode
    encoder_output = image_embeds.repeat_interleave(
        num_options_per_q, dim=0)
    image_atts = torch.ones(
        encoder_output.size()[:-1], dtype=torch.long).to(inputs.device)
    output = self.text_encoder(
        encoder_embeds=text_embeds,
        attention_mask=text_attn_mask,
        encoder_hidden_states=encoder_output,
        encoder_attention_mask=image_atts,
        return_dict=True,
        mode='fusion',
    )
    itm_embeds = output.last_hidden_state[:, 0]  # [CLS]

    itm_score = F.softmax(self.itm_head(itm_embeds), dim=1)[:, 1]  # [bs*5]
    score = itm_score * self.score_weight + sim * self.similarity_weight

    pred_answers = score.view(-1, num_options_per_q).max(1)[1].cpu()

    # assemble predictions
    ensemble_scores = score.view(-1, num_options_per_q).cpu()  # (bsz, 5)

    out_data_samples = []
    for data_sample, ensemble_score, pred_ans in \
            zip(data_samples, ensemble_scores, pred_answers):
        data_sample.pred_label = pred_ans.item()
        data_sample.score = ensemble_score.numpy()
        out_data_samples.append(data_sample)

    return out_data_samples
Predict captions from a batch of inputs.

        Args:
            inputs (torch.Tensor): The input images tensor with shape
                (N, C, ...) in general.
            data_samples (List[DataSample], optional): The annotation
                data of every sample. Defaults to None.
            **kwargs: Other keyword arguments accepted by the ``predict``
                method.

        Returns:
            List[ActionDataSample]: Return list of data samples.
predict
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret_mc.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret_mc.py
Apache-2.0
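The final multiple-choice score is a weighted blend of the fine-grained ITM match probability and the coarse ITC similarity, reshaped so each row holds one question's options. A minimal sketch of that blend; the weights and scores are made up for illustration:

import torch

bs, num_options = 2, 5
itm_score = torch.rand(bs * num_options)  # ITM "match" probability per option
sim = torch.rand(bs * num_options)        # softmaxed ITC similarity per option
score_weight, similarity_weight = 0.7, 0.3  # illustrative weights

score = itm_score * score_weight + sim * similarity_weight
pred = score.view(bs, num_options).max(1)[1]  # best option index per question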
def extract_feat(self, inputs: torch.Tensor, **kwargs) -> ForwardResults:
    """Extract features from raw inputs."""
Extract features from raw inputs.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu.py
Apache-2.0