code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def predict_by_feat(self,
rois: Tuple[Tensor],
cls_scores: Tuple[Tensor],
batch_img_metas: List[dict],
rcnn_test_cfg: Optional[ConfigDict] = None,
**kwargs) -> InstanceList:
"""Transform a batch of output features extracted from the head into
bbox results.
Args:
rois (tuple[Tensor]): Tuple of boxes to be transformed.
Each has shape (num_boxes, 5). last dimension 5 arrange as
(batch_index, x1, y1, x2, y2).
cls_scores (tuple[Tensor]): Tuple of box scores, each has shape
(num_boxes, num_classes + 1).
bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each
has shape (num_boxes, num_classes * 4).
batch_img_metas (list[dict]): List of image information.
rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection
results of each image after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
result_list = []
for img_id in range(len(batch_img_metas)):
img_meta = batch_img_metas[img_id]
results = self._predict_by_feat_single(
roi=rois[img_id],
cls_score=cls_scores[img_id],
img_meta=img_meta,
rcnn_test_cfg=rcnn_test_cfg,
**kwargs)
result_list.append(results)
return result_list | Transform a batch of output features extracted from the head into
bbox results.
Args:
rois (tuple[Tensor]): Tuple of boxes to be transformed.
Each has shape (num_boxes, 5). last dimension 5 arrange as
(batch_index, x1, y1, x2, y2).
cls_scores (tuple[Tensor]): Tuple of box scores, each has shape
(num_boxes, num_classes + 1).
bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each
has shape (num_boxes, num_classes * 4).
batch_img_metas (list[dict]): List of image information.
rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection
results of each image after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2). | predict_by_feat | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/bbox_heads/bbox_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py | Apache-2.0 |
def _predict_by_feat_single(self,
roi: Tensor,
cls_score: Tensor,
img_meta: dict,
rcnn_test_cfg: Optional[ConfigDict] = None,
**kwargs) -> InstanceData:
"""Transform a single image's features extracted from the head into
bbox results.
Args:
roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).
last dimension 5 arrange as (batch_index, x1, y1, x2, y2).
cls_score (Tensor): Box scores, has shape
(num_boxes, num_classes + 1).
bbox_pred (Tensor): Box energies / deltas.
has shape (num_boxes, num_classes * 4).
img_meta (dict): image information.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
Defaults to None
Returns:
:obj:`InstanceData`: Detection results of each image\
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
results = InstanceData()
# might be used by testing w. augmentation
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
# Handle Multi/Single Label
if cls_score is not None:
if self.multilabel:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(dim=-1)
else:
scores = None
bboxes = roi[:, 1:]
assert bboxes.shape[-1] == 4
# First reverse the flip
img_h, img_w = img_meta['img_shape']
if img_meta.get('flip', False):
bboxes_ = bboxes.clone()
bboxes_[:, 0] = img_w - 1 - bboxes[:, 2]
bboxes_[:, 2] = img_w - 1 - bboxes[:, 0]
bboxes = bboxes_
# Then normalize the bbox to [0, 1]
bboxes[:, 0::2] /= img_w
bboxes[:, 1::2] /= img_h
def _bbox_crop_undo(bboxes, crop_quadruple):
decropped = bboxes.clone()
if crop_quadruple is not None:
x1, y1, tw, th = crop_quadruple
decropped[:, 0::2] = bboxes[..., 0::2] * tw + x1
decropped[:, 1::2] = bboxes[..., 1::2] * th + y1
return decropped
crop_quadruple = img_meta.get('crop_quadruple', np.array([0, 0, 1, 1]))
bboxes = _bbox_crop_undo(bboxes, crop_quadruple)
results.bboxes = bboxes
results.scores = scores
return results | Transform a single image's features extracted from the head into
bbox results.
Args:
roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).
last dimension 5 arrange as (batch_index, x1, y1, x2, y2).
cls_score (Tensor): Box scores, has shape
(num_boxes, num_classes + 1).
bbox_pred (Tensor): Box energies / deltas.
has shape (num_boxes, num_classes * 4).
img_meta (dict): image information.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
Defaults to None
Returns:
:obj:`InstanceData`: Detection results of each image\
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2). | _predict_by_feat_single | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/bbox_heads/bbox_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py | Apache-2.0 |
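The box post-processing in `_predict_by_feat_single` (undo a horizontal flip, normalize to [0, 1], then undo the crop) can be reproduced in isolation. Below is a minimal sketch on a single toy box; the image size, box coordinates and crop quadruple are invented values, not data from the repository.

```python
import numpy as np
import torch

img_h, img_w = 256, 320
bboxes = torch.tensor([[40., 60., 120., 200.]])     # (x1, y1, x2, y2) in pixels
flip = True
crop_quadruple = np.array([0.1, 0.0, 0.8, 1.0])     # (x1, y1, tw, th), relative

if flip:                                            # undo the horizontal flip
    bboxes_ = bboxes.clone()
    bboxes_[:, 0] = img_w - 1 - bboxes[:, 2]
    bboxes_[:, 2] = img_w - 1 - bboxes[:, 0]
    bboxes = bboxes_

bboxes[:, 0::2] /= img_w                            # normalize x to [0, 1]
bboxes[:, 1::2] /= img_h                            # normalize y to [0, 1]

x1, y1, tw, th = crop_quadruple                     # undo the relative crop
decropped = bboxes.clone()
decropped[:, 0::2] = bboxes[:, 0::2] * tw + x1
decropped[:, 1::2] = bboxes[:, 1::2] * th + y1
print(decropped)                                    # final box in [0, 1] coords
```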
def conv_branch_init(conv: nn.Module, branches: int) -> None:
"""Perform initialization for a conv branch.
Args:
conv (nn.Module): The conv module of a branch.
branches (int): The number of branches.
"""
weight = conv.weight
n = weight.size(0)
k1 = weight.size(1)
k2 = weight.size(2)
nn.init.normal_(weight, 0, math.sqrt(2. / (n * k1 * k2 * branches)))
nn.init.constant_(conv.bias, 0) | Perform initialization for a conv branch.
Args:
conv (nn.Module): The conv module of a branch.
branches (int): The number of branches. | conv_branch_init | python | open-mmlab/mmaction2 | mmaction/engine/model/weight_init.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/model/weight_init.py | Apache-2.0 |
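A minimal standalone sketch of the scaling rule `conv_branch_init` applies; the layer sizes and branch count below are assumptions chosen only for illustration.

```python
import math
import torch.nn as nn

# hypothetical multi-branch block: three conv branches whose outputs are summed
branches = 3
conv = nn.Conv1d(in_channels=64, out_channels=64, kernel_size=9, padding=4)

# same statistics as conv_branch_init: fan from out_channels (n),
# in_channels (k1) and the first kernel dim (k2), divided by the branch count
n, k1, k2 = conv.weight.size(0), conv.weight.size(1), conv.weight.size(2)
std = math.sqrt(2. / (n * k1 * k2 * branches))
nn.init.normal_(conv.weight, 0, std)
nn.init.constant_(conv.bias, 0)

print(f'expected std {std:.5f}, observed {conv.weight.std().item():.5f}')
```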
def _draw_samples(self,
batch_idx: int,
data_batch: dict,
data_samples: Sequence[ActionDataSample],
step: int = 0) -> None:
"""Visualize every ``self.interval`` samples from a data batch.
Args:
batch_idx (int): The index of the current batch in the val loop.
data_batch (dict): Data from dataloader.
outputs (Sequence[:obj:`ActionDataSample`]): Outputs from model.
step (int): Global step value to record. Defaults to 0.
"""
if self.enable is False:
return
batch_size = len(data_samples)
videos = data_batch['inputs']
start_idx = batch_size * batch_idx
end_idx = start_idx + batch_size
# The first index divisible by the interval, after the start index
first_sample_id = math.ceil(start_idx / self.interval) * self.interval
for sample_id in range(first_sample_id, end_idx, self.interval):
video = videos[sample_id - start_idx]
# move channel to the last
video = video.permute(1, 2, 3, 0).numpy().astype('uint8')
data_sample = data_samples[sample_id - start_idx]
if 'filename' in data_sample:
# osp.basename works on different platforms even file clients.
sample_name = osp.basename(data_sample.get('filename'))
elif 'frame_dir' in data_sample:
sample_name = osp.basename(data_sample.get('frame_dir'))
else:
sample_name = str(sample_id)
draw_args = self.draw_args
if self.out_dir is not None:
draw_args['out_path'] = self.file_client.join_path(
self.out_dir, f'{sample_name}_{step}')
self._visualizer.add_datasample(
sample_name,
video=video,
data_sample=data_sample,
step=step,
**self.draw_args,
) | Visualize every ``self.interval`` samples from a data batch.
Args:
batch_idx (int): The index of the current batch in the val loop.
data_batch (dict): Data from dataloader.
outputs (Sequence[:obj:`ActionDataSample`]): Outputs from model.
step (int): Global step value to record. Defaults to 0. | _draw_samples | python | open-mmlab/mmaction2 | mmaction/engine/hooks/visualization_hook.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/hooks/visualization_hook.py | Apache-2.0 |
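The index arithmetic in `_draw_samples` picks every `self.interval`-th sample by its global index, independent of batch boundaries. A quick standalone check with assumed batch size and interval:

```python
import math

interval, batch_size = 7, 4             # assumed hook/loader settings
for batch_idx in range(5):              # five consecutive val batches
    start_idx = batch_size * batch_idx
    end_idx = start_idx + batch_size
    first_sample_id = math.ceil(start_idx / interval) * interval
    picked = list(range(first_sample_id, end_idx, interval))
    print(f'batch {batch_idx}: global ids {start_idx}-{end_idx - 1} -> draw {picked}')
# every 7th global sample (0, 7, 14, ...) is drawn exactly once across batches
```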
def after_val_iter(self, runner: Runner, batch_idx: int, data_batch: dict,
outputs: Sequence[ActionDataSample]) -> None:
"""Visualize every ``self.interval`` samples during validation.
Args:
runner (:obj:`Runner`): The runner of the validation process.
batch_idx (int): The index of the current batch in the val loop.
data_batch (dict): Data from dataloader.
outputs (Sequence[:obj:`ActionDataSample`]): Outputs from model.
"""
if isinstance(runner.train_loop, EpochBasedTrainLoop):
step = runner.epoch
else:
step = runner.iter
self._draw_samples(batch_idx, data_batch, outputs, step=step) | Visualize every ``self.interval`` samples during validation.
Args:
runner (:obj:`Runner`): The runner of the validation process.
batch_idx (int): The index of the current batch in the val loop.
data_batch (dict): Data from dataloader.
outputs (Sequence[:obj:`ActionDataSample`]): Outputs from model. | after_val_iter | python | open-mmlab/mmaction2 | mmaction/engine/hooks/visualization_hook.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/hooks/visualization_hook.py | Apache-2.0 |
def after_test_iter(self, runner: Runner, batch_idx: int, data_batch: dict,
outputs: Sequence[ActionDataSample]) -> None:
"""Visualize every ``self.interval`` samples during test.
Args:
runner (:obj:`Runner`): The runner of the testing process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (dict): Data from dataloader.
outputs (Sequence[:obj:`DetDataSample`]): Outputs from model.
"""
self._draw_samples(batch_idx, data_batch, outputs, step=0) | Visualize every ``self.interval`` samples during test.
Args:
runner (:obj:`Runner`): The runner of the testing process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (dict): Data from dataloader.
outputs (Sequence[:obj:`DetDataSample`]): Outputs from model. | after_test_iter | python | open-mmlab/mmaction2 | mmaction/engine/hooks/visualization_hook.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/hooks/visualization_hook.py | Apache-2.0 |
def get_layer_id_for_vit(var_name: str, max_layer_id: int) -> int:
"""Get the layer id to set the different learning rates for ViT.
Args:
var_name (str): The key of the model.
num_max_layer (int): Maximum number of backbone layers.
Returns:
int: Returns the layer id of the key.
"""
if var_name in ('backbone.cls_token', 'backbone.mask_token',
'backbone.pos_embed'):
return 0
elif var_name.startswith('backbone.patch_embed'):
return 0
elif var_name.startswith('backbone.blocks'):
layer_id = int(var_name.split('.')[2])
return layer_id + 1
else:
return max_layer_id + 1 | Get the layer id to set the different learning rates for ViT.
Args:
var_name (str): The key of the model.
num_max_layer (int): Maximum number of backbone layers.
Returns:
int: Returns the layer id of the key. | get_layer_id_for_vit | python | open-mmlab/mmaction2 | mmaction/engine/optimizers/layer_decay_optim_wrapper_constructor.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/optimizers/layer_decay_optim_wrapper_constructor.py | Apache-2.0 |
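A short sketch of the mapping `get_layer_id_for_vit` produces; the function is re-declared locally so the snippet runs without mmaction2 installed, and the parameter names are typical ViT keys chosen for illustration.

```python
def get_layer_id_for_vit(var_name, max_layer_id):
    # mirrors the function above
    if var_name in ('backbone.cls_token', 'backbone.mask_token',
                    'backbone.pos_embed'):
        return 0
    if var_name.startswith('backbone.patch_embed'):
        return 0
    if var_name.startswith('backbone.blocks'):
        return int(var_name.split('.')[2]) + 1
    return max_layer_id + 1

for name in ['backbone.pos_embed',
             'backbone.patch_embed.projection.weight',
             'backbone.blocks.0.attn.qkv.weight',
             'backbone.blocks.11.mlp.fc2.bias',
             'cls_head.fc_cls.weight']:
    print(f'{name} -> layer id {get_layer_id_for_vit(name, max_layer_id=12)}')
# 0, 0, 1, 12, 13: embeddings first, transformer blocks next, head last
```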
def get_layer_id_for_mvit(var_name, max_layer_id):
"""Get the layer id to set the different learning rates in ``layer_wise``
decay_type.
Args:
var_name (str): The key of the model.
max_layer_id (int): Maximum layer id.
Returns:
int: The id number corresponding to different learning rate in
``LearningRateDecayOptimizerConstructor``.
"""
if var_name in ('backbone.cls_token', 'backbone.mask_token',
'backbone.pos_embed'):
return 0
elif var_name.startswith('backbone.patch_embed'):
return 0
elif var_name.startswith('backbone.blocks'):
layer_id = int(var_name.split('.')[2]) + 1
return layer_id
else:
return max_layer_id + 1 | Get the layer id to set the different learning rates in ``layer_wise``
decay_type.
Args:
var_name (str): The key of the model.
max_layer_id (int): Maximum layer id.
Returns:
int: The id number corresponding to different learning rate in
``LearningRateDecayOptimizerConstructor``. | get_layer_id_for_mvit | python | open-mmlab/mmaction2 | mmaction/engine/optimizers/layer_decay_optim_wrapper_constructor.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/optimizers/layer_decay_optim_wrapper_constructor.py | Apache-2.0 |
def add_params(self, params: List[dict], module: nn.Module,
**kwargs) -> None:
"""Add all parameters of module to the params list.
The parameters of the given module will be added to the list of param
groups, with specific rules defined by paramwise_cfg.
Args:
params (list[dict]): A list of param groups, it will be modified
in place.
module (nn.Module): The module to be added.
"""
logger = MMLogger.get_current_instance()
parameter_groups = {}
logger.info(f'self.paramwise_cfg is {self.paramwise_cfg}')
num_layers = self.paramwise_cfg.get('num_layers')
decay_rate = self.paramwise_cfg.get('decay_rate')
decay_type = self.paramwise_cfg.get('decay_type', 'layer_wise')
logger.info('Build LearningRateDecayOptimizerConstructor '
f'{decay_type} {decay_rate} - {num_layers}')
weight_decay = self.base_wd
for m in module.modules():
assert not isinstance(m, nn.modules.batchnorm._NormBase
), 'BN is not supported with layer decay'
for name, param in module.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith('.bias'):
group_name = 'no_decay'
this_weight_decay = 0.
else:
group_name = 'decay'
this_weight_decay = weight_decay
if 'layer_wise' in decay_type:
if 'MViT' in module.backbone.__class__.__name__:
layer_id = get_layer_id_for_mvit(
name, self.paramwise_cfg.get('num_layers'))
logger.info(f'set param {name} as id {layer_id}')
elif 'VisionTransformer' in module.backbone.__class__.__name__:
layer_id = get_layer_id_for_vit(name, num_layers)
logger.info(f'set param {name} as id {layer_id}')
else:
raise NotImplementedError()
else:
raise NotImplementedError(f'Only support layer wise decay,'
f'but got {decay_type}')
group_name = f'layer_{layer_id}_{group_name}'
if group_name not in parameter_groups:
scale = decay_rate**(num_layers - layer_id + 1)
parameter_groups[group_name] = {
'weight_decay': this_weight_decay,
'params': [],
'param_names': [],
'lr_scale': scale,
'group_name': group_name,
'lr': scale * self.base_lr,
}
parameter_groups[group_name]['params'].append(param)
parameter_groups[group_name]['param_names'].append(name)
rank, _ = get_dist_info()
if rank == 0:
to_display = {}
for key in parameter_groups:
to_display[key] = {
'param_names': parameter_groups[key]['param_names'],
'lr_scale': parameter_groups[key]['lr_scale'],
'lr': parameter_groups[key]['lr'],
'weight_decay': parameter_groups[key]['weight_decay'],
}
logger.info(f'Param groups = {json.dumps(to_display, indent=2)}')
params.extend(parameter_groups.values()) | Add all parameters of module to the params list.
The parameters of the given module will be added to the list of param
groups, with specific rules defined by paramwise_cfg.
Args:
params (list[dict]): A list of param groups, it will be modified
in place.
module (nn.Module): The module to be added. | add_params | python | open-mmlab/mmaction2 | mmaction/engine/optimizers/layer_decay_optim_wrapper_constructor.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/optimizers/layer_decay_optim_wrapper_constructor.py | Apache-2.0 |
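Inside `add_params`, every group's learning rate is scaled by `decay_rate ** (num_layers - layer_id + 1)`. A quick numeric sketch with assumed config values shows how early layers end up with the smallest rates:

```python
base_lr, decay_rate, num_layers = 1e-3, 0.75, 12    # assumed config values
for layer_id in range(num_layers + 2):              # ids 0 .. num_layers + 1
    scale = decay_rate ** (num_layers - layer_id + 1)
    print(f'layer {layer_id:2d}: lr_scale {scale:.4f}, lr {scale * base_lr:.2e}')
# layer 0 (embeddings) gets the smallest rate; the head (id num_layers + 1)
# keeps the base learning rate, since its scale is decay_rate ** 0 == 1.0
```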
def add_params(self,
params: List[dict],
module: nn.Module,
prefix: str = 'base',
**kwargs) -> None:
"""Add all parameters of module to the params list.
The parameters of the given module will be added to the list of param
groups, with specific rules defined by paramwise_cfg.
Args:
params (list[dict]): A list of param groups, it will be modified
in place.
module (nn.Module): The module to be added.
prefix (str): The prefix of the module. Defaults to ``'base'``.
"""
for name, param in module.named_parameters(recurse=False):
param_group = {'params': [param]}
if not param.requires_grad:
params.append(param_group)
continue
param_group['lr'] = self.base_lr
if self.base_wd is not None:
param_group['weight_decay'] = self.base_wd
processing_keys = [
key for key in self.paramwise_cfg if key in f'{prefix}.{name}'
]
if processing_keys:
param_group['lr'] *= \
reduce(mul, [self.paramwise_cfg[key].get('lr_mult', 1.)
for key in processing_keys])
if self.base_wd is not None:
param_group['weight_decay'] *= \
reduce(mul, [self.paramwise_cfg[key].
get('decay_mult', 1.)
for key in processing_keys])
params.append(param_group)
for key, value in param_group.items():
if key == 'params':
continue
full_name = f'{prefix}.{name}' if prefix else name
print_log(
f'paramwise_options -- '
f'{full_name}: {key} = {round(value, 8)}',
logger='current')
for child_name, child_mod in module.named_children():
child_prefix = f'{prefix}.{child_name}' if prefix else child_name
self.add_params(params, child_mod, prefix=child_prefix) | Add all parameters of module to the params list.
The parameters of the given module will be added to the list of param
groups, with specific rules defined by paramwise_cfg.
Args:
params (list[dict]): A list of param groups, it will be modified
in place.
module (nn.Module): The module to be added.
prefix (str): The prefix of the module. Defaults to ``'base'``. | add_params | python | open-mmlab/mmaction2 | mmaction/engine/optimizers/swin_optim_wrapper_constructor.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/optimizers/swin_optim_wrapper_constructor.py | Apache-2.0 |
def add_params(self, params, model, **kwargs):
"""Add parameters and their corresponding lr and wd to the params.
Args:
params (list): The list to be modified, containing all parameter
groups and their corresponding lr and wd configurations.
model (nn.Module): The model to be trained with the optimizer.
"""
# use fc_lr5 to determine whether to specify higher multi-factor
# for fc layer weights and bias.
fc_lr5 = self.paramwise_cfg['fc_lr5']
first_conv_weight = []
first_conv_bias = []
normal_weight = []
normal_bias = []
lr5_weight = []
lr10_bias = []
bn = []
conv_cnt = 0
for m in model.modules():
if isinstance(m, _ConvNd):
m_params = list(m.parameters())
conv_cnt += 1
if conv_cnt == 1:
first_conv_weight.append(m_params[0])
if len(m_params) == 2:
first_conv_bias.append(m_params[1])
else:
normal_weight.append(m_params[0])
if len(m_params) == 2:
normal_bias.append(m_params[1])
elif isinstance(m, torch.nn.Linear):
m_params = list(m.parameters())
normal_weight.append(m_params[0])
if len(m_params) == 2:
normal_bias.append(m_params[1])
elif isinstance(m,
(_BatchNorm, SyncBatchNorm_, torch.nn.GroupNorm)):
for param in list(m.parameters()):
if param.requires_grad:
bn.append(param)
elif len(m._modules) == 0:
if len(list(m.parameters())) > 0:
raise ValueError(f'New atomic module type: {type(m)}. '
'Need to give it a learning policy')
# pop the cls_head fc layer params
last_fc_weight = normal_weight.pop()
last_fc_bias = normal_bias.pop()
if fc_lr5:
lr5_weight.append(last_fc_weight)
lr10_bias.append(last_fc_bias)
else:
normal_weight.append(last_fc_weight)
normal_bias.append(last_fc_bias)
params.append({
'params': first_conv_weight,
'lr': self.base_lr,
'weight_decay': self.base_wd
})
params.append({
'params': first_conv_bias,
'lr': self.base_lr * 2,
'weight_decay': 0
})
params.append({
'params': normal_weight,
'lr': self.base_lr,
'weight_decay': self.base_wd
})
params.append({
'params': normal_bias,
'lr': self.base_lr * 2,
'weight_decay': 0
})
params.append({'params': bn, 'lr': self.base_lr, 'weight_decay': 0})
params.append({
'params': lr5_weight,
'lr': self.base_lr * 5,
'weight_decay': self.base_wd
})
params.append({
'params': lr10_bias,
'lr': self.base_lr * 10,
'weight_decay': 0
}) | Add parameters and their corresponding lr and wd to the params.
Args:
params (list): The list to be modified, containing all parameter
groups and their corresponding lr and wd configurations.
model (nn.Module): The model to be trained with the optimizer. | add_params | python | open-mmlab/mmaction2 | mmaction/engine/optimizers/tsm_optim_wrapper_constructor.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/optimizers/tsm_optim_wrapper_constructor.py | Apache-2.0 |
def run(self) -> dict:
"""Launch val."""
self.runner.call_hook('before_val')
self.runner.call_hook('before_val_epoch')
self.runner.model.eval()
feats_local = []
data_samples_local = []
for idx, data_batch in enumerate(self.dataloader):
with torch.no_grad():
self.runner.call_hook(
'before_val_iter', batch_idx=idx, data_batch=data_batch)
# predictions should be sequence of BaseDataElement
with autocast(enabled=self.fp16):
if is_model_wrapper(self.runner.model):
data_preprocessor = self.runner.model.module.data_preprocessor # noqa: E501
else:
data_preprocessor = self.runner.model.data_preprocessor
# get features for retrieval instead of data samples
data_batch = data_preprocessor(data_batch, False)
feats = self.runner.model._run_forward(
data_batch, mode='tensor')
feats_local.append(feats)
data_samples_local.extend(data_batch['data_samples'])
self.runner.call_hook(
'after_val_iter',
batch_idx=idx,
data_batch=data_batch,
outputs=feats)
# concatenate different features
feats_local = {
k: torch.cat([dic[k] for dic in feats_local])
for k in feats_local[0]
}
# get predictions
if is_model_wrapper(self.runner.model):
predict_all_fn = self.runner.model.module.predict_all
else:
predict_all_fn = self.runner.model.predict_all
num_videos = self.dataloader.dataset.num_videos
num_texts = self.dataloader.dataset.num_texts
with torch.no_grad():
with autocast(enabled=self.fp16):
i2t_data_samples, t2i_data_samples = predict_all_fn(
feats_local,
data_samples_local,
num_images=num_videos,
num_texts=num_texts,
)
# process in evaluator and compute metrics
self.evaluator.process(i2t_data_samples, None)
i2t_metrics = self.evaluator.evaluate(num_videos)
i2t_metrics = {f'i2t/{k}': v for k, v in i2t_metrics.items()}
self.evaluator.process(t2i_data_samples, None)
t2i_metrics = self.evaluator.evaluate(num_texts)
t2i_metrics = {f't2i/{k}': v for k, v in t2i_metrics.items()}
metrics = {**i2t_metrics, **t2i_metrics}
self.runner.call_hook('after_val_epoch', metrics=metrics)
self.runner.call_hook('after_val')
return metrics | Launch val. | run | python | open-mmlab/mmaction2 | mmaction/engine/runner/retrieval_loop.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/runner/retrieval_loop.py | Apache-2.0 |
def run(self) -> dict:
"""Launch test."""
self.runner.call_hook('before_test')
self.runner.call_hook('before_test_epoch')
self.runner.model.eval()
feats_local = []
data_samples_local = []
for idx, data_batch in enumerate(self.dataloader):
with torch.no_grad():
self.runner.call_hook(
'before_test_iter', batch_idx=idx, data_batch=data_batch)
# predictions should be sequence of BaseDataElement
with autocast(enabled=self.fp16):
if is_model_wrapper(self.runner.model):
data_preprocessor = self.runner.model.module.data_preprocessor # noqa: E501
else:
data_preprocessor = self.runner.model.data_preprocessor
# get features for retrieval instead of data samples
data_batch = data_preprocessor(data_batch, False)
feats = self.runner.model._run_forward(
data_batch, mode='tensor')
feats_local.append(feats)
data_samples_local.extend(data_batch['data_samples'])
self.runner.call_hook(
'after_test_iter',
batch_idx=idx,
data_batch=data_batch,
outputs=feats)
# concatenate different features
feats_local = {
k: torch.cat([dic[k] for dic in feats_local])
for k in feats_local[0]
}
# get predictions
if is_model_wrapper(self.runner.model):
predict_all_fn = self.runner.model.module.predict_all
else:
predict_all_fn = self.runner.model.predict_all
num_videos = self.dataloader.dataset.num_videos
num_texts = self.dataloader.dataset.num_texts
with torch.no_grad():
with autocast(enabled=self.fp16):
i2t_data_samples, t2i_data_samples = predict_all_fn(
feats_local,
data_samples_local,
num_images=num_videos,
num_texts=num_texts,
)
# process in evaluator and compute metrics
self.evaluator.process(i2t_data_samples, None)
i2t_metrics = self.evaluator.evaluate(num_videos)
i2t_metrics = {f'i2t/{k}': v for k, v in i2t_metrics.items()}
self.evaluator.process(t2i_data_samples, None)
t2i_metrics = self.evaluator.evaluate(num_texts)
t2i_metrics = {f't2i/{k}': v for k, v in t2i_metrics.items()}
metrics = {**i2t_metrics, **t2i_metrics}
self.runner.call_hook('after_test_epoch', metrics=metrics)
self.runner.call_hook('after_test')
return metrics | Launch test. | run | python | open-mmlab/mmaction2 | mmaction/engine/runner/retrieval_loop.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/runner/retrieval_loop.py | Apache-2.0 |
def num_loaders(self):
"""The number of dataloaders."""
return len(self._dataloaders) | The number of dataloaders. | num_loaders | python | open-mmlab/mmaction2 | mmaction/engine/runner/multi_loop.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/runner/multi_loop.py | Apache-2.0 |
def __iter__(self):
"""Return self when executing __iter__."""
return self | Return self when executing __iter__. | __iter__ | python | open-mmlab/mmaction2 | mmaction/engine/runner/multi_loop.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/runner/multi_loop.py | Apache-2.0 |
def __next__(self):
"""Get the next iter's data of multiple loaders."""
data = tuple([next(loader) for loader in self.iter_loaders])
return data | Get the next iter's data of multiple loaders. | __next__ | python | open-mmlab/mmaction2 | mmaction/engine/runner/multi_loop.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/runner/multi_loop.py | Apache-2.0 |
def __len__(self):
"""Get the length of loader."""
return min([len(loader) for loader in self._dataloaders]) | Get the length of loader. | __len__ | python | open-mmlab/mmaction2 | mmaction/engine/runner/multi_loop.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/runner/multi_loop.py | Apache-2.0 |
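Taken together, `__iter__`, `__next__` and `__len__` make `EpochMultiLoader` behave like a `zip` over its dataloaders that stops at the shortest one. A self-contained sketch with a minimal stand-in class and two toy PyTorch dataloaders (all names and sizes below are assumptions):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

class EpochMultiLoader:
    """Minimal stand-in mirroring the methods documented above."""

    def __init__(self, dataloaders):
        self._dataloaders = dataloaders
        self.iter_loaders = [iter(loader) for loader in self._dataloaders]

    def __iter__(self):
        return self

    def __next__(self):
        # list comprehension (not a generator) so StopIteration ends the loop
        return tuple([next(loader) for loader in self.iter_loaders])

    def __len__(self):
        return min(len(loader) for loader in self._dataloaders)

loader_a = DataLoader(TensorDataset(torch.arange(8).float()), batch_size=2)
loader_b = DataLoader(TensorDataset(torch.arange(6).float()), batch_size=2)
multi = EpochMultiLoader([loader_a, loader_b])
print(len(multi))               # 3 -- the shorter loader decides
for batch_a, batch_b in multi:  # iteration stops once loader_b is exhausted
    print(batch_a[0].tolist(), batch_b[0].tolist())
```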
def run_epoch(self) -> None:
"""Iterate one epoch."""
self.runner.call_hook('before_train_epoch')
self.runner.model.train()
gc.collect()
for loader in self.multi_loaders:
if hasattr(loader, 'sampler') and hasattr(loader.sampler,
'set_epoch'):
loader.sampler.set_epoch(self._epoch)
for idx, data_batch in enumerate(EpochMultiLoader(self.multi_loaders)):
self.run_iter(idx, data_batch)
self.runner.call_hook('after_train_epoch')
self._epoch += 1 | Iterate one epoch. | run_epoch | python | open-mmlab/mmaction2 | mmaction/engine/runner/multi_loop.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/runner/multi_loop.py | Apache-2.0 |
def _get_adaptive_scale(img_shape: Tuple[int, int],
min_scale: float = 0.3,
max_scale: float = 3.0) -> float:
"""Get adaptive scale according to frame shape.
The target scale depends on the short edge length of the frame. If the
short edge length equals 224, the output is 1.0, and the output scales
linearly with the short edge length.
You can also specify the minimum scale and the maximum scale to limit the
linear scale.
Args:
img_shape (Tuple[int, int]): The shape of the canvas frame.
min_scale (float): The minimum scale. Defaults to 0.3.
max_scale (float): The maximum scale. Defaults to 3.0.
Returns:
float: The adaptive scale.
"""
short_edge_length = min(img_shape)
scale = short_edge_length / 224.
return min(max(scale, min_scale), max_scale) | Get adaptive scale according to frame shape.
The target scale depends on the short edge length of the frame. If the
short edge length equals 224, the output is 1.0, and the output scales
linearly with the short edge length.
You can also specify the minimum scale and the maximum scale to limit the
linear scale.
Args:
img_shape (Tuple[int, int]): The shape of the canvas frame.
min_scale (float): The minimum scale. Defaults to 0.3.
max_scale (float): The maximum scale. Defaults to 3.0.
Returns:
float: The adaptive scale. | _get_adaptive_scale | python | open-mmlab/mmaction2 | mmaction/visualization/action_visualizer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/visualization/action_visualizer.py | Apache-2.0 |
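A quick numeric check of the clamped linear rule in `_get_adaptive_scale`, re-implemented locally so it runs standalone; the frame shapes are arbitrary examples.

```python
def get_adaptive_scale(img_shape, min_scale=0.3, max_scale=3.0):
    # same rule as _get_adaptive_scale above
    scale = min(img_shape) / 224.
    return min(max(scale, min_scale), max_scale)

for shape in [(56, 100), (224, 398), (1080, 1920)]:
    print(shape, '->', round(get_adaptive_scale(shape), 3))
# (56, 100) clamps to 0.3, (224, 398) gives 1.0, (1080, 1920) clamps to 3.0
```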
def _load_video(self,
video: Union[np.ndarray, Sequence[np.ndarray], str],
target_resolution: Optional[Tuple[int]] = None):
"""Load video from multiple source and convert to target resolution.
Args:
video (np.ndarray, str): The video to draw.
target_resolution (Tuple[int], optional): Set to
(desired_width desired_height) to have resized frames. If
either dimension is None, the frames are resized by keeping
the existing aspect ratio. Defaults to None.
"""
if isinstance(video, np.ndarray) or isinstance(video, list):
frames = video
elif isinstance(video, str):
# video file path
if isfile(video):
try:
import decord
except ImportError:
raise ImportError(
'Please install decord to load video file.')
video = decord.VideoReader(video)
frames = [x.asnumpy()[..., ::-1] for x in video]
# rawframes folder path
elif isdir(video):
frame_list = sorted(list_dir_or_file(video, list_dir=False))
frames = [mmcv.imread(join_path(video, x)) for x in frame_list]
else:
raise TypeError(f'type of video {type(video)} not supported')
if target_resolution is not None:
w, h = target_resolution
frame_h, frame_w, _ = frames[0].shape
if w == -1:
w = int(h / frame_h * frame_w)
if h == -1:
h = int(w / frame_w * frame_h)
frames = [mmcv.imresize(f, (w, h)) for f in frames]
return frames | Load video from multiple source and convert to target resolution.
Args:
video (np.ndarray, str): The video to draw.
target_resolution (Tuple[int], optional): Set to
(desired_width desired_height) to have resized frames. If
either dimension is None, the frames are resized by keeping
the existing aspect ratio. Defaults to None. | _load_video | python | open-mmlab/mmaction2 | mmaction/visualization/action_visualizer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/visualization/action_visualizer.py | Apache-2.0 |
def add_video(
self,
name: str,
image: np.ndarray,
step: int = 0,
fps: int = 4,
out_type: str = 'img',
) -> None:
"""Record the image.
Args:
name (str): The image identifier.
image (np.ndarray, optional): The image to be saved. The format
should be RGB. Default to None.
step (int): Global step value to record. Default to 0.
fps (int): Frames per second for saving video. Defaults to 4.
out_type (str): Output format type, choose from 'img', 'gif',
'video'. Defaults to ``'img'``.
"""
for vis_backend in self._vis_backends.values():
vis_backend.add_video(
name, image, step=step, fps=fps,
out_type=out_type) # type: ignore | Record the image.
Args:
name (str): The image identifier.
image (np.ndarray, optional): The image to be saved. The format
should be RGB. Default to None.
step (int): Global step value to record. Default to 0.
fps (int): Frames per second for saving video. Defaults to 4.
out_type (str): Output format type, choose from 'img', 'gif',
'video'. Defaults to ``'img'``. | add_video | python | open-mmlab/mmaction2 | mmaction/visualization/action_visualizer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/visualization/action_visualizer.py | Apache-2.0 |
def add_video(self,
name: str,
frames: np.ndarray,
step: int = 0,
fps: Optional[int] = 4,
out_type: Optional[int] = 'img',
**kwargs) -> None:
"""Record the frames of a video to disk.
Args:
name (str): The video identifier (frame folder).
frames (np.ndarray): The frames to be saved. The format
should be RGB. The shape should be (T, H, W, C).
step (int): Global step value to record. Defaults to 0.
out_type (str): Output format type, choose from 'img', 'gif',
'video'. Defaults to ``'img'``.
fps (int): Frames per second for saving video. Defaults to 4.
"""
assert frames.dtype == np.uint8
if out_type == 'img':
frames_dir = osp.join(self._save_dir, name, f'frames_{step}')
os.makedirs(frames_dir, exist_ok=True)
for idx, frame in enumerate(frames):
drawn_image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
save_file_name = f'{idx}.png'
cv2.imwrite(osp.join(frames_dir, save_file_name), drawn_image)
else:
try:
from moviepy.editor import ImageSequenceClip
except ImportError:
raise ImportError('Please install moviepy to enable '
'output file.')
frames = [x[..., ::-1] for x in frames]
video_clips = ImageSequenceClip(frames, fps=fps)
name = osp.splitext(name)[0]
if out_type == 'gif':
out_path = osp.join(self._save_dir, name + '.gif')
video_clips.write_gif(out_path, logger=None)
elif out_type == 'video':
out_path = osp.join(self._save_dir, name + '.mp4')
video_clips.write_videofile(
out_path, remove_temp=True, logger=None) | Record the frames of a video to disk.
Args:
name (str): The video identifier (frame folder).
frames (np.ndarray): The frames to be saved. The format
should be RGB. The shape should be (T, H, W, C).
step (int): Global step value to record. Defaults to 0.
out_type (str): Output format type, choose from 'img', 'gif',
'video'. Defaults to ``'img'``.
fps (int): Frames per second for saving video. Defaults to 4. | add_video | python | open-mmlab/mmaction2 | mmaction/visualization/video_backend.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/visualization/video_backend.py | Apache-2.0 |
def add_video(self,
name: str,
frames: np.ndarray,
fps: int = 4,
**kwargs) -> None:
"""Record the frames of a video to wandb.
Note that this requires the ``moviepy`` package.
Args:
name (str): The video identifier (frame folder).
frames (np.ndarray): The frames to be saved. The format
should be RGB. The shape should be (T, H, W, C).
step is a useless parameter that Wandb does not need.
fps (int): Frames per second. Defaults to 4.
"""
frames = frames.transpose(0, 3, 1, 2)
self._wandb.log({'video': wandb.Video(frames, fps=fps, format='gif')}) | Record the frames of a video to wandb.
Note that this requires the ``moviepy`` package.
Args:
name (str): The video identifier (frame folder).
frames (np.ndarray): The frames to be saved. The format
should be RGB. The shape should be (T, H, W, C).
step is a useless parameter that Wandb does not need.
fps (int): Frames per second. Defaults to 4. | add_video | python | open-mmlab/mmaction2 | mmaction/visualization/video_backend.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/visualization/video_backend.py | Apache-2.0 |
def add_video(self,
name: str,
frames: np.ndarray,
step: int = 0,
fps: int = 4,
**kwargs) -> None:
"""Record the frames of a video to tensorboard.
Note that this requires the ``moviepy`` package.
Args:
name (str): The video identifier (frame folder).
frames (np.ndarray): The frames to be saved. The format
should be RGB. The shape should be (T, H, W, C).
step (int): Global step value to record. Defaults to 0.
fps (int): Frames per second. Defaults to 4.
"""
frames = frames.transpose(0, 3, 1, 2)
frames = frames.reshape(1, *frames.shape)
self._tensorboard.add_video(name, frames, global_step=step, fps=fps) | Record the frames of a video to tensorboard.
Note that this requires the ``moviepy`` package.
Args:
name (str): The video identifier (frame folder).
frames (np.ndarray): The frames to be saved. The format
should be RGB. The shape should be (T, H, W, C).
step (int): Global step value to record. Defaults to 0.
fps (int): Frames per second. Defaults to 4. | add_video | python | open-mmlab/mmaction2 | mmaction/visualization/video_backend.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/visualization/video_backend.py | Apache-2.0 |
def format_label(value: LABEL_TYPE) -> torch.Tensor:
"""Convert various python types to label-format tensor.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int`.
Args:
value (torch.Tensor | numpy.ndarray | Sequence | int): Label value.
Returns:
:obj:`torch.Tensor`: The formatted label tensor.
"""
# Handle single number
if isinstance(value, (torch.Tensor, np.ndarray)) and value.ndim == 0:
value = int(value.item())
if isinstance(value, np.ndarray):
value = torch.from_numpy(value).to(torch.long)
elif isinstance(value, Sequence) and not is_str(value):
value = torch.tensor(value).to(torch.long)
elif isinstance(value, int):
value = torch.LongTensor([value])
elif not isinstance(value, torch.Tensor):
raise TypeError(f'Type {type(value)} is not an available label type.')
return value | Convert various python types to label-format tensor.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int`.
Args:
value (torch.Tensor | numpy.ndarray | Sequence | int): Label value.
Returns:
:obj:`torch.Tensor`: The formatted label tensor. | format_label | python | open-mmlab/mmaction2 | mmaction/structures/action_data_sample.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/action_data_sample.py | Apache-2.0 |
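A standalone sketch of the conversions `format_label` performs. The logic above is re-implemented locally (so the snippet runs without mmaction2 installed) and exercised on a few typical inputs.

```python
import numpy as np
import torch

def to_label_tensor(value):
    # mirrors format_label: scalars, arrays and sequences become a LongTensor
    if isinstance(value, (torch.Tensor, np.ndarray)) and value.ndim == 0:
        value = int(value.item())
    if isinstance(value, np.ndarray):
        return torch.from_numpy(value).to(torch.long)
    if isinstance(value, (list, tuple)):
        return torch.tensor(value).to(torch.long)
    if isinstance(value, int):
        return torch.LongTensor([value])
    if isinstance(value, torch.Tensor):
        return value                      # non-scalar tensors pass through
    raise TypeError(f'Type {type(value)} is not an available label type.')

print(to_label_tensor(3))               # tensor([3])
print(to_label_tensor(np.array(7)))     # tensor([7])       (0-dim array)
print(to_label_tensor([1, 4, 2]))       # tensor([1, 4, 2]) (multi-label)
print(to_label_tensor(torch.tensor(5))) # tensor([5])       (0-dim tensor)
```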
def format_score(value: SCORE_TYPE) -> Union[torch.Tensor, Dict]:
"""Convert various python types to score-format tensor.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`.
Args:
value (torch.Tensor | numpy.ndarray | Sequence | dict):
Score values or dict of scores values.
Returns:
:obj:`torch.Tensor` | dict: The formatted scores.
"""
if isinstance(value, np.ndarray):
value = torch.from_numpy(value).float()
elif isinstance(value, Sequence) and not is_str(value):
value = torch.tensor(value).float()
elif isinstance(value, dict):
for k, v in value.items():
value[k] = format_score(v)
elif not isinstance(value, torch.Tensor):
raise TypeError(f'Type {type(value)} is not an available score type.')
return value | Convert various python types to score-format tensor.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`.
Args:
value (torch.Tensor | numpy.ndarray | Sequence | dict):
Score values or dict of scores values.
Returns:
:obj:`torch.Tensor` | dict: The formatted scores. | format_score | python | open-mmlab/mmaction2 | mmaction/structures/action_data_sample.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/action_data_sample.py | Apache-2.0 |
def set_gt_label(self, value: LABEL_TYPE) -> 'ActionDataSample':
"""Set `gt_label``."""
self.set_field(format_label(value), 'gt_label', dtype=torch.Tensor)
return self | Set ``gt_label``. | set_gt_label | python | open-mmlab/mmaction2 | mmaction/structures/action_data_sample.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/action_data_sample.py | Apache-2.0 |
def set_pred_label(self, value: LABEL_TYPE) -> 'ActionDataSample':
"""Set ``pred_label``."""
self.set_field(format_label(value), 'pred_label', dtype=torch.Tensor)
return self | Set ``pred_label``. | set_pred_label | python | open-mmlab/mmaction2 | mmaction/structures/action_data_sample.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/action_data_sample.py | Apache-2.0 |
def set_pred_score(self, value: SCORE_TYPE) -> 'ActionDataSample':
"""Set score of ``pred_label``."""
score = format_score(value)
self.set_field(score, 'pred_score')
if hasattr(self, 'num_classes'):
assert len(score) == self.num_classes, \
f'The length of score {len(score)} should be '\
f'equal to the num_classes {self.num_classes}.'
else:
self.set_field(
name='num_classes', value=len(score), field_type='metainfo')
return self | Set score of ``pred_label``. | set_pred_score | python | open-mmlab/mmaction2 | mmaction/structures/action_data_sample.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/action_data_sample.py | Apache-2.0 |
def proposals(self):
"""Property of `proposals`"""
return self._proposals | Property of `proposals` | proposals | python | open-mmlab/mmaction2 | mmaction/structures/action_data_sample.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/action_data_sample.py | Apache-2.0 |
def proposals(self, value):
"""Setter of `proposals`"""
self.set_field(value, '_proposals', dtype=InstanceData) | Setter of `proposals` | proposals | python | open-mmlab/mmaction2 | mmaction/structures/action_data_sample.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/action_data_sample.py | Apache-2.0 |
def proposals(self):
"""Deleter of `proposals`"""
del self._proposals | Deleter of `proposals` | proposals | python | open-mmlab/mmaction2 | mmaction/structures/action_data_sample.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/action_data_sample.py | Apache-2.0 |
def gt_instances(self):
"""Property of `gt_instances`"""
return self._gt_instances | Property of `gt_instances` | gt_instances | python | open-mmlab/mmaction2 | mmaction/structures/action_data_sample.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/action_data_sample.py | Apache-2.0 |
def gt_instances(self, value):
"""Setter of `gt_instances`"""
self.set_field(value, '_gt_instances', dtype=InstanceData) | Setter of `gt_instances` | gt_instances | python | open-mmlab/mmaction2 | mmaction/structures/action_data_sample.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/action_data_sample.py | Apache-2.0 |
def gt_instances(self):
"""Deleter of `gt_instances`"""
del self._gt_instances | Deleter of `gt_instances` | gt_instances | python | open-mmlab/mmaction2 | mmaction/structures/action_data_sample.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/action_data_sample.py | Apache-2.0 |
def features(self):
"""Setter of `features`"""
return self._features | Property of `features` | features | python | open-mmlab/mmaction2 | mmaction/structures/action_data_sample.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/action_data_sample.py | Apache-2.0 |
def features(self, value):
"""Setter of `features`"""
self.set_field(value, '_features', dtype=InstanceData) | Setter of `features` | features | python | open-mmlab/mmaction2 | mmaction/structures/action_data_sample.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/action_data_sample.py | Apache-2.0 |
def features(self):
"""Deleter of `features`"""
del self._features | Deleter of `features` | features | python | open-mmlab/mmaction2 | mmaction/structures/action_data_sample.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/action_data_sample.py | Apache-2.0 |
def bbox_target(pos_bboxes_list: List[torch.Tensor],
neg_bboxes_list: List[torch.Tensor],
gt_labels: List[torch.Tensor],
cfg: Union[dict, mmengine.ConfigDict]) -> tuple:
"""Generate classification targets for bboxes.
Args:
pos_bboxes_list (List[torch.Tensor]): Positive bboxes list.
neg_bboxes_list (List[torch.Tensor]): Negative bboxes list.
gt_labels (List[torch.Tensor]): Groundtruth classification label list.
cfg (dict | mmengine.ConfigDict): RCNN config.
Returns:
tuple: Label and label_weight for bboxes.
"""
labels, label_weights = [], []
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
assert len(pos_bboxes_list) == len(neg_bboxes_list) == len(gt_labels)
length = len(pos_bboxes_list)
for i in range(length):
pos_bboxes = pos_bboxes_list[i]
neg_bboxes = neg_bboxes_list[i]
gt_label = gt_labels[i]
num_pos = pos_bboxes.size(0)
num_neg = neg_bboxes.size(0)
num_samples = num_pos + num_neg
label = F.pad(gt_label, (0, 0, 0, num_neg))
label_weight = pos_bboxes.new_zeros(num_samples)
label_weight[:num_pos] = pos_weight
label_weight[-num_neg:] = 1.
labels.append(label)
label_weights.append(label_weight)
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
return labels, label_weights | Generate classification targets for bboxes.
Args:
pos_bboxes_list (List[torch.Tensor]): Positive bboxes list.
neg_bboxes_list (List[torch.Tensor]): Negative bboxes list.
gt_labels (List[torch.Tensor]): Groundtruth classification label list.
cfg (dict | mmengine.ConfigDict): RCNN config.
Returns:
tuple: Label and label_weight for bboxes. | bbox_target | python | open-mmlab/mmaction2 | mmaction/structures/bbox/bbox_target.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/bbox/bbox_target.py | Apache-2.0 |
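A toy walk-through of the label/weight assembly inside `bbox_target` for a single image: made-up box counts, a 4-class multilabel target, and `pos_weight <= 0` so positives keep weight 1.0. The padding step appends all-zero label rows for the negative boxes.

```python
import torch
import torch.nn.functional as F

num_pos, num_neg = 2, 3                         # assumed box counts
gt_label = torch.tensor([[1., 0., 0., 1.],      # multilabel targets of the
                         [0., 1., 0., 0.]])     # two positive boxes
pos_weight = 1.0                                # cfg.pos_weight <= 0 -> 1.0

# negatives get all-zero label rows appended below the positive rows
label = F.pad(gt_label, (0, 0, 0, num_neg))
label_weight = gt_label.new_zeros(num_pos + num_neg)
label_weight[:num_pos] = pos_weight
label_weight[-num_neg:] = 1.

print(label)          # shape (5, 4): two gt rows followed by three zero rows
print(label_weight)   # tensor([1., 1., 1., 1., 1.])
```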
def bbox2result(bboxes: torch.Tensor,
labels: torch.Tensor,
num_classes: int,
thr: float = 0.01) -> list:
"""Convert detection results to a list of numpy arrays.
This identifies single-label classification (as opposed to multi-label)
through the thr parameter which is set to a negative value.
Currently, the way to set this is to set ``test_cfg.rcnn.action_thr=-1.0``.
ToDo: The ideal way would be for this to be automatically set when the
model cfg uses multilabel=False, however this could be a breaking change
and is left as a future exercise.
NB - this should not interfere with the evaluation in any case.
Args:
bboxes (torch.Tensor): shape ``(n, 4)``.
labels (torch.Tensor): shape ``(n, num_classes)``.
num_classes (int): class number, including background class.
thr (float): The score threshold used when converting predictions to
detection results. If a single negative value, uses single-label
classification.
Returns:
List(ndarray): bbox results of each class.
"""
if bboxes.shape[0] == 0:
return list(np.zeros((num_classes - 1, 0, 5), dtype=np.float32))
bboxes = bboxes.cpu().numpy()
scores = labels.cpu().numpy() # rename for clarification
# Although we can handle single-label classification, we still want scores
assert scores.shape[-1] > 1
# Robustly check for multi/single-label:
if not hasattr(thr, '__len__'):
multilabel = thr >= 0
thr = (thr, ) * num_classes
else:
multilabel = True
# Check Shape
assert scores.shape[1] == num_classes
assert len(thr) == num_classes
result = []
for i in range(num_classes - 1):
if multilabel:
where = (scores[:, i + 1] > thr[i + 1])
else:
where = (scores[:, 1:].argmax(axis=1) == i)
result.append(
np.concatenate((bboxes[where, :4], scores[where, i + 1:i + 2]),
axis=1))
return result | Convert detection results to a list of numpy arrays.
This identifies single-label classification (as opposed to multi-label)
through the thr parameter which is set to a negative value.
Currently, the way to set this is to set ``test_cfg.rcnn.action_thr=-1.0``.
ToDo: The ideal way would be for this to be automatically set when the
model cfg uses multilabel=False, however this could be a breaking change
and is left as a future exercise.
NB - this should not interfere with the evaluation in any case.
Args:
bboxes (torch.Tensor): shape ``(n, 4)``.
labels (torch.Tensor): shape ``(n, num_classes)``.
num_classes (int): class number, including background class.
thr (float): The score threshold used when converting predictions to
detection results. If a single negative value, uses single-label
classification.
Returns:
List(ndarray): bbox results of each class. | bbox2result | python | open-mmlab/mmaction2 | mmaction/structures/bbox/transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/bbox/transforms.py | Apache-2.0 |
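A small end-to-end call of `bbox2result` on fabricated predictions: three boxes, three classes (background plus two actions), multilabel threshold 0.5. The import path is an assumption based on the row's `path` column; if it differs, copying the function above into the local namespace works the same.

```python
import torch
# Assumption: bbox2result is importable from this path when mmaction2 is
# installed; otherwise copy the function above into the local namespace.
from mmaction.structures.bbox import bbox2result

bboxes = torch.tensor([[0.10, 0.10, 0.40, 0.50],
                       [0.20, 0.30, 0.90, 0.80],
                       [0.50, 0.50, 0.70, 0.90]])
# per-box scores over 3 classes: background + 2 action classes
scores = torch.tensor([[0.05, 0.90, 0.10],
                       [0.10, 0.20, 0.75],
                       [0.80, 0.40, 0.30]])

result = bbox2result(bboxes, scores, num_classes=3, thr=0.5)
for cls_idx, dets in enumerate(result, start=1):
    print(f'class {cls_idx}: {dets.shape[0]} detection(s)')
    print(dets)   # columns: x1, y1, x2, y2, score
```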
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True | Check if norm layer is in correct train state. | check_norm_state | python | open-mmlab/mmaction2 | mmaction/testing/_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/testing/_utils.py | Apache-2.0 |
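A tiny check of the helper on a toy module, re-implemented locally; switching the model to `eval()` flips the BatchNorm training flag and therefore the result.

```python
import torch.nn as nn
from torch.nn.modules.batchnorm import _BatchNorm

def check_norm_state(modules, train_state):
    # same logic as the helper above
    for mod in modules:
        if isinstance(mod, _BatchNorm) and mod.training != train_state:
            return False
    return True

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
print(check_norm_state(model.modules(), train_state=True))    # True
model.eval()                                                  # freezes BN
print(check_norm_state(model.modules(), train_state=True))    # False
print(check_norm_state(model.modules(), train_state=False))   # True
```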
def generate_backbone_demo_inputs(input_shape=(1, 3, 64, 64)):
"""Create a superset of inputs needed to run backbone.
Args:
input_shape (tuple): input batch dimensions.
Defaults to ``(1, 3, 64, 64)``.
"""
imgs = np.random.random(input_shape)
imgs = torch.FloatTensor(imgs)
return imgs | Create a superset of inputs needed to run backbone.
Args:
input_shape (tuple): input batch dimensions.
Defaults to ``(1, 3, 64, 64)``. | generate_backbone_demo_inputs | python | open-mmlab/mmaction2 | mmaction/testing/_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/testing/_utils.py | Apache-2.0 |
def generate_recognizer_demo_inputs(
input_shape=(1, 3, 3, 224, 224), model_type='2D'):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 3, 3, 224, 224).
model_type (str): Model type for data generation, from {'2D', '3D'}.
Default:'2D'
"""
if len(input_shape) == 5:
(N, L, _, _, _) = input_shape
elif len(input_shape) == 6:
(N, M, _, L, _, _) = input_shape
imgs = np.random.random(input_shape)
if model_type == '2D' or model_type == 'skeleton':
gt_labels = torch.LongTensor([2] * N)
elif model_type == '3D':
gt_labels = torch.LongTensor([2] * M)
elif model_type == 'audio':
gt_labels = torch.LongTensor([2] * L)
else:
raise ValueError(f'Data type {model_type} is not available')
inputs = {'imgs': torch.FloatTensor(imgs), 'gt_labels': gt_labels}
return inputs | Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 3, 3, 224, 224).
model_type (str): Model type for data generation, from {'2D', '3D'}.
Default:'2D' | generate_recognizer_demo_inputs | python | open-mmlab/mmaction2 | mmaction/testing/_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/testing/_utils.py | Apache-2.0 |
def get_cfg(config_type, fname):
"""Grab configs necessary to create a recognizer.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config_types = ('recognition', 'recognition_audio', 'localization',
'detection', 'skeleton', 'retrieval')
assert config_type in config_types
repo_dpath = osp.dirname(osp.dirname(osp.dirname(__file__)))
config_dpath = osp.join(repo_dpath, 'configs/' + config_type)
config_fpath = osp.join(config_dpath, fname)
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
config = mmengine.Config.fromfile(config_fpath)
return config | Grab configs necessary to create a recognizer.
These are deep copied to allow for safe modification of parameters without
influencing other tests. | get_cfg | python | open-mmlab/mmaction2 | mmaction/testing/_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/testing/_utils.py | Apache-2.0 |
def load_data_list(self) -> List[Dict]:
"""Load annotation file to get audio information."""
check_file_exist(self.ann_file)
data_list = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
filename = line_split[idx]
if self.data_prefix['audio'] is not None:
filename = osp.join(self.data_prefix['audio'], filename)
video_info['audio_path'] = filename
idx += 1
# idx for total_frames
video_info['total_frames'] = int(line_split[idx])
idx += 1
# idx for label
label = [int(x) for x in line_split[idx:]]
assert label, f'missing label in line: {line}'
if self.multi_class:
assert self.num_classes is not None
video_info['label'] = label
else:
assert len(label) == 1
video_info['label'] = label[0]
data_list.append(video_info)
return data_list | Load annotation file to get audio information. | load_data_list | python | open-mmlab/mmaction2 | mmaction/datasets/audio_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/audio_dataset.py | Apache-2.0 |
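The annotation format parsed above is one sample per line: a relative audio path, a total frame count, then one or more label indices. A standalone sketch of parsing a couple of such lines; the file names, prefix and labels are invented.

```python
import os.path as osp

ann_lines = [
    'audio_feature/video_001.npy 240 3',
    'audio_feature/video_002.npy 180 1 7',   # a multi-label sample
]
data_prefix = {'audio': 'data/kinetics400'}  # assumed prefix

data_list = []
for line in ann_lines:
    parts = line.strip().split()
    data_list.append({
        'audio_path': osp.join(data_prefix['audio'], parts[0]),
        'total_frames': int(parts[1]),
        'label': [int(x) for x in parts[2:]],
    })
print(data_list)
```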
def get_type(transform: Union[dict, Callable]) -> str:
"""get the type of the transform."""
if isinstance(transform, dict) and 'type' in transform:
return transform['type']
elif callable(transform):
return transform.__repr__().split('(')[0]
else:
raise TypeError | get the type of the transform. | get_type | python | open-mmlab/mmaction2 | mmaction/datasets/repeat_aug_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/repeat_aug_dataset.py | Apache-2.0 |
def prepare_data(self, idx) -> List[dict]:
"""Get data processed by ``self.pipeline``.
Reduce the video loading and decompressing.
Args:
idx (int): The index of ``data_info``.
Returns:
List[dict]: A list of length num_repeats.
"""
transforms = self.pipeline.transforms
data_info = self.get_data_info(idx)
data_info = transforms[0](data_info) # DecordInit
frame_inds_list, frame_inds_length = [], [0]
fake_data_info = dict(
total_frames=data_info['total_frames'],
start_index=data_info['start_index'])
if not self.sample_once:
for repeat in range(self.num_repeats):
data_info_ = transforms[1](fake_data_info) # SampleFrames
frame_inds = data_info_['frame_inds']
frame_inds_list.append(frame_inds.reshape(-1))
frame_inds_length.append(frame_inds.size +
frame_inds_length[-1])
else:
data_info_ = transforms[1](fake_data_info) # SampleFrames
frame_inds = data_info_['frame_inds']
for repeat in range(self.num_repeats):
frame_inds_list.append(frame_inds.reshape(-1))
frame_inds_length.append(frame_inds.size +
frame_inds_length[-1])
for key in data_info_:
data_info[key] = data_info_[key]
data_info['frame_inds'] = np.concatenate(frame_inds_list)
data_info = transforms[2](data_info) # DecordDecode
imgs = data_info.pop('imgs')
data_info_list = []
for repeat in range(self.num_repeats):
data_info_ = deepcopy(data_info)
start = frame_inds_length[repeat]
end = frame_inds_length[repeat + 1]
data_info_['imgs'] = imgs[start:end]
for transform in transforms[3:]:
data_info_ = transform(data_info_)
data_info_list.append(data_info_)
del imgs
return data_info_list | Get data processed by ``self.pipeline``.
Reduce the video loading and decompressing.
Args:
idx (int): The index of ``data_info``.
Returns:
List[dict]: A list of length num_repeats. | prepare_data | python | open-mmlab/mmaction2 | mmaction/datasets/repeat_aug_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/repeat_aug_dataset.py | Apache-2.0 |
def parse_img_record(self, img_records: List[dict]) -> tuple:
"""Merge image records of the same entity at the same time.
Args:
img_records (List[dict]): List of img_records (lines in AVA
annotations).
Returns:
Tuple(list): A tuple consists of lists of bboxes, action labels and
entity_ids.
"""
bboxes, labels, entity_ids = [], [], []
while len(img_records) > 0:
img_record = img_records[0]
num_img_records = len(img_records)
selected_records = [
x for x in img_records
if np.array_equal(x['entity_box'], img_record['entity_box'])
]
num_selected_records = len(selected_records)
img_records = [
x for x in img_records if
not np.array_equal(x['entity_box'], img_record['entity_box'])
]
assert len(img_records) + num_selected_records == num_img_records
bboxes.append(img_record['entity_box'])
valid_labels = np.array([
selected_record['label']
for selected_record in selected_records
])
# The format can be directly used by BCELossWithLogits
if self.multilabel:
label = np.zeros(self.num_classes, dtype=np.float32)
label[valid_labels] = 1.
else:
label = valid_labels
labels.append(label)
entity_ids.append(img_record['entity_id'])
bboxes = np.stack(bboxes)
labels = np.stack(labels)
entity_ids = np.stack(entity_ids)
return bboxes, labels, entity_ids | Merge image records of the same entity at the same time.
Args:
img_records (List[dict]): List of img_records (lines in AVA
annotations).
Returns:
Tuple(list): A tuple consists of lists of bboxes, action labels and
entity_ids. | parse_img_record | python | open-mmlab/mmaction2 | mmaction/datasets/ava_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/ava_dataset.py | Apache-2.0 |
def load_data_list(self) -> List[dict]:
"""Load AVA annotations."""
exists(self.ann_file)
data_list = []
records_dict_by_img = defaultdict(list)
fin = list_from_file(self.ann_file)
for line in fin:
line_split = line.strip().split(',')
label = int(line_split[6])
if self.custom_classes is not None:
if label not in self.custom_classes:
continue
label = self.custom_classes.index(label)
video_id = line_split[0]
timestamp = int(line_split[1]) # count by second or frame.
img_key = f'{video_id},{timestamp:04d}'
entity_box = np.array(list(map(float, line_split[2:6])))
entity_id = int(line_split[7])
if self.use_frames:
shot_info = (0, (self.timestamp_end - self.timestamp_start) *
self._FPS)
# for video data, automatically get shot info when decoding
else:
shot_info = None
video_info = dict(
video_id=video_id,
timestamp=timestamp,
entity_box=entity_box,
label=label,
entity_id=entity_id,
shot_info=shot_info)
records_dict_by_img[img_key].append(video_info)
for img_key in records_dict_by_img:
video_id, timestamp = img_key.split(',')
bboxes, labels, entity_ids = self.parse_img_record(
records_dict_by_img[img_key])
ann = dict(
gt_bboxes=bboxes, gt_labels=labels, entity_ids=entity_ids)
frame_dir = video_id
if self.data_prefix['img'] is not None:
frame_dir = osp.join(self.data_prefix['img'], frame_dir)
video_info = dict(
frame_dir=frame_dir,
video_id=video_id,
timestamp=int(timestamp),
img_key=img_key,
shot_info=shot_info,
fps=self._FPS,
ann=ann)
if not self.use_frames:
video_info['filename'] = video_info.pop('frame_dir')
data_list.append(video_info)
return data_list | Load AVA annotations. | load_data_list | python | open-mmlab/mmaction2 | mmaction/datasets/ava_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/ava_dataset.py | Apache-2.0 |
def filter_data(self) -> List[dict]:
"""Filter out records in the exclude_file."""
valid_indexes = []
if self.exclude_file is None:
valid_indexes = list(range(len(self.data_list)))
else:
exclude_video_infos = [
x.strip().split(',') for x in open(self.exclude_file)
]
for i, data_info in enumerate(self.data_list):
valid_indexes.append(i)
for video_id, timestamp in exclude_video_infos:
if (data_info['video_id'] == video_id
and data_info['timestamp'] == int(timestamp)):
valid_indexes.pop()
break
logger = MMLogger.get_current_instance()
logger.info(f'{len(valid_indexes)} out of {len(self.data_list)}'
f' frames are valid.')
data_list = [self.data_list[i] for i in valid_indexes]
return data_list | Filter out records in the exclude_file. | filter_data | python | open-mmlab/mmaction2 | mmaction/datasets/ava_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/ava_dataset.py | Apache-2.0 |
def get_data_info(self, idx: int) -> dict:
"""Get annotation by index."""
data_info = super().get_data_info(idx)
img_key = data_info['img_key']
data_info['filename_tmpl'] = self.filename_tmpl
data_info['timestamp_start'] = self.timestamp_start
data_info['timestamp_end'] = self.timestamp_end
if self.proposals is not None:
if img_key not in self.proposals:
data_info['proposals'] = np.array([[0, 0, 1, 1]])
data_info['scores'] = np.array([1])
else:
proposals = self.proposals[img_key]
assert proposals.shape[-1] in [4, 5]
if proposals.shape[-1] == 5:
thr = min(self.person_det_score_thr, max(proposals[:, 4]))
positive_inds = (proposals[:, 4] >= thr)
proposals = proposals[positive_inds]
proposals = proposals[:self.num_max_proposals]
data_info['proposals'] = proposals[:, :4]
data_info['scores'] = proposals[:, 4]
else:
proposals = proposals[:self.num_max_proposals]
data_info['proposals'] = proposals
assert data_info['proposals'].max() <= 1 and \
data_info['proposals'].min() >= 0, \
(f'relative proposals invalid: max value '
f'{data_info["proposals"].max()}, min value '
f'{data_info["proposals"].min()}')
ann = data_info.pop('ann')
data_info['gt_bboxes'] = ann['gt_bboxes']
data_info['gt_labels'] = ann['gt_labels']
data_info['entity_ids'] = ann['entity_ids']
return data_info | Get annotation by index. | get_data_info | python | open-mmlab/mmaction2 | mmaction/datasets/ava_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/ava_dataset.py | Apache-2.0 |
def parse_img_record(self, img_records: List[dict]) -> tuple:
"""Merge image records of the same entity at the same time.
Args:
img_records (List[dict]): List of img_records (lines in AVA
annotations).
Returns:
Tuple(list): A tuple consisting of lists of bboxes, action labels and
entity_ids.
"""
bboxes, labels, entity_ids = [], [], []
while len(img_records) > 0:
img_record = img_records[0]
num_img_records = len(img_records)
selected_records = [
x for x in img_records
if np.array_equal(x['entity_box'], img_record['entity_box'])
]
num_selected_records = len(selected_records)
img_records = [
x for x in img_records if
not np.array_equal(x['entity_box'], img_record['entity_box'])
]
assert len(img_records) + num_selected_records == num_img_records
bboxes.append(img_record['entity_box'])
valid_labels = np.array([
selected_record['label']
for selected_record in selected_records
])
# The format can be directly used by BCELossWithLogits
label = np.zeros(self.num_classes, dtype=np.float32)
label[valid_labels] = 1.
labels.append(label)
entity_ids.append(img_record['entity_id'])
bboxes = np.stack(bboxes)
labels = np.stack(labels)
entity_ids = np.stack(entity_ids)
return bboxes, labels, entity_ids | Merge image records of the same entity at the same time.
Args:
img_records (List[dict]): List of img_records (lines in AVA
annotations).
Returns:
Tuple(list): A tuple consisting of lists of bboxes, action labels and
entity_ids. | parse_img_record | python | open-mmlab/mmaction2 | mmaction/datasets/ava_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/ava_dataset.py | Apache-2.0 |
def filter_data(self) -> List[dict]:
"""Filter out records in the exclude_file."""
valid_indexes = []
if self.exclude_file is None:
valid_indexes = list(range(len(self.data_list)))
else:
exclude_video_infos = [
x.strip().split(',') for x in open(self.exclude_file)
]
for i, data_info in enumerate(self.data_list):
valid_indexes.append(i)
for video_id, timestamp in exclude_video_infos:
if (data_info['video_id'] == video_id
and data_info['timestamp'] == int(timestamp)):
valid_indexes.pop()
break
logger = MMLogger.get_current_instance()
logger.info(f'{len(valid_indexes)} out of {len(self.data_list)}'
f' frames are valid.')
data_list = [self.data_list[i] for i in valid_indexes]
return data_list | Filter out records in the exclude_file. | filter_data | python | open-mmlab/mmaction2 | mmaction/datasets/ava_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/ava_dataset.py | Apache-2.0 |
def load_data_list(self) -> List[dict]:
"""Load AVA annotations."""
exists(self.ann_file)
data_list = []
records_dict_by_img = defaultdict(list)
fin = list_from_file(self.ann_file)
for line in fin:
line_split = line.strip().split(',')
label = int(line_split[6])
if self.custom_classes is not None:
if label not in self.custom_classes:
continue
label = self.custom_classes.index(label)
video_id = line_split[0]
timestamp = int(line_split[1])
img_key = f'{video_id},{timestamp:04d}'
entity_box = np.array(list(map(float, line_split[2:6])))
entity_id = int(line_split[7])
start, end = self.get_timestamp(video_id)
shot_info = (1, (end - start) * self._FPS + 1)
video_info = dict(
video_id=video_id,
timestamp=timestamp,
entity_box=entity_box,
label=label,
entity_id=entity_id,
shot_info=shot_info)
records_dict_by_img[img_key].append(video_info)
for img_key in records_dict_by_img:
video_id, timestamp = img_key.split(',')
start, end = self.get_timestamp(video_id)
bboxes, labels, entity_ids = self.parse_img_record(
records_dict_by_img[img_key])
ann = dict(
gt_bboxes=bboxes, gt_labels=labels, entity_ids=entity_ids)
frame_dir = video_id
if self.data_prefix['img'] is not None:
frame_dir = osp.join(self.data_prefix['img'], frame_dir)
video_info = dict(
frame_dir=frame_dir,
video_id=video_id,
timestamp=int(timestamp),
timestamp_start=start,
timestamp_end=end,
img_key=img_key,
shot_info=shot_info,
fps=self._FPS,
ann=ann)
data_list.append(video_info)
return data_list | Load AVA annotations. | load_data_list | python | open-mmlab/mmaction2 | mmaction/datasets/ava_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/ava_dataset.py | Apache-2.0 |
def get_data_info(self, idx: int) -> dict:
"""Get annotation by index."""
data_info = super().get_data_info(idx)
img_key = data_info['img_key']
data_info['filename_tmpl'] = self.filename_tmpl
if 'timestamp_start' not in data_info:
data_info['timestamp_start'] = self.timestamp_start
data_info['timestamp_end'] = self.timestamp_end
if self.proposals is not None:
if len(img_key) == 16:
proposal_key = img_key
else:
video_id, timestamp = img_key.split(',')
vid = '_'.join(video_id.split('_')[:-2])
timestamp = int(timestamp)
proposal_key = f'{vid},{timestamp:04d}'
if proposal_key not in self.proposals:
data_info['proposals'] = np.array([[0, 0, 1, 1]])
data_info['scores'] = np.array([1])
else:
proposals = self.proposals[proposal_key]
assert proposals.shape[-1] in [4, 5]
if proposals.shape[-1] == 5:
thr = min(self.person_det_score_thr, max(proposals[:, 4]))
positive_inds = (proposals[:, 4] >= thr)
proposals = proposals[positive_inds]
proposals = proposals[:self.num_max_proposals]
data_info['proposals'] = proposals[:, :4]
data_info['scores'] = proposals[:, 4]
else:
proposals = proposals[:self.num_max_proposals]
data_info['proposals'] = proposals
ann = data_info.pop('ann')
data_info['gt_bboxes'] = ann['gt_bboxes']
data_info['gt_labels'] = ann['gt_labels']
data_info['entity_ids'] = ann['entity_ids']
return data_info | Get annotation by index. | get_data_info | python | open-mmlab/mmaction2 | mmaction/datasets/ava_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/ava_dataset.py | Apache-2.0 |
def load_data_list(self) -> List[dict]:
"""Load annotation file to get video information."""
exists(self.ann_file)
data_list = []
with open(self.ann_file) as f:
anno_database = f.readlines()
for item in anno_database:
first_part, query_sentence = item.strip().split('##')
query_sentence = query_sentence.replace('.', '')
query_words = nltk.word_tokenize(query_sentence)
query_tokens = [self.word2id[word] for word in query_words]
query_length = len(query_tokens)
query_tokens = torch.from_numpy(np.array(query_tokens))
vid_name, start_time, end_time = first_part.split()
duration = float(self.duration_info[vid_name])
fps = float(self.fps_info[vid_name])
gt_start_time = float(start_time)
gt_end_time = float(end_time)
gt_bbox = (gt_start_time / duration, min(gt_end_time / duration,
1))
num_frames = int(self.num_frames[vid_name])
proposal_frames = self.get_proposals(num_frames)
proposals = proposal_frames / num_frames
proposals = torch.from_numpy(proposals)
proposal_indexes = proposal_frames / self.ft_interval
proposal_indexes = proposal_indexes.astype(np.int32)
info = dict(
vid_name=vid_name,
fps=fps,
num_frames=num_frames,
duration=duration,
query_tokens=query_tokens,
query_length=query_length,
gt_start_time=gt_start_time,
gt_end_time=gt_end_time,
gt_bbox=gt_bbox,
proposals=proposals,
num_proposals=proposals.shape[0],
proposal_indexes=proposal_indexes)
data_list.append(info)
return data_list | Load annotation file to get video information. | load_data_list | python | open-mmlab/mmaction2 | mmaction/datasets/charades_sta_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/charades_sta_dataset.py | Apache-2.0 |
def get_data_info(self, idx: int) -> dict:
"""Get annotation by index."""
data_info = super().get_data_info(idx)
vid_name = data_info['vid_name']
feature_path = os.path.join(self.data_prefix['video'],
f'{vid_name}.pt')
vid_feature = torch.load(feature_path)
proposal_feats = []
proposal_indexes = data_info['proposal_indexes'].clip(
max=vid_feature.shape[0] - 1)
for s, e in proposal_indexes:
prop_feature, _ = vid_feature[s:e + 1].max(dim=0)
proposal_feats.append(prop_feature)
proposal_feats = torch.stack(proposal_feats)
data_info['raw_feature'] = proposal_feats
return data_info | Get annotation by index. | get_data_info | python | open-mmlab/mmaction2 | mmaction/datasets/charades_sta_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/charades_sta_dataset.py | Apache-2.0 |
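The proposal feature pooling above reduces each proposal's span of per-step features to a single vector via a temporal max. A toy stand-in for the loaded .pt feature (shapes and index ranges are made up):

import torch

vid_feature = torch.randn(20, 256)              # 20 feature steps, 256-d each
proposal_indexes = [(0, 4), (5, 12), (13, 19)]  # hypothetical (start, end) pairs
proposal_feats = torch.stack(
    [vid_feature[s:e + 1].max(dim=0).values for s, e in proposal_indexes])
print(proposal_feats.shape)  # torch.Size([3, 256])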
def load_data_list(self) -> List[Dict]:
"""Load annotation file to get video information."""
exists(self.ann_file)
data_list = []
with open(self.ann_file) as f:
data_lines = json.load(f)
for data in data_lines:
answers = data['answer']
if isinstance(answers, str):
answers = [answers]
count = Counter(answers)
answer_weight = [i / len(answers) for i in count.values()]
data_item = dict(
question_id=data['question_id'],
filename=osp.join(self.data_prefix['video'],
data['video']),
question=pre_text(data['question']),
gt_answer=list(count.keys()),
gt_answer_weight=answer_weight)
data_list.append(data_item)
return data_list | Load annotation file to get video information. | load_data_list | python | open-mmlab/mmaction2 | mmaction/datasets/msrvtt_datasets.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/msrvtt_datasets.py | Apache-2.0 |
def load_data_list(self) -> List[Dict]:
"""Load annotation file to get video information."""
exists(self.ann_file)
data_list = []
with open(self.ann_file) as f:
data_lines = json.load(f)
for data in data_lines:
data_item = dict(
filename=osp.join(self.data_prefix['video'],
data['video']),
label=data['answer'],
caption_options=[pre_text(c) for c in data['caption']])
data_list.append(data_item)
return data_list | Load annotation file to get video information. | load_data_list | python | open-mmlab/mmaction2 | mmaction/datasets/msrvtt_datasets.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/msrvtt_datasets.py | Apache-2.0 |
def load_data_list(self) -> List[Dict]:
"""Load annotation file to get video information."""
exists(self.ann_file)
data_list = []
with open(self.ann_file) as f:
data_lines = json.load(f)
video_idx = 0
text_idx = 0
for data in data_lines:
# don't consider multiple videos or multiple captions
video_path = osp.join(self.data_prefix['video'], data['video'])
data_item = dict(
filename=video_path,
text=[],
gt_video_id=[],
gt_text_id=[])
if isinstance(data['caption'], str):
data['caption'] = [data['caption']]
for text in data['caption']:
text = pre_text(text)
data_item['text'].append(text)
data_item['gt_video_id'].append(video_idx)
data_item['gt_text_id'].append(text_idx)
text_idx += 1
video_idx += 1
data_list.append(data_item)
self.num_videos = video_idx
self.num_texts = text_idx
return data_list | Load annotation file to get video information. | load_data_list | python | open-mmlab/mmaction2 | mmaction/datasets/msrvtt_datasets.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/msrvtt_datasets.py | Apache-2.0 |
def load_data_list(self) -> List[dict]:
"""Load annotation file to get video information."""
exists(self.ann_file)
data_list = []
fin = list_from_file(self.ann_file)
for line in fin:
line_split = line.strip().split(self.delimiter)
if self.multi_class:
assert self.num_classes is not None
filename, label = line_split[0], line_split[1:]
label = list(map(int, label))
# add fake label for inference datalist without label
elif len(line_split) == 1:
filename, label = line_split[0], -1
else:
filename, label = line_split
label = int(label)
if self.data_prefix['video'] is not None:
filename = osp.join(self.data_prefix['video'], filename)
data_list.append(dict(filename=filename, label=label))
return data_list | Load annotation file to get video information. | load_data_list | python | open-mmlab/mmaction2 | mmaction/datasets/video_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/video_dataset.py | Apache-2.0 |
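Each annotation line parsed above is "<relative/path> <label>" in the single-label case. A minimal stand-in for the parsing loop, run on two made-up lines with a hypothetical data prefix:

import os.path as osp

lines = ['some/path/000.mp4 1', 'some/path/001.mp4 8']  # fake annotation lines
data_prefix = 'data/kinetics400/videos_train'            # hypothetical prefix
data_list = []
for line in lines:
    filename, label = line.strip().split()
    data_list.append(dict(filename=osp.join(data_prefix, filename),
                          label=int(label)))
print(data_list[0])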
def load_data_list(self) -> List[dict]:
"""Load annotation file to get video information."""
exists(self.ann_file)
data_list = []
anno_database = mmengine.load(self.ann_file)
for video_name in anno_database:
video_info = anno_database[video_name]
feature_path = video_name + '.csv'
feature_path = '%s/%s' % (self.data_prefix['video'], feature_path)
video_info['feature_path'] = feature_path
video_info['video_name'] = video_name
data_list.append(video_info)
return data_list | Load annotation file to get video information. | load_data_list | python | open-mmlab/mmaction2 | mmaction/datasets/activitynet_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/activitynet_dataset.py | Apache-2.0 |
def load_data_list(self) -> List[dict]:
"""Load annotation file to get video information."""
exists(self.ann_file)
data_list = []
fin = list_from_file(self.ann_file)
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
# idx for frame_dir
frame_dir = line_split[idx]
if self.data_prefix['img'] is not None:
frame_dir = osp.join(self.data_prefix['img'], frame_dir)
video_info['frame_dir'] = frame_dir
idx += 1
if self.with_offset:
# idx for offset and total_frames
video_info['offset'] = int(line_split[idx])
video_info['total_frames'] = int(line_split[idx + 1])
idx += 2
else:
# idx for total_frames
video_info['total_frames'] = int(line_split[idx])
idx += 1
# idx for label[s]
label = [int(x) for x in line_split[idx:]]
# add fake label for inference datalist without label
if not label:
label = [-1]
if self.multi_class:
assert self.num_classes is not None
video_info['label'] = label
else:
assert len(label) == 1
video_info['label'] = label[0]
data_list.append(video_info)
return data_list | Load annotation file to get video information. | load_data_list | python | open-mmlab/mmaction2 | mmaction/datasets/rawframe_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/rawframe_dataset.py | Apache-2.0 |
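In the default no-offset, single-label case, each rawframe annotation line above is "<frame_dir> <total_frames> <label>". Parsing two made-up lines:

lines = ['some_video_1 120 3', 'some_video_2 90 7']  # fake annotation lines
data_list = []
for line in lines:
    frame_dir, total_frames, label = line.strip().split()
    data_list.append(dict(frame_dir=frame_dir,
                          total_frames=int(total_frames),
                          label=int(label)))
print(data_list)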
def get_data_info(self, idx: int) -> dict:
"""Get annotation by index."""
data_info = super().get_data_info(idx)
data_info['filename_tmpl'] = self.filename_tmpl
return data_info | Get annotation by index. | get_data_info | python | open-mmlab/mmaction2 | mmaction/datasets/rawframe_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/rawframe_dataset.py | Apache-2.0 |
def get_data_info(self, idx: int) -> dict:
"""Get annotation by index."""
data_info = super().get_data_info(idx)
data_info['modality'] = self.modality
data_info['start_index'] = self.start_index
if self.multi_class:
onehot = torch.zeros(self.num_classes)
onehot[data_info['label']] = 1.
data_info['label'] = onehot
return data_info | Get annotation by index. | get_data_info | python | open-mmlab/mmaction2 | mmaction/datasets/base.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/base.py | Apache-2.0 |
def load_data_list(self) -> List[Dict]:
"""Load annotation file to get video information."""
exists(self.ann_file)
data_list = []
with open(self.ann_file) as f:
video_dict = json.load(f)
for filename, texts in video_dict.items():
filename = osp.join(self.data_prefix['video'], filename)
video_text_pairs = []
for text in texts:
data_item = dict(filename=filename, text=text)
video_text_pairs.append(data_item)
data_list.extend(video_text_pairs)
return data_list | Load annotation file to get video information. | load_data_list | python | open-mmlab/mmaction2 | mmaction/datasets/video_text_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/video_text_dataset.py | Apache-2.0 |
def load_data_list(self) -> List[Dict]:
"""Load annotation file to get skeleton information."""
assert self.ann_file.endswith('.pkl')
mmengine.exists(self.ann_file)
data_list = mmengine.load(self.ann_file)
if self.split is not None:
split, annos = data_list['split'], data_list['annotations']
identifier = 'filename' if 'filename' in annos[0] else 'frame_dir'
split = set(split[self.split])
data_list = [x for x in annos if x[identifier] in split]
# Sometimes we may need to load video from the file
if 'video' in self.data_prefix:
for item in data_list:
if 'filename' in item:
item['filename'] = osp.join(self.data_prefix['video'],
item['filename'])
if 'frame_dir' in item:
item['frame_dir'] = osp.join(self.data_prefix['video'],
item['frame_dir'])
return data_list | Load annotation file to get skeleton information. | load_data_list | python | open-mmlab/mmaction2 | mmaction/datasets/pose_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/pose_dataset.py | Apache-2.0 |
def filter_data(self) -> List[Dict]:
"""Filter out invalid samples."""
if self.valid_ratio is not None and isinstance(
self.valid_ratio, float) and self.valid_ratio > 0:
self.data_list = [
x for x in self.data_list if x['valid'][self.box_thr] /
x['total_frames'] >= self.valid_ratio
]
for item in self.data_list:
assert 'box_score' in item,\
'if valid_ratio is a positive number,' \
'item should have field `box_score`'
anno_inds = (item['box_score'] >= self.box_thr)
item['anno_inds'] = anno_inds
logger = MMLogger.get_current_instance()
logger.info(
f'{len(self.data_list)} videos remain after valid thresholding')
return self.data_list | Filter out invalid samples. | filter_data | python | open-mmlab/mmaction2 | mmaction/datasets/pose_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/pose_dataset.py | Apache-2.0 |
def get_data_info(self, idx: int) -> Dict:
"""Get annotation by index."""
data_info = super().get_data_info(idx)
# Sometimes we may need to load skeleton from the file
if 'skeleton' in self.data_prefix:
identifier = 'filename' if 'filename' in data_info \
else 'frame_dir'
ske_name = data_info[identifier]
ske_path = osp.join(self.data_prefix['skeleton'],
ske_name + '.pkl')
ske = mmengine.load(ske_path)
for k in ske:
data_info[k] = ske[k]
return data_info | Get annotation by index. | get_data_info | python | open-mmlab/mmaction2 | mmaction/datasets/pose_dataset.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/pose_dataset.py | Apache-2.0 |
def transform(self, results: Dict) -> Dict:
"""Perform the pose decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
required_keys = ['total_frames', 'frame_inds', 'keypoint']
for k in required_keys:
assert k in results
total_frames = results['total_frames']
frame_inds = results.pop('frame_inds')
keypoint = results['keypoint']
if 'anno_inds' in results:
frame_inds = frame_inds[results['anno_inds']]
keypoint = keypoint[results['anno_inds']]
assert np.all(np.diff(frame_inds) >= 0), \
'frame_inds should be monotonically increasing'
def mapinds(inds):
uni = np.unique(inds)
map_ = {x: i for i, x in enumerate(uni)}
inds = [map_[x] for x in inds]
return np.array(inds, dtype=np.int16)
if self.squeeze:
frame_inds = mapinds(frame_inds)
total_frames = np.max(frame_inds) + 1
results['total_frames'] = total_frames
num_joints = keypoint.shape[1]
num_person = get_mode(frame_inds)[-1][0]
new_kp = np.zeros([num_person, total_frames, num_joints, 2],
dtype=np.float16)
new_kpscore = np.zeros([num_person, total_frames, num_joints],
dtype=np.float16)
nperson_per_frame = np.zeros([total_frames], dtype=np.int16)
for frame_ind, kp in zip(frame_inds, keypoint):
person_ind = nperson_per_frame[frame_ind]
new_kp[person_ind, frame_ind] = kp[:, :2]
new_kpscore[person_ind, frame_ind] = kp[:, 2]
nperson_per_frame[frame_ind] += 1
if num_person > self.max_person:
for i in range(total_frames):
nperson = nperson_per_frame[i]
val = new_kpscore[:nperson, i]
score_sum = val.sum(-1)
inds = sorted(range(nperson), key=lambda x: -score_sum[x])
new_kpscore[:nperson, i] = new_kpscore[inds, i]
new_kp[:nperson, i] = new_kp[inds, i]
num_person = self.max_person
results['num_person'] = num_person
results['keypoint'] = new_kp[:num_person]
results['keypoint_score'] = new_kpscore[:num_person]
return results | Perform the pose decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def generate_a_heatmap(self, arr: np.ndarray, centers: np.ndarray,
max_values: np.ndarray) -> None:
"""Generate pseudo heatmap for one keypoint in one frame.
Args:
arr (np.ndarray): The array to store the generated heatmaps.
Shape: img_h * img_w.
centers (np.ndarray): The coordinates of corresponding keypoints
(of multiple persons). Shape: M * 2.
max_values (np.ndarray): The max values of each keypoint. Shape: M.
"""
sigma = self.sigma
img_h, img_w = arr.shape
for center, max_value in zip(centers, max_values):
if max_value < self.eps:
continue
mu_x, mu_y = center[0], center[1]
st_x = max(int(mu_x - 3 * sigma), 0)
ed_x = min(int(mu_x + 3 * sigma) + 1, img_w)
st_y = max(int(mu_y - 3 * sigma), 0)
ed_y = min(int(mu_y + 3 * sigma) + 1, img_h)
x = np.arange(st_x, ed_x, 1, np.float32)
y = np.arange(st_y, ed_y, 1, np.float32)
# if the keypoint is not in the heatmap coordinate system
if not (len(x) and len(y)):
continue
y = y[:, None]
patch = np.exp(-((x - mu_x)**2 + (y - mu_y)**2) / 2 / sigma**2)
patch = patch * max_value
arr[st_y:ed_y, st_x:ed_x] = \
np.maximum(arr[st_y:ed_y, st_x:ed_x], patch) | Generate pseudo heatmap for one keypoint in one frame.
Args:
arr (np.ndarray): The array to store the generated heatmaps.
Shape: img_h * img_w.
centers (np.ndarray): The coordinates of corresponding keypoints
(of multiple persons). Shape: M * 2.
max_values (np.ndarray): The max values of each keypoint. Shape: M. | generate_a_heatmap | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
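A compact re-implementation of the Gaussian keypoint heatmap above on a small grid; the sigma, grid size and keypoint location are arbitrary example values.

import numpy as np

def gaussian_heatmap(img_h, img_w, center, max_value, sigma=2.0, eps=1e-4):
    arr = np.zeros((img_h, img_w), dtype=np.float32)
    if max_value < eps:
        return arr
    mu_x, mu_y = center
    st_x = max(int(mu_x - 3 * sigma), 0)
    ed_x = min(int(mu_x + 3 * sigma) + 1, img_w)
    st_y = max(int(mu_y - 3 * sigma), 0)
    ed_y = min(int(mu_y + 3 * sigma) + 1, img_h)
    x = np.arange(st_x, ed_x, 1, np.float32)
    y = np.arange(st_y, ed_y, 1, np.float32)[:, None]
    patch = np.exp(-((x - mu_x) ** 2 + (y - mu_y) ** 2) / (2 * sigma ** 2))
    arr[st_y:ed_y, st_x:ed_x] = np.maximum(arr[st_y:ed_y, st_x:ed_x],
                                           patch * max_value)
    return arr

hm = gaussian_heatmap(16, 16, center=(8.0, 5.0), max_value=0.9)
print(hm.max(), np.unravel_index(hm.argmax(), hm.shape))  # ~0.9 at (5, 8)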
def generate_a_limb_heatmap(self, arr: np.ndarray, starts: np.ndarray,
ends: np.ndarray, start_values: np.ndarray,
end_values: np.ndarray) -> None:
"""Generate pseudo heatmap for one limb in one frame.
Args:
arr (np.ndarray): The array to store the generated heatmaps.
Shape: img_h * img_w.
starts (np.ndarray): The coordinates of one keypoint in the
corresponding limbs. Shape: M * 2.
ends (np.ndarray): The coordinates of the other keypoint in the
corresponding limbs. Shape: M * 2.
start_values (np.ndarray): The max values of one keypoint in the
corresponding limbs. Shape: M.
end_values (np.ndarray): The max values of the other keypoint
in the corresponding limbs. Shape: M.
"""
sigma = self.sigma
img_h, img_w = arr.shape
for start, end, start_value, end_value in zip(starts, ends,
start_values,
end_values):
value_coeff = min(start_value, end_value)
if value_coeff < self.eps:
continue
min_x, max_x = min(start[0], end[0]), max(start[0], end[0])
min_y, max_y = min(start[1], end[1]), max(start[1], end[1])
min_x = max(int(min_x - 3 * sigma), 0)
max_x = min(int(max_x + 3 * sigma) + 1, img_w)
min_y = max(int(min_y - 3 * sigma), 0)
max_y = min(int(max_y + 3 * sigma) + 1, img_h)
x = np.arange(min_x, max_x, 1, np.float32)
y = np.arange(min_y, max_y, 1, np.float32)
if not (len(x) and len(y)):
continue
y = y[:, None]
x_0 = np.zeros_like(x)
y_0 = np.zeros_like(y)
# distance to start keypoints
d2_start = ((x - start[0])**2 + (y - start[1])**2)
# distance to end keypoints
d2_end = ((x - end[0])**2 + (y - end[1])**2)
# the distance between start and end keypoints.
d2_ab = ((start[0] - end[0])**2 + (start[1] - end[1])**2)
if d2_ab < 1:
self.generate_a_heatmap(arr, start[None], start_value[None])
continue
coeff = (d2_start - d2_end + d2_ab) / 2. / d2_ab
a_dominate = coeff <= 0
b_dominate = coeff >= 1
seg_dominate = 1 - a_dominate - b_dominate
position = np.stack([x + y_0, y + x_0], axis=-1)
projection = start + np.stack([coeff, coeff], axis=-1) * (
end - start)
d2_line = position - projection
d2_line = d2_line[:, :, 0]**2 + d2_line[:, :, 1]**2
d2_seg = (
a_dominate * d2_start + b_dominate * d2_end +
seg_dominate * d2_line)
patch = np.exp(-d2_seg / 2. / sigma**2)
patch = patch * value_coeff
arr[min_y:max_y, min_x:max_x] = \
np.maximum(arr[min_y:max_y, min_x:max_x], patch) | Generate pseudo heatmap for one limb in one frame.
Args:
arr (np.ndarray): The array to store the generated heatmaps.
Shape: img_h * img_w.
starts (np.ndarray): The coordinates of one keypoint in the
corresponding limbs. Shape: M * 2.
ends (np.ndarray): The coordinates of the other keypoint in the
corresponding limbs. Shape: M * 2.
start_values (np.ndarray): The max values of one keypoint in the
corresponding limbs. Shape: M.
end_values (np.ndarray): The max values of the other keypoint
in the corresponding limbs. Shape: M. | generate_a_limb_heatmap | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def generate_heatmap(self, arr: np.ndarray, kps: np.ndarray,
max_values: np.ndarray) -> None:
"""Generate pseudo heatmap for all keypoints and limbs in one frame (if
needed).
Args:
arr (np.ndarray): The array to store the generated heatmaps.
Shape: V * img_h * img_w.
kps (np.ndarray): The coordinates of keypoints in this frame.
Shape: M * V * 2.
max_values (np.ndarray): The confidence score of each keypoint.
Shape: M * V.
"""
if self.with_kp:
num_kp = kps.shape[1]
for i in range(num_kp):
self.generate_a_heatmap(arr[i], kps[:, i], max_values[:, i])
if self.with_limb:
for i, limb in enumerate(self.skeletons):
start_idx, end_idx = limb
starts = kps[:, start_idx]
ends = kps[:, end_idx]
start_values = max_values[:, start_idx]
end_values = max_values[:, end_idx]
self.generate_a_limb_heatmap(arr[i], starts, ends,
start_values, end_values) | Generate pseudo heatmap for all keypoints and limbs in one frame (if
needed).
Args:
arr (np.ndarray): The array to store the generated heatmaps.
Shape: V * img_h * img_w.
kps (np.ndarray): The coordinates of keypoints in this frame.
Shape: M * V * 2.
max_values (np.ndarray): The confidence score of each keypoint.
Shape: M * V. | generate_heatmap | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def gen_an_aug(self, results: Dict) -> np.ndarray:
"""Generate pseudo heatmaps for all frames.
Args:
results (dict): The dictionary that contains all info of a sample.
Returns:
np.ndarray: The generated pseudo heatmaps.
"""
all_kps = results['keypoint'].astype(np.float32)
kp_shape = all_kps.shape
if 'keypoint_score' in results:
all_kpscores = results['keypoint_score']
else:
all_kpscores = np.ones(kp_shape[:-1], dtype=np.float32)
img_h, img_w = results['img_shape']
# scale img_h, img_w and kps
img_h = int(img_h * self.scaling + 0.5)
img_w = int(img_w * self.scaling + 0.5)
all_kps[..., :2] *= self.scaling
num_frame = kp_shape[1]
num_c = 0
if self.with_kp:
num_c += all_kps.shape[2]
if self.with_limb:
num_c += len(self.skeletons)
ret = np.zeros([num_frame, num_c, img_h, img_w], dtype=np.float32)
for i in range(num_frame):
# M, V, C
kps = all_kps[:, i]
# M, C
kpscores = all_kpscores[:, i] if self.use_score else \
np.ones_like(all_kpscores[:, i])
self.generate_heatmap(ret[i], kps, kpscores)
return ret | Generate pseudo heatmaps for all frames.
Args:
results (dict): The dictionary that contains all info of a sample.
Returns:
np.ndarray: The generated pseudo heatmaps. | gen_an_aug | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def transform(self, results: Dict) -> Dict:
"""Generate pseudo heatmaps based on joint coordinates and confidence.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
heatmap = self.gen_an_aug(results)
key = 'heatmap_imgs' if 'imgs' in results else 'imgs'
if self.double:
indices = np.arange(heatmap.shape[1], dtype=np.int64)
left, right = (self.left_kp, self.right_kp) if self.with_kp else (
self.left_limb, self.right_limb)
for l, r in zip(left, right): # noqa: E741
indices[l] = r
indices[r] = l
heatmap_flip = heatmap[..., ::-1][:, indices]
heatmap = np.concatenate([heatmap, heatmap_flip])
results[key] = heatmap
return results | Generate pseudo heatmaps based on joint coordinates and confidence.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
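The `double` branch above appends a horizontally flipped copy of the heatmap whose left/right channels are swapped. A standalone version of the index swap, with an invented left/right channel pairing:

import numpy as np

num_channels = 5
left, right = [1, 3], [2, 4]                     # made-up left/right pairing
indices = np.arange(num_channels)
for l, r in zip(left, right):
    indices[l], indices[r] = r, l                # swap paired channel indices
heatmap = np.random.rand(4, num_channels, 8, 8)  # frames, C, H, W
heatmap_flip = heatmap[..., ::-1][:, indices]    # flip width, swap channels
print(np.concatenate([heatmap, heatmap_flip]).shape)  # (8, 5, 8, 8)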
def transform(self, results: Dict) -> Dict:
"""Convert the coordinates of keypoints to make it more compact.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
img_shape = results['img_shape']
h, w = img_shape
kp = results['keypoint']
# Make NaN zero
kp[np.isnan(kp)] = 0.
kp_x = kp[..., 0]
kp_y = kp[..., 1]
min_x = np.min(kp_x[kp_x != 0], initial=np.Inf)
min_y = np.min(kp_y[kp_y != 0], initial=np.Inf)
max_x = np.max(kp_x[kp_x != 0], initial=-np.Inf)
max_y = np.max(kp_y[kp_y != 0], initial=-np.Inf)
# The compact area is too small
if max_x - min_x < self.threshold or max_y - min_y < self.threshold:
return results
center = ((max_x + min_x) / 2, (max_y + min_y) / 2)
half_width = (max_x - min_x) / 2 * (1 + self.padding)
half_height = (max_y - min_y) / 2 * (1 + self.padding)
if self.hw_ratio is not None:
half_height = max(self.hw_ratio[0] * half_width, half_height)
half_width = max(1 / self.hw_ratio[1] * half_height, half_width)
min_x, max_x = center[0] - half_width, center[0] + half_width
min_y, max_y = center[1] - half_height, center[1] + half_height
# hot update
if not self.allow_imgpad:
min_x, min_y = int(max(0, min_x)), int(max(0, min_y))
max_x, max_y = int(min(w, max_x)), int(min(h, max_y))
else:
min_x, min_y = int(min_x), int(min_y)
max_x, max_y = int(max_x), int(max_y)
kp_x[kp_x != 0] -= min_x
kp_y[kp_y != 0] -= min_y
new_shape = (max_y - min_y, max_x - min_x)
results['img_shape'] = new_shape
# the order is x, y, w, h (in [0, 1]), a tuple
crop_quadruple = results.get('crop_quadruple', (0., 0., 1., 1.))
new_crop_quadruple = (min_x / w, min_y / h, (max_x - min_x) / w,
(max_y - min_y) / h)
crop_quadruple = _combine_quadruple(crop_quadruple, new_crop_quadruple)
results['crop_quadruple'] = crop_quadruple
return results | Convert the coordinates of keypoints to make it more compact.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def unit_vector(self, vector: np.ndarray) -> np.ndarray:
"""Returns the unit vector of the vector."""
return vector / np.linalg.norm(vector) | Returns the unit vector of the vector. | unit_vector | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def angle_between(self, v1: np.ndarray, v2: np.ndarray) -> float:
"""Returns the angle in radians between vectors 'v1' and 'v2'."""
if np.abs(v1).sum() < 1e-6 or np.abs(v2).sum() < 1e-6:
return 0
v1_u = self.unit_vector(v1)
v2_u = self.unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) | Returns the angle in radians between vectors 'v1' and 'v2'. | angle_between | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def rotation_matrix(self, axis: np.ndarray, theta: float) -> np.ndarray:
"""Returns the rotation matrix associated with counterclockwise
rotation about the given axis by theta radians."""
if np.abs(axis).sum() < 1e-6 or np.abs(theta) < 1e-6:
return np.eye(3)
axis = np.asarray(axis)
axis = axis / np.sqrt(np.dot(axis, axis))
a = np.cos(theta / 2.0)
b, c, d = -axis * np.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]]) | Returns the rotation matrix associated with counterclockwise
rotation about the given axis by theta radians. | rotation_matrix | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
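A quick numeric check of the axis-angle rotation matrix above: rotating the x-axis by 90 degrees about the z-axis should yield the y-axis. The helper mirrors the construction shown; the axis and angle are example values.

import numpy as np

def rotation_matrix(axis, theta):
    axis = np.asarray(axis, dtype=np.float64)
    axis = axis / np.sqrt(np.dot(axis, axis))
    a = np.cos(theta / 2.0)
    b, c, d = -axis * np.sin(theta / 2.0)
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
                     [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
                     [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])

R = rotation_matrix([0, 0, 1], np.pi / 2)
print(np.round(R @ np.array([1.0, 0.0, 0.0]), 6))  # [0. 1. 0.]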
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`PreNormalize3D`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
skeleton = results['keypoint']
total_frames = results.get('total_frames', skeleton.shape[1])
M, T, V, C = skeleton.shape
assert T == total_frames
if skeleton.sum() == 0:
return results
index0 = [
i for i in range(T) if not np.all(np.isclose(skeleton[0, i], 0))
]
assert M in [1, 2]
if M == 2:
index1 = [
i for i in range(T)
if not np.all(np.isclose(skeleton[1, i], 0))
]
if len(index0) < len(index1):
skeleton = skeleton[:, np.array(index1)]
skeleton = skeleton[[1, 0]]
else:
skeleton = skeleton[:, np.array(index0)]
else:
skeleton = skeleton[:, np.array(index0)]
T_new = skeleton.shape[1]
if self.align_center:
if skeleton.shape[2] == 25:
main_body_center = skeleton[0, 0, 1].copy()
else:
main_body_center = skeleton[0, 0, -1].copy()
mask = ((skeleton != 0).sum(-1) > 0)[..., None]
skeleton = (skeleton - main_body_center) * mask
if self.align_spine:
joint_bottom = skeleton[0, 0, self.zaxis[0]]
joint_top = skeleton[0, 0, self.zaxis[1]]
axis = np.cross(joint_top - joint_bottom, [0, 0, 1])
angle = self.angle_between(joint_top - joint_bottom, [0, 0, 1])
matrix_z = self.rotation_matrix(axis, angle)
skeleton = np.einsum('abcd,kd->abck', skeleton, matrix_z)
if self.align_shoulder:
joint_rshoulder = skeleton[0, 0, self.xaxis[0]]
joint_lshoulder = skeleton[0, 0, self.xaxis[1]]
axis = np.cross(joint_rshoulder - joint_lshoulder, [1, 0, 0])
angle = self.angle_between(joint_rshoulder - joint_lshoulder,
[1, 0, 0])
matrix_x = self.rotation_matrix(axis, angle)
skeleton = np.einsum('abcd,kd->abck', skeleton, matrix_x)
results['keypoint'] = skeleton
results['total_frames'] = T_new
results['body_center'] = main_body_center
return results | The transform function of :class:`PreNormalize3D`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`PreNormalize2D`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
h, w = results.get('img_shape', self.img_shape)
results['keypoint'][..., 0] = \
(results['keypoint'][..., 0] - (w / 2)) / (w / 2)
results['keypoint'][..., 1] = \
(results['keypoint'][..., 1] - (h / 2)) / (h / 2)
return results | The transform function of :class:`PreNormalize2D`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
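The 2D pre-normalization above maps pixel coordinates into roughly [-1, 1] relative to the image centre. A toy keypoint array with an assumed 1080x1920 image shape:

import numpy as np

h, w = 1080, 1920
kp = np.array([[[[0., 0.], [960., 540.], [1920., 1080.]]]], dtype=np.float32)
kp[..., 0] = (kp[..., 0] - w / 2) / (w / 2)
kp[..., 1] = (kp[..., 1] - h / 2) / (h / 2)
print(kp[0, 0])  # [[-1, -1], [0, 0], [1, 1]]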
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`JointToBone`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
keypoint = results['keypoint']
M, T, V, C = keypoint.shape
bone = np.zeros((M, T, V, C), dtype=np.float32)
assert C in [2, 3]
for v1, v2 in self.pairs:
bone[..., v1, :] = keypoint[..., v1, :] - keypoint[..., v2, :]
if C == 3 and self.dataset in ['openpose', 'coco']:
score = (keypoint[..., v1, 2] + keypoint[..., v2, 2]) / 2
bone[..., v1, 2] = score
results[self.target] = bone
return results | The transform function of :class:`JointToBone`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
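A tiny numeric example of the joint-to-bone conversion above: each bone is the coordinate difference between a joint and its parent taken from a (child, parent) pair list. The 3-joint skeleton and its pairs are invented:

import numpy as np

pairs = ((0, 0), (1, 0), (2, 1))  # (child, parent); the root points to itself
keypoint = np.array([[[[0., 0.], [1., 0.], [1., 2.]]]])  # M=1, T=1, V=3, C=2
bone = np.zeros_like(keypoint)
for v1, v2 in pairs:
    bone[..., v1, :] = keypoint[..., v1, :] - keypoint[..., v2, :]
print(bone[0, 0])  # [[0, 0], [1, 0], [0, 2]]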
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`ToMotion`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
data = results[self.source]
M, T, V, C = data.shape
motion = np.zeros_like(data)
assert C in [2, 3]
motion[:, :T - 1] = np.diff(data, axis=1)
if C == 3 and self.dataset in ['openpose', 'coco']:
score = (data[:, :T - 1, :, 2] + data[:, 1:, :, 2]) / 2
motion[:, :T - 1, :, 2] = score
results[self.target] = motion
return results | The transform function of :class:`ToMotion`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
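The motion feature above is simply the frame-to-frame difference along the time axis, with the last frame left at zero. Toy shapes and values:

import numpy as np

data = np.arange(2 * 4 * 3 * 2, dtype=np.float32).reshape(2, 4, 3, 2)  # M, T, V, C
motion = np.zeros_like(data)
motion[:, :-1] = np.diff(data, axis=1)
print(motion[0, :, 0, 0])  # [6. 6. 6. 0.]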
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`MergeSkeFeat`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
feats = []
for name in self.feat_list:
feats.append(results.pop(name))
feats = np.concatenate(feats, axis=self.axis)
results[self.target] = feats
return results | The transform function of :class:`MergeSkeFeat`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`GenSkeFeat`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
if 'keypoint_score' in results and 'keypoint' in results:
assert self.dataset != 'nturgb+d'
assert results['keypoint'].shape[
-1] == 2, 'Only 2D keypoints have keypoint_score. '
keypoint = results.pop('keypoint')
keypoint_score = results.pop('keypoint_score')
results['keypoint'] = np.concatenate(
[keypoint, keypoint_score[..., None]], -1)
return self.ops(results) | The transform function of :class:`GenSkeFeat`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def _get_train_clips(self, num_frames: int, clip_len: int) -> np.ndarray:
"""Uniformly sample indices for training clips.
Args:
num_frames (int): The number of frames.
clip_len (int): The length of the clip.
Returns:
np.ndarray: The sampled indices for training clips.
"""
all_inds = []
for clip_idx in range(self.num_clips):
if num_frames < clip_len:
start = np.random.randint(0, num_frames)
inds = np.arange(start, start + clip_len)
elif clip_len <= num_frames < 2 * clip_len:
basic = np.arange(clip_len)
inds = np.random.choice(
clip_len + 1, num_frames - clip_len, replace=False)
offset = np.zeros(clip_len + 1, dtype=np.int32)
offset[inds] = 1
offset = np.cumsum(offset)
inds = basic + offset[:-1]
else:
bids = np.array(
[i * num_frames // clip_len for i in range(clip_len + 1)])
bsize = np.diff(bids)
bst = bids[:clip_len]
offset = np.random.randint(bsize)
inds = bst + offset
all_inds.append(inds)
return np.concatenate(all_inds) | Uniformly sample indices for training clips.
Args:
num_frames (int): The number of frames.
clip_len (int): The length of the clip.
Returns:
np.ndarray: The sampled indices for training clips. | _get_train_clips | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
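A sketch of the third branch above (num_frames >= 2 * clip_len): the video is split into clip_len roughly equal bins and one frame index is drawn per bin, giving strictly increasing indices. num_frames=23 and clip_len=8 are arbitrary example values.

import numpy as np

num_frames, clip_len = 23, 8
bids = np.array([i * num_frames // clip_len for i in range(clip_len + 1)])
bsize = np.diff(bids)              # size of each bin
offset = np.random.randint(bsize)  # one random offset per bin
inds = bids[:clip_len] + offset
print(inds)                        # 8 increasing indices in [0, 23)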
def _get_test_clips(self, num_frames: int, clip_len: int) -> np.ndarray:
"""Uniformly sample indices for testing clips.
Args:
num_frames (int): The number of frames.
clip_len (int): The length of the clip.
Returns:
np.ndarray: The sampled indices for testing clips.
"""
np.random.seed(self.seed)
all_inds = []
for i in range(self.num_clips):
if num_frames < clip_len:
start_ind = i if num_frames < self.num_clips \
else i * num_frames // self.num_clips
inds = np.arange(start_ind, start_ind + clip_len)
elif clip_len <= num_frames < clip_len * 2:
basic = np.arange(clip_len)
inds = np.random.choice(
clip_len + 1, num_frames - clip_len, replace=False)
offset = np.zeros(clip_len + 1, dtype=np.int64)
offset[inds] = 1
offset = np.cumsum(offset)
inds = basic + offset[:-1]
else:
bids = np.array(
[i * num_frames // clip_len for i in range(clip_len + 1)])
bsize = np.diff(bids)
bst = bids[:clip_len]
offset = np.random.randint(bsize)
inds = bst + offset
all_inds.append(inds)
return np.concatenate(all_inds) | Uniformly sample indices for testing clips.
Args:
num_frames (int): The number of frames.
clip_len (int): The length of the clip.
Returns:
np.ndarray: The sampled indices for testing clips. | _get_test_clips | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`UniformSampleFrames`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
num_frames = results['total_frames']
if self.test_mode:
inds = self._get_test_clips(num_frames, self.clip_len)
else:
inds = self._get_train_clips(num_frames, self.clip_len)
inds = np.mod(inds, num_frames)
start_index = results.get('start_index', 0)
inds = inds + start_index
if 'keypoint' in results:
kp = results['keypoint']
assert num_frames == kp.shape[1]
num_person = kp.shape[0]
num_persons = [num_person] * num_frames
for i in range(num_frames):
j = num_person - 1
while j >= 0 and np.all(np.abs(kp[j, i]) < 1e-5):
j -= 1
num_persons[i] = j + 1
transitional = [False] * num_frames
for i in range(1, num_frames - 1):
if num_persons[i] != num_persons[i - 1]:
transitional[i] = transitional[i - 1] = True
if num_persons[i] != num_persons[i + 1]:
transitional[i] = transitional[i + 1] = True
inds_int = inds.astype(np.int64)
coeff = np.array([transitional[i] for i in inds_int])
inds = (coeff * inds_int + (1 - coeff) * inds).astype(np.float32)
results['frame_inds'] = inds.astype(np.int32)
results['clip_len'] = self.clip_len
results['frame_interval'] = None
results['num_clips'] = self.num_clips
return results | The transform function of :class:`UniformSampleFrames`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`PadTo`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
total_frames = results['total_frames']
assert total_frames <= self.length
start_index = results.get('start_index', 0)
inds = np.arange(start_index, start_index + self.length)
inds = np.mod(inds, total_frames)
keypoint = results['keypoint'][:, inds].copy()
if self.mode == 'zero':
keypoint[:, total_frames:] = 0
results['keypoint'] = keypoint
results['total_frames'] = self.length
return results | The transform function of :class:`PadTo`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
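The padding above tiles frame indices modulo total_frames up to the target length and then, in 'zero' mode, blanks everything past the real length. Toy shapes:

import numpy as np

length, total_frames = 6, 4
keypoint = np.arange(1 * total_frames * 2 * 2).reshape(1, total_frames, 2, 2)
inds = np.mod(np.arange(length), total_frames)  # [0, 1, 2, 3, 0, 1]
padded = keypoint[:, inds].copy()
padded[:, total_frames:] = 0                    # 'zero' mode
print(padded.shape)                             # (1, 6, 2, 2)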
def _load_kp(kp: np.ndarray, frame_inds: np.ndarray) -> np.ndarray:
"""Load keypoints according to sampled indexes."""
return kp[:, frame_inds].astype(np.float32) | Load keypoints according to sampled indexes. | _load_kp | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def _load_kpscore(kpscore: np.ndarray,
frame_inds: np.ndarray) -> np.ndarray:
"""Load keypoint scores according to sampled indexes."""
return kpscore[:, frame_inds].astype(np.float32) | Load keypoint scores according to sampled indexes. | _load_kpscore | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`PoseDecode`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
if 'total_frames' not in results:
results['total_frames'] = results['keypoint'].shape[1]
if 'frame_inds' not in results:
results['frame_inds'] = np.arange(results['total_frames'])
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
offset = results.get('offset', 0)
frame_inds = results['frame_inds'] + offset
if 'keypoint_score' in results:
results['keypoint_score'] = self._load_kpscore(
results['keypoint_score'], frame_inds)
results['keypoint'] = self._load_kp(results['keypoint'], frame_inds)
return results | The transform function of :class:`PoseDecode`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`MMUniformSampleFrames`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
num_frames = results['total_frames']
modalities = []
for modality, clip_len in self.clip_len.items():
if self.test_mode:
inds = self._get_test_clips(num_frames, clip_len)
else:
inds = self._get_train_clips(num_frames, clip_len)
inds = np.mod(inds, num_frames)
results[f'{modality}_inds'] = inds.astype(np.int32)
modalities.append(modality)
results['clip_len'] = self.clip_len
results['frame_interval'] = None
results['num_clips'] = self.num_clips
if not isinstance(results['modality'], list):
# should override
results['modality'] = modalities
return results | The transform function of :class:`MMUniformSampleFrames`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`MMDecode`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
for mod in results['modality']:
if results[f'{mod}_inds'].ndim != 1:
results[f'{mod}_inds'] = np.squeeze(results[f'{mod}_inds'])
frame_inds = results[f'{mod}_inds']
if mod == 'RGB':
if 'filename' not in results:
results['filename'] = results['frame_dir'] + '.mp4'
video_reader = self._get_video_reader(results['filename'])
imgs = self._decord_load_frames(video_reader, frame_inds)
del video_reader
results['imgs'] = imgs
elif mod == 'Pose':
assert 'keypoint' in results
if 'keypoint_score' not in results:
keypoint_score = [
np.ones(keypoint.shape[:-1], dtype=np.float32)
for keypoint in results['keypoint']
]
results['keypoint_score'] = np.stack(keypoint_score)
results['keypoint'] = self._load_kp(results['keypoint'],
frame_inds)
results['keypoint_score'] = self._load_kpscore(
results['keypoint_score'], frame_inds)
else:
raise NotImplementedError(
f'MMDecode: Modality {mod} not supported')
# We need to scale human keypoints to the new image size
if 'imgs' in results and 'keypoint' in results:
real_img_shape = results['imgs'][0].shape[:2]
if real_img_shape != results['img_shape']:
oh, ow = results['img_shape']
nh, nw = real_img_shape
assert results['keypoint'].shape[-1] in [2, 3]
results['keypoint'][..., 0] *= (nw / ow)
results['keypoint'][..., 1] *= (nh / oh)
results['img_shape'] = real_img_shape
results['original_shape'] = real_img_shape
return results | The transform function of :class:`MMDecode`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def _get_box(self, keypoint: np.ndarray, img_shape: Tuple[int]) -> Tuple:
"""Calculate the bounding box surrounding all joints in the frames."""
h, w = img_shape
kp_x = keypoint[..., 0]
kp_y = keypoint[..., 1]
min_x = np.min(kp_x[kp_x != 0], initial=np.Inf)
min_y = np.min(kp_y[kp_y != 0], initial=np.Inf)
max_x = np.max(kp_x[kp_x != 0], initial=-np.Inf)
max_y = np.max(kp_y[kp_y != 0], initial=-np.Inf)
# The compact area is too small
if max_x - min_x < self.threshold or max_y - min_y < self.threshold:
return 0, 0, w, h
center = ((max_x + min_x) / 2, (max_y + min_y) / 2)
half_width = (max_x - min_x) / 2 * (1 + self.padding)
half_height = (max_y - min_y) / 2 * (1 + self.padding)
if self.hw_ratio is not None:
half_height = max(self.hw_ratio[0] * half_width, half_height)
half_width = max(1 / self.hw_ratio[1] * half_height, half_width)
min_x, max_x = center[0] - half_width, center[0] + half_width
min_y, max_y = center[1] - half_height, center[1] + half_height
# hot update
if not self.allow_imgpad:
min_x, min_y = int(max(0, min_x)), int(max(0, min_y))
max_x, max_y = int(min(w, max_x)), int(min(h, max_y))
else:
min_x, min_y = int(min_x), int(min_y)
max_x, max_y = int(max_x), int(max_y)
return min_x, min_y, max_x, max_y | Calculate the bounding box surrounding all joints in the frames. | _get_box | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
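A toy run of the box computation above: keypoints clustered in one corner of the frame produce a padded crop box around them. The joints, image size and padding are example values, and the hw_ratio / allow_imgpad options are ignored for brevity.

import numpy as np

kp = np.array([[10., 20.], [30., 60.], [25., 40.]])  # a few (x, y) joints
img_h, img_w, padding = 128, 128, 0.25
min_x, max_x = kp[:, 0].min(), kp[:, 0].max()
min_y, max_y = kp[:, 1].min(), kp[:, 1].max()
cx, cy = (min_x + max_x) / 2, (min_y + max_y) / 2
half_w = (max_x - min_x) / 2 * (1 + padding)
half_h = (max_y - min_y) / 2 * (1 + padding)
box = (int(max(0, cx - half_w)), int(max(0, cy - half_h)),
       int(min(img_w, cx + half_w)), int(min(img_h, cy + half_h)))
print(box)  # (7, 15, 32, 65) -- noticeably tighter than the full 128x128 frame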
def _compact_images(self, imgs: List[np.ndarray], img_shape: Tuple[int],
box: Tuple[int]) -> List:
"""Crop the images acoordding the bounding box."""
h, w = img_shape
min_x, min_y, max_x, max_y = box
pad_l, pad_u, pad_r, pad_d = 0, 0, 0, 0
if min_x < 0:
pad_l = -min_x
min_x, max_x = 0, max_x + pad_l
w += pad_l
if min_y < 0:
pad_u = -min_y
min_y, max_y = 0, max_y + pad_u
h += pad_u
if max_x > w:
pad_r = max_x - w
w = max_x
if max_y > h:
pad_d = max_y - h
h = max_y
if pad_l > 0 or pad_r > 0 or pad_u > 0 or pad_d > 0:
imgs = [
np.pad(img, ((pad_u, pad_d), (pad_l, pad_r), (0, 0)))
for img in imgs
]
imgs = [img[min_y:max_y, min_x:max_x] for img in imgs]
return imgs | Crop the images according to the bounding box. | _compact_images | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`MMCompact`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
img_shape = results['img_shape']
kp = results['keypoint']
# Make NaN zero
kp[np.isnan(kp)] = 0.
min_x, min_y, max_x, max_y = self._get_box(kp, img_shape)
kp_x, kp_y = kp[..., 0], kp[..., 1]
kp_x[kp_x != 0] -= min_x
kp_y[kp_y != 0] -= min_y
new_shape = (max_y - min_y, max_x - min_x)
results['img_shape'] = new_shape
results['imgs'] = self._compact_images(results['imgs'], img_shape,
(min_x, min_y, max_x, max_y))
return results | The transform function of :class:`MMCompact`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/pose_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py | Apache-2.0 |
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`PackActionInputs`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
packed_results = dict()
if self.collect_keys is not None:
packed_results['inputs'] = dict()
for key in self.collect_keys:
packed_results['inputs'][key] = to_tensor(results[key])
else:
if 'imgs' in results:
imgs = results['imgs']
packed_results['inputs'] = to_tensor(imgs)
elif 'heatmap_imgs' in results:
heatmap_imgs = results['heatmap_imgs']
packed_results['inputs'] = to_tensor(heatmap_imgs)
elif 'keypoint' in results:
keypoint = results['keypoint']
packed_results['inputs'] = to_tensor(keypoint)
elif 'audios' in results:
audios = results['audios']
packed_results['inputs'] = to_tensor(audios)
elif 'text' in results:
text = results['text']
packed_results['inputs'] = to_tensor(text)
else:
raise ValueError(
'Cannot get `imgs`, `keypoint`, `heatmap_imgs`, '
'`audios` or `text` in the input dict of '
'`PackActionInputs`.')
data_sample = ActionDataSample()
if 'gt_bboxes' in results:
instance_data = InstanceData()
for key in self.mapping_table.keys():
instance_data[self.mapping_table[key]] = to_tensor(
results[key])
data_sample.gt_instances = instance_data
if 'proposals' in results:
data_sample.proposals = InstanceData(
bboxes=to_tensor(results['proposals']))
if 'label' in results:
data_sample.set_gt_label(results['label'])
# Set custom algorithm keys
for key in self.algorithm_keys:
if key in results:
data_sample.set_field(results[key], key)
# Set meta keys
img_meta = {k: results[k] for k in self.meta_keys if k in results}
data_sample.set_metainfo(img_meta)
packed_results['data_samples'] = data_sample
return packed_results | The transform function of :class:`PackActionInputs`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/formatting.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/formatting.py | Apache-2.0 |
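A hedged usage sketch of the packing transform above on fake data. The import path, meta_keys argument and gt_label attribute are assumptions based on this file's place in mmaction2, not verified against a specific release:

import numpy as np
from mmaction.datasets.transforms import PackActionInputs  # assumed import path

results = dict(
    imgs=np.random.rand(3, 8, 32, 32).astype(np.float32),  # fake clip tensor
    label=2,
    img_shape=(32, 32))
packed = PackActionInputs(meta_keys=('img_shape', ))(results)
print(packed['inputs'].shape)           # torch.Size([3, 8, 32, 32])
print(packed['data_samples'].gt_label)  # the packed label, e.g. tensor([2])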
def transform(self, results):
"""Method to pack the input data.
Args:
results (dict): Result dict from the data pipeline.
Returns:
dict:
- 'inputs' (obj:`torch.Tensor`): The forward data of models.
- 'data_samples' (obj:`DetDataSample`): The annotation info of the
sample.
"""
packed_results = dict()
if 'raw_feature' in results:
raw_feature = results['raw_feature']
packed_results['inputs'] = to_tensor(raw_feature)
elif 'bsp_feature' in results:
packed_results['inputs'] = torch.tensor(0.)
else:
raise ValueError(
'Cannot get "raw_feature" or "bsp_feature" in the input '
'dict of `PackActionInputs`.')
data_sample = ActionDataSample()
for key in self.keys:
if key not in results:
continue
elif key == 'proposals':
instance_data = InstanceData()
instance_data[key] = to_tensor(results[key])
data_sample.proposals = instance_data
else:
if hasattr(data_sample, 'gt_instances'):
data_sample.gt_instances[key] = to_tensor(results[key])
else:
instance_data = InstanceData()
instance_data[key] = to_tensor(results[key])
data_sample.gt_instances = instance_data
img_meta = {k: results[k] for k in self.meta_keys if k in results}
data_sample.set_metainfo(img_meta)
packed_results['data_samples'] = data_sample
return packed_results | Method to pack the input data.
Args:
results (dict): Result dict from the data pipeline.
Returns:
dict:
- 'inputs' (obj:`torch.Tensor`): The forward data of models.
- 'data_samples' (obj:`DetDataSample`): The annotation info of the
sample. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/formatting.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/formatting.py | Apache-2.0 |
def transform(self, results):
"""Performs the Transpose formatting.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results | Performs the Transpose formatting.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/formatting.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/formatting.py | Apache-2.0 |