code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def transform(self, results: Dict) -> Dict:
"""Performs the FormatShape formatting.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if not isinstance(results['imgs'], np.ndarray):
results['imgs'] = np.array(results['imgs'])
# [M x H x W x C]
# M = 1 * N_crops * N_clips * T
if self.collapse:
assert results['num_clips'] == 1
if self.input_format == 'NCTHW':
if 'imgs' in results:
imgs = results['imgs']
num_clips = results['num_clips']
clip_len = results['clip_len']
if isinstance(clip_len, dict):
clip_len = clip_len['RGB']
imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])
# N_crops x N_clips x T x H x W x C
imgs = np.transpose(imgs, (0, 1, 5, 2, 3, 4))
# N_crops x N_clips x C x T x H x W
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
# M' x C x T x H x W
# M' = N_crops x N_clips
results['imgs'] = imgs
results['input_shape'] = imgs.shape
if 'heatmap_imgs' in results:
imgs = results['heatmap_imgs']
num_clips = results['num_clips']
clip_len = results['clip_len']
# clip_len must be a dict
clip_len = clip_len['Pose']
imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])
# N_crops x N_clips x T x C x H x W
imgs = np.transpose(imgs, (0, 1, 3, 2, 4, 5))
# N_crops x N_clips x C x T x H x W
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
# M' x C x T x H x W
# M' = N_crops x N_clips
results['heatmap_imgs'] = imgs
results['heatmap_input_shape'] = imgs.shape
elif self.input_format == 'NCTHW_Heatmap':
num_clips = results['num_clips']
clip_len = results['clip_len']
imgs = results['imgs']
imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])
# N_crops x N_clips x T x C x H x W
imgs = np.transpose(imgs, (0, 1, 3, 2, 4, 5))
# N_crops x N_clips x C x T x H x W
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
# M' x C x T x H x W
# M' = N_crops x N_clips
results['imgs'] = imgs
results['input_shape'] = imgs.shape
elif self.input_format == 'NCHW':
imgs = results['imgs']
imgs = np.transpose(imgs, (0, 3, 1, 2))
if 'modality' in results and results['modality'] == 'Flow':
clip_len = results['clip_len']
imgs = imgs.reshape((-1, clip_len * imgs.shape[1]) +
imgs.shape[2:])
# M x C x H x W
results['imgs'] = imgs
results['input_shape'] = imgs.shape
elif self.input_format == 'NPTCHW':
num_proposals = results['num_proposals']
num_clips = results['num_clips']
clip_len = results['clip_len']
imgs = results['imgs']
imgs = imgs.reshape((num_proposals, num_clips * clip_len) +
imgs.shape[1:])
# P x M x H x W x C
# M = N_clips x T
imgs = np.transpose(imgs, (0, 1, 4, 2, 3))
# P x M x C x H x W
results['imgs'] = imgs
results['input_shape'] = imgs.shape
if self.collapse:
assert results['imgs'].shape[0] == 1
results['imgs'] = results['imgs'].squeeze(0)
results['input_shape'] = results['imgs'].shape
return results | Performs the FormatShape formatting.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/formatting.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/formatting.py | Apache-2.0 |
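A minimal numpy sketch of the NCTHW branch above, using assumed toy sizes (2 crops, 1 clip, 4 frames, 8x8 RGB) to make the reshape/transpose chain concrete:

import numpy as np

imgs = np.zeros((8, 8, 8, 3))  # M x H x W x C, M = N_crops * N_clips * T = 2 * 1 * 4
num_clips, clip_len = 1, 4
imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])  # (2, 1, 4, 8, 8, 3)
imgs = np.transpose(imgs, (0, 1, 5, 2, 3, 4))                    # (2, 1, 3, 4, 8, 8)
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
assert imgs.shape == (2, 3, 4, 8, 8)  # M' x C x T x H x W, M' = N_crops * N_clips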
def transform(self, results: Dict) -> Dict:
"""Performs the FormatShape formatting.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
audios = results['audios']
# clip x sample x freq -> clip x channel x sample x freq
clip, sample, freq = audios.shape
audios = audios.reshape(clip, 1, sample, freq)
results['audios'] = audios
results['input_shape'] = audios.shape
return results | Performs the FormatShape formatting.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/formatting.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/formatting.py | Apache-2.0 |
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`FormatGCNInput`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
keypoint = results['keypoint']
if 'keypoint_score' in results:
keypoint = np.concatenate(
(keypoint, results['keypoint_score'][..., None]), axis=-1)
cur_num_person = keypoint.shape[0]
if cur_num_person < self.num_person:
pad_dim = self.num_person - cur_num_person
pad = np.zeros(
(pad_dim, ) + keypoint.shape[1:], dtype=keypoint.dtype)
keypoint = np.concatenate((keypoint, pad), axis=0)
if self.mode == 'loop' and cur_num_person == 1:
for i in range(1, self.num_person):
keypoint[i] = keypoint[0]
elif cur_num_person > self.num_person:
keypoint = keypoint[:self.num_person]
M, T, V, C = keypoint.shape
nc = results.get('num_clips', 1)
assert T % nc == 0
keypoint = keypoint.reshape(
(M, nc, T // nc, V, C)).transpose(1, 0, 2, 3, 4)
results['keypoint'] = np.ascontiguousarray(keypoint)
return results | The transform function of :class:`FormatGCNInput`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/formatting.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/formatting.py | Apache-2.0 |
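A minimal numpy sketch of the padding and clip-splitting above, under assumed settings (num_person=2, mode='loop', num_clips=2) and a single detected person:

import numpy as np

num_person, num_clips = 2, 2                  # assumed transform settings
keypoint = np.random.rand(1, 20, 17, 3)       # M x T x V x C, one person
cur = keypoint.shape[0]
pad = np.zeros((num_person - cur, ) + keypoint.shape[1:], keypoint.dtype)
keypoint = np.concatenate((keypoint, pad), axis=0)
keypoint[1:] = keypoint[0]                    # 'loop' mode: replicate the person
M, T, V, C = keypoint.shape
keypoint = keypoint.reshape(
    (M, num_clips, T // num_clips, V, C)).transpose(1, 0, 2, 3, 4)
assert keypoint.shape == (2, 2, 10, 17, 3)    # N_clips x M x T' x V x C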
def transform(self, results):
"""Perform Torchvision augmentations.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
assert 'imgs' in results
imgs = [x.transpose(2, 0, 1) for x in results['imgs']]
imgs = to_tensor(np.stack(imgs))
imgs = self.trans(imgs).data.numpy()
imgs[imgs > 255] = 255
imgs[imgs < 0] = 0
imgs = imgs.astype(np.uint8)
imgs = [x.transpose(1, 2, 0) for x in imgs]
results['imgs'] = imgs
return results | Perform Torchvision augmentations.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/wrappers.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/wrappers.py | Apache-2.0 |
def transform(self, results):
"""Perform PytorchVideoTrans augmentations.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
assert 'imgs' in results
assert 'gt_bboxes' not in results,\
f'PytorchVideo {self.op} doesn\'t support bboxes yet.'
assert 'proposals' not in results,\
f'PytorchVideo {self.op} doesn\'t support bboxes yet.'
if self.op in ('AugMix', 'RandAugment'):
# list[ndarray(h, w, 3)] -> torch.tensor(t, c, h, w)
imgs = [x.transpose(2, 0, 1) for x in results['imgs']]
imgs = to_tensor(np.stack(imgs))
else:
# list[ndarray(h, w, 3)] -> torch.tensor(c, t, h, w)
# uint8 -> float32
imgs = to_tensor((np.stack(results['imgs']).transpose(3, 0, 1, 2) /
255.).astype(np.float32))
imgs = self.trans(imgs).data.numpy()
if self.op in ('AugMix', 'RandAugment'):
imgs[imgs > 255] = 255
imgs[imgs < 0] = 0
imgs = imgs.astype(np.uint8)
# torch.tensor(t, c, h, w) -> list[ndarray(h, w, 3)]
imgs = [x.transpose(1, 2, 0) for x in imgs]
else:
# float32 -> uint8
imgs = imgs * 255
imgs[imgs > 255] = 255
imgs[imgs < 0] = 0
imgs = imgs.astype(np.uint8)
# torch.tensor(c, t, h, w) -> list[ndarray(h, w, 3)]
imgs = [x for x in imgs.transpose(1, 2, 3, 0)]
results['imgs'] = imgs
return results | Perform PytorchVideoTrans augmentations.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/wrappers.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/wrappers.py | Apache-2.0 |
def default_transforms():
"""Default transforms for imgaug.
Implement RandAugment by imgaug.
Please visit `https://arxiv.org/abs/1909.13719` for more information.
Augmenters and hyper parameters are borrowed from the following repo:
https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py # noqa
The augmenter ``SolarizeAdd`` is missing since imgaug doesn't support it.
Returns:
dict: The constructed RandAugment transforms.
"""
# RandAugment hyper params
num_augmenters = 2
cur_magnitude, max_magnitude = 9, 10
cur_level = 1.0 * cur_magnitude / max_magnitude
return [
dict(
type='SomeOf',
n=num_augmenters,
children=[
dict(
type='ShearX',
shear=17.19 * cur_level * random.choice([-1, 1])),
dict(
type='ShearY',
shear=17.19 * cur_level * random.choice([-1, 1])),
dict(
type='TranslateX',
percent=.2 * cur_level * random.choice([-1, 1])),
dict(
type='TranslateY',
percent=.2 * cur_level * random.choice([-1, 1])),
dict(
type='Rotate',
rotate=30 * cur_level * random.choice([-1, 1])),
dict(type='Posterize', nb_bits=max(1, int(4 * cur_level))),
dict(type='Solarize', threshold=256 * cur_level),
dict(type='EnhanceColor', factor=1.8 * cur_level + .1),
dict(type='EnhanceContrast', factor=1.8 * cur_level + .1),
dict(
type='EnhanceBrightness', factor=1.8 * cur_level + .1),
dict(type='EnhanceSharpness', factor=1.8 * cur_level + .1),
dict(type='Autocontrast', cutoff=0),
dict(type='Equalize'),
dict(type='Invert', p=1.),
dict(
type='Cutout',
nb_iterations=1,
size=0.2 * cur_level,
squared=True)
])
] | Default transforms for imgaug.
Implement RandAugment by imgaug.
Please visit `https://arxiv.org/abs/1909.13719` for more information.
Augmenters and hyper parameters are borrowed from the following repo:
https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py # noqa
The augmenter ``SolarizeAdd`` is missing since imgaug doesn't support it.
Returns:
dict: The constructed RandAugment transforms. | default_transforms | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/wrappers.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/wrappers.py | Apache-2.0 |
def imgaug_builder(self, cfg):
"""Import a module from imgaug.
It follows the logic of :func:`build_from_cfg`. Use a dict object to
create an iaa.Augmenter object.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
Returns:
obj:`iaa.Augmenter`: The constructed imgaug augmenter.
"""
import imgaug.augmenters as iaa
assert isinstance(cfg, dict) and 'type' in cfg
args = cfg.copy()
obj_type = args.pop('type')
if mmengine.is_str(obj_type):
obj_cls = getattr(iaa, obj_type) if hasattr(iaa, obj_type) \
else getattr(iaa.pillike, obj_type)
elif issubclass(obj_type, iaa.Augmenter):
obj_cls = obj_type
else:
raise TypeError(
f'type must be a str or valid type, but got {type(obj_type)}')
for aug_list_key in ['children', 'then_list', 'else_list']:
if aug_list_key in args:
args[aug_list_key] = [
self.imgaug_builder(child) for child in args[aug_list_key]
]
return obj_cls(**args) | Import a module from imgaug.
It follows the logic of :func:`build_from_cfg`. Use a dict object to
create an iaa.Augmenter object.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
Returns:
obj:`iaa.Augmenter`: The constructed imgaug augmenter. | imgaug_builder | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/wrappers.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/wrappers.py | Apache-2.0 |
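For illustration, a nested config such as the following (values assumed) resolves each `type` against `iaa` (or `iaa.pillike`) and recurses into `then_list`/`else_list`; the two snippets build equivalent augmenters:

import imgaug.augmenters as iaa

cfg = dict(
    type='Sometimes',
    p=0.5,
    then_list=[dict(type='GaussianBlur', sigma=(0, 1.0))],
    else_list=[dict(type='Fliplr', p=1.0)])
# what imgaug_builder(cfg) would construct, written out by hand:
aug = iaa.Sometimes(
    p=0.5,
    then_list=[iaa.GaussianBlur(sigma=(0, 1.0))],
    else_list=[iaa.Fliplr(p=1.0)])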
def transform(self, results):
"""Perform Imgaug augmentations.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
assert results['modality'] == 'RGB', 'Imgaug only supports RGB images.'
in_type = results['imgs'][0].dtype
cur_aug = self.aug.to_deterministic()
results['imgs'] = [
cur_aug.augment_image(frame) for frame in results['imgs']
]
img_h, img_w, _ = results['imgs'][0].shape
out_type = results['imgs'][0].dtype
assert in_type == out_type, \
('Imgaug input dtype and output dtype are not the same, '
f'got {in_type} and {out_type}')
if 'gt_bboxes' in results:
from imgaug.augmentables import bbs
bbox_list = [
bbs.BoundingBox(
x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])
for bbox in results['gt_bboxes']
]
bboxes = bbs.BoundingBoxesOnImage(
bbox_list, shape=results['img_shape'])
bbox_aug, *_ = cur_aug.augment_bounding_boxes([bboxes])
results['gt_bboxes'] = [[
max(bbox.x1, 0),
max(bbox.y1, 0),
min(bbox.x2, img_w),
min(bbox.y2, img_h)
] for bbox in bbox_aug.items]
if 'proposals' in results:
bbox_list = [
bbs.BoundingBox(
x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])
for bbox in results['proposals']
]
bboxes = bbs.BoundingBoxesOnImage(
bbox_list, shape=results['img_shape'])
bbox_aug, *_ = cur_aug.augment_bounding_boxes([bboxes])
results['proposals'] = [[
max(bbox.x1, 0),
max(bbox.y1, 0),
min(bbox.x2, img_w),
min(bbox.y2, img_h)
] for bbox in bbox_aug.items]
results['img_shape'] = (img_h, img_w)
return results | Perform Imgaug augmentations.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/wrappers.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/wrappers.py | Apache-2.0 |
def _init_lazy_if_proper(results, lazy):
"""Initialize lazy operation properly.
Make sure that a lazy operation is properly initialized,
and avoid a non-lazy operation accidentally getting mixed in.
Required keys in results are "imgs" if "img_shape" not in results,
otherwise, Required keys in results are "img_shape", add or modified keys
are "img_shape", "lazy".
Add or modified keys in "lazy" are "original_shape", "crop_bbox", "flip",
"flip_direction", "interpolation".
Args:
results (dict): A dict stores data pipeline result.
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
if 'img_shape' not in results:
results['img_shape'] = results['imgs'][0].shape[:2]
if lazy:
if 'lazy' not in results:
img_h, img_w = results['img_shape']
lazyop = dict()
lazyop['original_shape'] = results['img_shape']
lazyop['crop_bbox'] = np.array([0, 0, img_w, img_h],
dtype=np.float32)
lazyop['flip'] = False
lazyop['flip_direction'] = None
lazyop['interpolation'] = None
results['lazy'] = lazyop
else:
assert 'lazy' not in results, 'Use Fuse after lazy operations' | Initialize lazy operation properly.
Make sure that a lazy operation is properly initialized,
and avoid a non-lazy operation accidentally getting mixed in.
Required keys in results are "imgs" if "img_shape" not in results,
otherwise, Required keys in results are "img_shape", add or modified keys
are "img_shape", "lazy".
Add or modified keys in "lazy" are "original_shape", "crop_bbox", "flip",
"flip_direction", "interpolation".
Args:
results (dict): A dict stores data pipeline result.
lazy (bool): Determine whether to apply lazy operation. Default: False. | _init_lazy_if_proper | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def transform(self, results):
"""Fuse lazy operations.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if 'lazy' not in results:
raise ValueError('No lazy operation detected')
lazyop = results['lazy']
imgs = results['imgs']
# crop
left, top, right, bottom = lazyop['crop_bbox'].round().astype(int)
imgs = [img[top:bottom, left:right] for img in imgs]
# resize
img_h, img_w = results['img_shape']
if lazyop['interpolation'] is None:
interpolation = 'bilinear'
else:
interpolation = lazyop['interpolation']
imgs = [
mmcv.imresize(img, (img_w, img_h), interpolation=interpolation)
for img in imgs
]
# flip
if lazyop['flip']:
for img in imgs:
mmcv.imflip_(img, lazyop['flip_direction'])
results['imgs'] = imgs
del results['lazy']
return results | Fuse lazy operations.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
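A usage sketch: the geometric transforms run lazily (recording crops, resizes and flips without touching pixels), Flip stays last among them since the lazy ops above raise once a flip is recorded, and Fuse applies everything in a single pass:

train_pipeline = [
    dict(type='Resize', scale=(-1, 256), lazy=True),
    dict(type='RandomResizedCrop', lazy=True),
    dict(type='Resize', scale=(224, 224), keep_ratio=False, lazy=True),
    dict(type='Flip', flip_ratio=0.5, lazy=True),
    dict(type='Fuse'),
]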
def _crop_kps(kps, crop_bbox):
"""Static method for cropping keypoint."""
return kps - crop_bbox[:2] | Static method for cropping keypoint. | _crop_kps | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def _crop_imgs(imgs, crop_bbox):
"""Static method for cropping images."""
x1, y1, x2, y2 = crop_bbox
return [img[y1:y2, x1:x2] for img in imgs] | Static method for cropping images. | _crop_imgs | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def _box_crop(box, crop_bbox):
"""Crop the bounding boxes according to the crop_bbox.
Args:
box (np.ndarray): The bounding boxes.
crop_bbox(np.ndarray): The bbox used to crop the original image.
"""
x1, y1, x2, y2 = crop_bbox
img_w, img_h = x2 - x1, y2 - y1
box_ = box.copy()
box_[..., 0::2] = np.clip(box[..., 0::2] - x1, 0, img_w - 1)
box_[..., 1::2] = np.clip(box[..., 1::2] - y1, 0, img_h - 1)
return box_ | Crop the bounding boxes according to the crop_bbox.
Args:
box (np.ndarray): The bounding boxes.
crop_bbox(np.ndarray): The bbox used to crop the original image. | _box_crop | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
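Treating `_box_crop` above as a free function, a quick numeric check of the shift-and-clip behaviour:

import numpy as np

box = np.array([[10., 20., 60., 80.]])    # x1, y1, x2, y2
crop_bbox = np.array([30, 30, 100, 100])  # region kept from the image
# coordinates shift by the crop origin and clip to the cropped extent
print(_box_crop(box, crop_bbox))          # [[ 0.  0. 30. 50.]]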
def _all_box_crop(self, results, crop_bbox):
"""Crop the gt_bboxes and proposals in results according to crop_bbox.
Args:
results (dict): All information about the sample, which contain
'gt_bboxes' and 'proposals' (optional).
crop_bbox(np.ndarray): The bbox used to crop the original image.
"""
results['gt_bboxes'] = self._box_crop(results['gt_bboxes'], crop_bbox)
if 'proposals' in results and results['proposals'] is not None:
assert results['proposals'].shape[1] == 4
results['proposals'] = self._box_crop(results['proposals'],
crop_bbox)
return results | Crop the gt_bboxes and proposals in results according to crop_bbox.
Args:
results (dict): All information about the sample, which contain
'gt_bboxes' and 'proposals' (optional).
crop_bbox(np.ndarray): The bbox used to crop the original image. | _all_box_crop | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def transform(self, results):
"""Performs the RandomCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
img_h, img_w = results['img_shape']
assert self.size <= img_h and self.size <= img_w
y_offset = 0
x_offset = 0
if img_h > self.size:
y_offset = int(np.random.randint(0, img_h - self.size))
if img_w > self.size:
x_offset = int(np.random.randint(0, img_w - self.size))
if 'crop_quadruple' not in results:
results['crop_quadruple'] = np.array(
[0, 0, 1, 1], # x, y, w, h
dtype=np.float32)
x_ratio, y_ratio = x_offset / img_w, y_offset / img_h
w_ratio, h_ratio = self.size / img_w, self.size / img_h
old_crop_quadruple = results['crop_quadruple']
old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
new_crop_quadruple = [
old_x_ratio + x_ratio * old_w_ratio,
old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
h_ratio * old_h_ratio
]
results['crop_quadruple'] = np.array(
new_crop_quadruple, dtype=np.float32)
new_h, new_w = self.size, self.size
crop_bbox = np.array(
[x_offset, y_offset, x_offset + new_w, y_offset + new_h])
results['crop_bbox'] = crop_bbox
results['img_shape'] = (new_h, new_w)
if not self.lazy:
if 'keypoint' in results:
results['keypoint'] = self._crop_kps(results['keypoint'],
crop_bbox)
if 'imgs' in results:
results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = x_offset * (lazy_right - lazy_left) / img_w
right = (x_offset + new_w) * (lazy_right - lazy_left) / img_w
top = y_offset * (lazy_bottom - lazy_top) / img_h
bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
# Process entity boxes
if 'gt_bboxes' in results:
assert not self.lazy
results = self._all_box_crop(results, results['crop_bbox'])
return results | Performs the RandomCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
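The `crop_quadruple` update above composes successive relative crops so the stored quadruple stays expressed in the original frame; a small numeric sketch:

import numpy as np

old = np.array([0.1, 0.1, 0.8, 0.8], np.float32)  # earlier crop: x, y, w, h
x_r, y_r, w_r, h_r = 0.25, 0.25, 0.5, 0.5         # this crop, relative to `old`
new = [old[0] + x_r * old[2], old[1] + y_r * old[3],
       w_r * old[2], h_r * old[3]]
# -> [0.3, 0.3, 0.4, 0.4], relative to the original, uncropped frame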
def get_crop_bbox(img_shape,
area_range,
aspect_ratio_range,
max_attempts=10):
"""Get a crop bbox given the area range and aspect ratio range.
Args:
img_shape (Tuple[int]): Image shape
area_range (Tuple[float]): The candidate area scales range of
output cropped images. Default: (0.08, 1.0).
aspect_ratio_range (Tuple[float]): The candidate aspect
ratio range of output cropped images. Default: (3 / 4, 4 / 3).
max_attempts (int): The maximum of attempts. Default: 10.
max_attempts (int): Max attempts times to generate random candidate
bounding box. If it doesn't qualified one, the center bounding
box will be used.
Returns:
(list[int]) A random crop bbox within the area range and aspect
ratio range.
"""
assert 0 < area_range[0] <= area_range[1] <= 1
assert 0 < aspect_ratio_range[0] <= aspect_ratio_range[1]
img_h, img_w = img_shape
area = img_h * img_w
min_ar, max_ar = aspect_ratio_range
aspect_ratios = np.exp(
np.random.uniform(
np.log(min_ar), np.log(max_ar), size=max_attempts))
target_areas = np.random.uniform(*area_range, size=max_attempts) * area
candidate_crop_w = np.round(np.sqrt(target_areas *
aspect_ratios)).astype(np.int32)
candidate_crop_h = np.round(np.sqrt(target_areas /
aspect_ratios)).astype(np.int32)
for i in range(max_attempts):
crop_w = candidate_crop_w[i]
crop_h = candidate_crop_h[i]
if crop_h <= img_h and crop_w <= img_w:
x_offset = random.randint(0, img_w - crop_w)
y_offset = random.randint(0, img_h - crop_h)
return x_offset, y_offset, x_offset + crop_w, y_offset + crop_h
# Fallback
crop_size = min(img_h, img_w)
x_offset = (img_w - crop_size) // 2
y_offset = (img_h - crop_size) // 2
return x_offset, y_offset, x_offset + crop_size, y_offset + crop_size | Get a crop bbox given the area range and aspect ratio range.
Args:
img_shape (Tuple[int]): Image shape
area_range (Tuple[float]): The candidate area scales range of
output cropped images. Default: (0.08, 1.0).
aspect_ratio_range (Tuple[float]): The candidate aspect
ratio range of output cropped images. Default: (3 / 4, 4 / 3).
max_attempts (int): The maximum number of attempts to generate a
random candidate bounding box. If no qualified candidate is found,
the center bounding box will be used. Default: 10.
Returns:
(list[int]) A random crop bbox within the area range and aspect
ratio range. | get_crop_bbox | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
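Treating `get_crop_bbox` above as a free function, a quick usage sketch; whatever the random draws, the returned bbox stays inside the image thanks to the center-crop fallback:

import random

import numpy as np

np.random.seed(0)
random.seed(0)
x1, y1, x2, y2 = get_crop_bbox((240, 320), (0.08, 1.0), (3 / 4, 4 / 3))
assert 0 <= x1 < x2 <= 320 and 0 <= y1 < y2 <= 240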
def transform(self, results):
"""Performs the RandomResizeCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
img_h, img_w = results['img_shape']
left, top, right, bottom = self.get_crop_bbox(
(img_h, img_w), self.area_range, self.aspect_ratio_range)
new_h, new_w = bottom - top, right - left
if 'crop_quadruple' not in results:
results['crop_quadruple'] = np.array(
[0, 0, 1, 1], # x, y, w, h
dtype=np.float32)
x_ratio, y_ratio = left / img_w, top / img_h
w_ratio, h_ratio = new_w / img_w, new_h / img_h
old_crop_quadruple = results['crop_quadruple']
old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
new_crop_quadruple = [
old_x_ratio + x_ratio * old_w_ratio,
old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
h_ratio * old_h_ratio
]
results['crop_quadruple'] = np.array(
new_crop_quadruple, dtype=np.float32)
crop_bbox = np.array([left, top, right, bottom])
results['crop_bbox'] = crop_bbox
results['img_shape'] = (new_h, new_w)
if not self.lazy:
if 'keypoint' in results:
results['keypoint'] = self._crop_kps(results['keypoint'],
crop_bbox)
if 'imgs' in results:
results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = left * (lazy_right - lazy_left) / img_w
right = right * (lazy_right - lazy_left) / img_w
top = top * (lazy_bottom - lazy_top) / img_h
bottom = bottom * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
if 'gt_bboxes' in results:
assert not self.lazy
results = self._all_box_crop(results, results['crop_bbox'])
return results | Performs the RandomResizeCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def transform(self, results):
"""Performs the MultiScaleCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
img_h, img_w = results['img_shape']
base_size = min(img_h, img_w)
crop_sizes = [int(base_size * s) for s in self.scales]
candidate_sizes = []
for i, h in enumerate(crop_sizes):
for j, w in enumerate(crop_sizes):
if abs(i - j) <= self.max_wh_scale_gap:
candidate_sizes.append([w, h])
crop_size = random.choice(candidate_sizes)
for i in range(2):
if abs(crop_size[i] - self.input_size[i]) < 3:
crop_size[i] = self.input_size[i]
crop_w, crop_h = crop_size
if self.random_crop:
x_offset = random.randint(0, img_w - crop_w)
y_offset = random.randint(0, img_h - crop_h)
else:
w_step = (img_w - crop_w) // 4
h_step = (img_h - crop_h) // 4
candidate_offsets = [
(0, 0), # upper left
(4 * w_step, 0), # upper right
(0, 4 * h_step), # lower left
(4 * w_step, 4 * h_step), # lower right
(2 * w_step, 2 * h_step), # center
]
if self.num_fixed_crops == 13:
extra_candidate_offsets = [
(0, 2 * h_step), # center left
(4 * w_step, 2 * h_step), # center right
(2 * w_step, 4 * h_step), # lower center
(2 * w_step, 0 * h_step), # upper center
(1 * w_step, 1 * h_step), # upper left quarter
(3 * w_step, 1 * h_step), # upper right quarter
(1 * w_step, 3 * h_step), # lower left quarter
(3 * w_step, 3 * h_step) # lower right quarter
]
candidate_offsets.extend(extra_candidate_offsets)
x_offset, y_offset = random.choice(candidate_offsets)
new_h, new_w = crop_h, crop_w
crop_bbox = np.array(
[x_offset, y_offset, x_offset + new_w, y_offset + new_h])
results['crop_bbox'] = crop_bbox
results['img_shape'] = (new_h, new_w)
results['scales'] = self.scales
if 'crop_quadruple' not in results:
results['crop_quadruple'] = np.array(
[0, 0, 1, 1], # x, y, w, h
dtype=np.float32)
x_ratio, y_ratio = x_offset / img_w, y_offset / img_h
w_ratio, h_ratio = new_w / img_w, new_h / img_h
old_crop_quadruple = results['crop_quadruple']
old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
new_crop_quadruple = [
old_x_ratio + x_ratio * old_w_ratio,
old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
h_ratio * old_h_ratio
]
results['crop_quadruple'] = np.array(
new_crop_quadruple, dtype=np.float32)
if not self.lazy:
if 'keypoint' in results:
results['keypoint'] = self._crop_kps(results['keypoint'],
crop_bbox)
if 'imgs' in results:
results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = x_offset * (lazy_right - lazy_left) / img_w
right = (x_offset + new_w) * (lazy_right - lazy_left) / img_w
top = y_offset * (lazy_bottom - lazy_top) / img_h
bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
if 'gt_bboxes' in results:
assert not self.lazy
results = self._all_box_crop(results, results['crop_bbox'])
return results | Performs the MultiScaleCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
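A small sketch of how the candidate crop sizes above are enumerated, with assumed scales; only near-square width/height pairs survive the `max_wh_scale_gap` filter:

img_h, img_w = 240, 320
scales, max_wh_scale_gap = (1, 0.875, 0.75, 0.66), 1
base = min(img_h, img_w)                      # 240
crop_sizes = [int(base * s) for s in scales]  # [240, 210, 180, 158]
candidates = [[w, h] for i, h in enumerate(crop_sizes)
              for j, w in enumerate(crop_sizes)
              if abs(i - j) <= max_wh_scale_gap]
# e.g. [240, 240], [210, 240], [240, 210], [210, 210], [180, 210], ...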
def _resize_imgs(self, imgs, new_w, new_h):
"""Static method for resizing keypoint."""
return [
mmcv.imresize(
img, (new_w, new_h), interpolation=self.interpolation)
for img in imgs
] | Static method for resizing images. | _resize_imgs | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def _resize_kps(kps, scale_factor):
"""Static method for resizing keypoint."""
return kps * scale_factor | Static method for resizing keypoint. | _resize_kps | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def _box_resize(box, scale_factor):
"""Rescale the bounding boxes according to the scale_factor.
Args:
box (np.ndarray): The bounding boxes.
scale_factor (np.ndarray): The scale factor used for rescaling.
"""
assert len(scale_factor) == 2
scale_factor = np.concatenate([scale_factor, scale_factor])
return box * scale_factor | Rescale the bounding boxes according to the scale_factor.
Args:
box (np.ndarray): The bounding boxes.
scale_factor (np.ndarray): The scale factor used for rescaling. | _box_resize | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
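Treating `_box_resize` above as a free function; the two-element scale factor is duplicated so it broadcasts over all four box coordinates:

import numpy as np

box = np.array([[10., 20., 60., 80.]])
scale_factor = np.array([0.5, 2.0])    # (w_ratio, h_ratio)
print(_box_resize(box, scale_factor))  # [[  5.  40.  30. 160.]]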
def transform(self, results):
"""Performs the Resize augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
if 'scale_factor' not in results:
results['scale_factor'] = np.array([1, 1], dtype=np.float32)
img_h, img_w = results['img_shape']
if self.keep_ratio:
new_w, new_h = mmcv.rescale_size((img_w, img_h), self.scale)
else:
new_w, new_h = self.scale
self.scale_factor = np.array([new_w / img_w, new_h / img_h],
dtype=np.float32)
results['img_shape'] = (new_h, new_w)
results['keep_ratio'] = self.keep_ratio
results['scale_factor'] = results['scale_factor'] * self.scale_factor
if not self.lazy:
if 'imgs' in results:
results['imgs'] = self._resize_imgs(results['imgs'], new_w,
new_h)
if 'keypoint' in results:
results['keypoint'] = self._resize_kps(results['keypoint'],
self.scale_factor)
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
lazyop['interpolation'] = self.interpolation
if 'gt_bboxes' in results:
assert not self.lazy
results['gt_bboxes'] = self._box_resize(results['gt_bboxes'],
self.scale_factor)
if 'proposals' in results and results['proposals'] is not None:
assert results['proposals'].shape[1] == 4
results['proposals'] = self._box_resize(
results['proposals'], self.scale_factor)
return results | Performs the Resize augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def transform(self, results):
"""Performs the Resize augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
short_edge = np.random.randint(self.scale_range[0],
self.scale_range[1] + 1)
resize = Resize((-1, short_edge),
keep_ratio=True,
interpolation=self.interpolation,
lazy=False)
results = resize(results)
results['short_edge'] = short_edge
return results | Performs the Resize augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def _flip_imgs(self, imgs, modality):
"""Utility function for flipping images."""
_ = [mmcv.imflip_(img, self.direction) for img in imgs]
lt = len(imgs)
if modality == 'Flow':
# The 1st frame of each 2 frames is flow-x
for i in range(0, lt, 2):
imgs[i] = mmcv.iminvert(imgs[i])
return imgs | Utility function for flipping images. | _flip_imgs | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def _flip_kps(self, kps, kpscores, img_width):
"""Utility function for flipping keypoint."""
kp_x = kps[..., 0]
kp_x[kp_x != 0] = img_width - kp_x[kp_x != 0]
new_order = list(range(kps.shape[2]))
if self.left_kp is not None and self.right_kp is not None:
for left, right in zip(self.left_kp, self.right_kp):
new_order[left] = right
new_order[right] = left
kps = kps[:, :, new_order]
if kpscores is not None:
kpscores = kpscores[:, :, new_order]
return kps, kpscores | Utility function for flipping keypoint. | _flip_kps | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def _box_flip(box, img_width):
"""Flip the bounding boxes given the width of the image.
Args:
box (np.ndarray): The bounding boxes.
img_width (int): The img width.
"""
box_ = box.copy()
box_[..., 0::4] = img_width - box[..., 2::4]
box_[..., 2::4] = img_width - box[..., 0::4]
return box_ | Flip the bounding boxes given the width of the image.
Args:
box (np.ndarray): The bounding boxes.
img_width (int): The img width. | _box_flip | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
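Treating `_box_flip` above as a free function, a quick numeric check; x1 and x2 swap roles under the mirror while y coordinates are untouched:

import numpy as np

box = np.array([[10., 20., 60., 80.]])
print(_box_flip(box, 100))  # [[40. 20. 90. 80.]]: new_x1 = W - old_x2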
def transform(self, results):
"""Performs the Flip augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
assert self.direction == 'horizontal', (
'Only horizontal flips are '
'supported for human keypoints')
modality = results['modality']
if modality == 'Flow':
assert self.direction == 'horizontal'
flip = np.random.rand() < self.flip_ratio
results['flip'] = flip
results['flip_direction'] = self.direction
img_width = results['img_shape'][1]
if self.flip_label_map is not None and flip:
results['label'] = self.flip_label_map.get(results['label'],
results['label'])
if not self.lazy:
if flip:
if 'imgs' in results:
results['imgs'] = self._flip_imgs(results['imgs'],
modality)
if 'keypoint' in results:
kp = results['keypoint']
kpscore = results.get('keypoint_score', None)
kp, kpscore = self._flip_kps(kp, kpscore, img_width)
results['keypoint'] = kp
if 'keypoint_score' in results:
results['keypoint_score'] = kpscore
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Use one Flip please')
lazyop['flip'] = flip
lazyop['flip_direction'] = self.direction
if 'gt_bboxes' in results and flip:
assert not self.lazy and self.direction == 'horizontal'
width = results['img_shape'][1]
results['gt_bboxes'] = self._box_flip(results['gt_bboxes'], width)
if 'proposals' in results and results['proposals'] is not None:
assert results['proposals'].shape[1] == 4
results['proposals'] = self._box_flip(results['proposals'],
width)
return results | Performs the Flip augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def transform(self, results):
"""Perform ColorJitter.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
imgs = results['imgs']
num_clips, clip_len = 1, len(imgs)
new_imgs = []
for i in range(num_clips):
b = np.random.uniform(
low=self.brightness[0], high=self.brightness[1])
c = np.random.uniform(low=self.contrast[0], high=self.contrast[1])
s = np.random.uniform(
low=self.saturation[0], high=self.saturation[1])
h = np.random.uniform(low=self.hue[0], high=self.hue[1])
start, end = i * clip_len, (i + 1) * clip_len
for img in imgs[start:end]:
img = img.astype(np.float32)
for fn_id in self.fn_idx:
if fn_id == 0 and b != 1:
img *= b
if fn_id == 1 and c != 1:
img = self.adjust_contrast(img, c)
if fn_id == 2 and s != 1:
img = self.adjust_saturation(img, s)
if fn_id == 3 and h != 0:
img = self.adjust_hue(img, h)
img = np.clip(img, 0, 255).astype(np.uint8)
new_imgs.append(img)
results['imgs'] = new_imgs
return results | Perform ColorJitter.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def transform(self, results):
"""Performs the CenterCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
img_h, img_w = results['img_shape']
crop_w, crop_h = self.crop_size
left = (img_w - crop_w) // 2
top = (img_h - crop_h) // 2
right = left + crop_w
bottom = top + crop_h
new_h, new_w = bottom - top, right - left
crop_bbox = np.array([left, top, right, bottom])
results['crop_bbox'] = crop_bbox
results['img_shape'] = (new_h, new_w)
if 'crop_quadruple' not in results:
results['crop_quadruple'] = np.array(
[0, 0, 1, 1], # x, y, w, h
dtype=np.float32)
x_ratio, y_ratio = left / img_w, top / img_h
w_ratio, h_ratio = new_w / img_w, new_h / img_h
old_crop_quadruple = results['crop_quadruple']
old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
new_crop_quadruple = [
old_x_ratio + x_ratio * old_w_ratio,
old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
h_ratio * old_h_ratio
]
results['crop_quadruple'] = np.array(
new_crop_quadruple, dtype=np.float32)
if not self.lazy:
if 'keypoint' in results:
results['keypoint'] = self._crop_kps(results['keypoint'],
crop_bbox)
if 'imgs' in results:
results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = left * (lazy_right - lazy_left) / img_w
right = right * (lazy_right - lazy_left) / img_w
top = top * (lazy_bottom - lazy_top) / img_h
bottom = bottom * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
if 'gt_bboxes' in results:
assert not self.lazy
results = self._all_box_crop(results, results['crop_bbox'])
return results | Performs the CenterCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def transform(self, results):
"""Performs the ThreeCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, False)
if 'gt_bboxes' in results or 'proposals' in results:
warnings.warn('ThreeCrop cannot process bounding boxes')
imgs = results['imgs']
img_h, img_w = results['imgs'][0].shape[:2]
crop_w, crop_h = self.crop_size
assert crop_h == img_h or crop_w == img_w
if crop_h == img_h:
w_step = (img_w - crop_w) // 2
offsets = [
(0, 0), # left
(2 * w_step, 0), # right
(w_step, 0), # middle
]
elif crop_w == img_w:
h_step = (img_h - crop_h) // 2
offsets = [
(0, 0), # top
(0, 2 * h_step), # down
(0, h_step), # middle
]
cropped = []
crop_bboxes = []
for x_offset, y_offset in offsets:
bbox = [x_offset, y_offset, x_offset + crop_w, y_offset + crop_h]
crop = [
img[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w]
for img in imgs
]
cropped.extend(crop)
crop_bboxes.extend([bbox for _ in range(len(imgs))])
crop_bboxes = np.array(crop_bboxes)
results['imgs'] = cropped
results['crop_bbox'] = crop_bboxes
results['img_shape'] = results['imgs'][0].shape[:2]
return results | Performs the ThreeCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
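A quick numeric sketch of the offsets above for a 256x320 frame and a 256x256 crop (the `crop_h == img_h` branch):

img_w, img_h, crop_w, crop_h = 320, 256, 256, 256
w_step = (img_w - crop_w) // 2                   # 32
offsets = [(0, 0), (2 * w_step, 0), (w_step, 0)]
print(offsets)  # [(0, 0), (64, 0), (32, 0)]: left, right and center crops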
def transform(self, results):
"""Performs the TenCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, False)
if 'gt_bboxes' in results or 'proposals' in results:
warnings.warn('TenCrop cannot process bounding boxes')
imgs = results['imgs']
img_h, img_w = results['imgs'][0].shape[:2]
crop_w, crop_h = self.crop_size
w_step = (img_w - crop_w) // 4
h_step = (img_h - crop_h) // 4
offsets = [
(0, 0), # upper left
(4 * w_step, 0), # upper right
(0, 4 * h_step), # lower left
(4 * w_step, 4 * h_step), # lower right
(2 * w_step, 2 * h_step), # center
]
img_crops = list()
crop_bboxes = list()
for x_offset, y_offsets in offsets:
crop = [
img[y_offsets:y_offsets + crop_h, x_offset:x_offset + crop_w]
for img in imgs
]
flip_crop = [np.flip(c, axis=1).copy() for c in crop]
bbox = [x_offset, y_offsets, x_offset + crop_w, y_offsets + crop_h]
img_crops.extend(crop)
img_crops.extend(flip_crop)
crop_bboxes.extend([bbox for _ in range(len(imgs) * 2)])
crop_bboxes = np.array(crop_bboxes)
results['imgs'] = img_crops
results['crop_bbox'] = crop_bboxes
results['img_shape'] = results['imgs'][0].shape[:2]
return results | Performs the TenCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def _img_fill_pixels(self, img, top, left, h, w):
"""Fill pixels to the patch of image."""
if self.mode == 'const':
patch = np.empty((h, w, 3), dtype=np.uint8)
patch[:, :] = np.array(self.fill_color, dtype=np.uint8)
elif self.fill_std is None:
# Uniform distribution
patch = np.random.uniform(0, 256, (h, w, 3)).astype(np.uint8)
else:
# Normal distribution
patch = np.random.normal(self.fill_color, self.fill_std, (h, w, 3))
patch = np.clip(patch.astype(np.int32), 0, 255).astype(np.uint8)
img[top:top + h, left:left + w] = patch
return img | Fill pixels into the patch of the image. | _img_fill_pixels | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def _fill_pixels(self, imgs, top, left, h, w):
"""Fill pixels to the patch of each image in frame clip."""
return [self._img_fill_pixels(img, top, left, h, w) for img in imgs] | Fill pixels into the patch of each image in the frame clip. | _fill_pixels | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def random_disable(self):
"""Randomly disable the transform."""
return np.random.rand() > self.erase_prob | Randomly disable the transform. | random_disable | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def random_patch(self, img_h, img_w):
"""Randomly generate patch the erase."""
# convert the aspect ratio to log space to equally handle width and
# height.
log_aspect_range = np.log(
np.array(self.aspect_range, dtype=np.float32))
aspect_ratio = np.exp(np.random.uniform(*log_aspect_range))
area = img_h * img_w
area *= np.random.uniform(self.min_area_ratio, self.max_area_ratio)
h = min(int(round(np.sqrt(area * aspect_ratio))), img_h)
w = min(int(round(np.sqrt(area / aspect_ratio))), img_w)
top = np.random.randint(0, img_h - h) if img_h > h else 0
left = np.random.randint(0, img_w - w) if img_w > w else 0
return top, left, h, w | Randomly generate the patch to erase. | random_patch | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
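The log-space draw in `random_patch` above makes reciprocal aspect ratios equally likely; a quick check with assumed bounds:

import numpy as np

aspect_range = (0.3, 1 / 0.3)
log_range = np.log(np.array(aspect_range, dtype=np.float32))
samples = np.exp(np.random.uniform(*log_range, size=10000))
# r and 1/r are symmetric around 1 in log space, so about half fall below 1
assert abs(np.mean(samples < 1) - 0.5) < 0.05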
def transform(self, results):
"""
Args:
results (dict): Results dict from pipeline
Returns:
dict: Results after the transformation.
"""
if self.random_disable():
return results
imgs = results['imgs']
img_h, img_w = imgs[0].shape[:2]
imgs = self._fill_pixels(imgs, *self.random_patch(img_h, img_w))
results['imgs'] = imgs
return results | Args:
results (dict): Results dict from pipeline
Returns:
dict: Results after the transformation. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/processing.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py | Apache-2.0 |
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`CLIPTokenize`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
try:
import clip
except ImportError:
raise ImportError('Please run `pip install '
'git+https://github.com/openai/CLIP.git` '
'to install clip first. ')
text = results['text']
text_tokenized = clip.tokenize(text)[0]
results['text'] = text_tokenized
return results | The transform function of :class:`CLIPTokenize`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/text_transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/text_transforms.py | Apache-2.0 |
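A usage sketch, assuming the `clip` package from the install hint above is available:

import clip

tokens = clip.tokenize('a person riding a bike')[0]
print(tokens.shape)  # torch.Size([77]): CLIP's fixed context length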
def transform(self, results: dict) -> dict:
"""Functions to load image.
Args:
results (dict): Result dict from :obj:``mmcv.BaseDataset``.
Returns:
dict: The dict contains loaded image and meta information.
"""
filename = results['img_path']
try:
img_bytes = self.file_client.get(filename)
img = mmcv.imfrombytes(
img_bytes,
flag=self.color_type,
channel_order='rgb',
backend=self.imdecode_backend)
except Exception as e:
if self.ignore_empty:
return None
else:
raise e
if self.to_float32:
img = img.astype(np.float32)
results['img'] = img
results['img_shape'] = img.shape[:2]
results['ori_shape'] = img.shape[:2]
return results | Functions to load image.
Args:
results (dict): Result dict from :obj:``mmcv.BaseDataset``.
Returns:
dict: The dict contains loaded image and meta information. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def init_hvu_info(self, categories, category_nums):
"""Initialize hvu information."""
assert len(categories) == len(category_nums)
self.categories = categories
self.category_nums = category_nums
self.num_categories = len(self.categories)
self.num_tags = sum(self.category_nums)
self.category2num = dict(zip(categories, category_nums))
self.start_idx = [0]
for i in range(self.num_categories - 1):
self.start_idx.append(self.start_idx[-1] + self.category_nums[i])
self.category2startidx = dict(zip(categories, self.start_idx))
self.hvu_initialized = True | Initialize hvu information. | init_hvu_info | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results):
"""Convert the label dictionary to 3 tensors: "label", "mask" and
"category_mask".
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if not self.hvu_initialized:
self.init_hvu_info(results['categories'], results['category_nums'])
onehot = torch.zeros(self.num_tags)
onehot_mask = torch.zeros(self.num_tags)
category_mask = torch.zeros(self.num_categories)
for category, tags in results['label'].items():
# skip if not training on this category
if category not in self.categories:
continue
category_mask[self.categories.index(category)] = 1.
start_idx = self.category2startidx[category]
category_num = self.category2num[category]
tags = [idx + start_idx for idx in tags]
onehot[tags] = 1.
onehot_mask[start_idx:category_num + start_idx] = 1.
results['label'] = onehot
results['mask'] = onehot_mask
results['category_mask'] = category_mask
return results | Convert the label dictionary to 3 tensors: "label", "mask" and
"category_mask".
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
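A hand-worked sketch of the tensor layout produced above, with assumed categories ('action' occupying tag indices 0-2, 'object' indices 3-6):

import torch

category_nums = [3, 4]                    # 'action', 'object'
label = {'action': [1], 'object': [0, 2]}
onehot = torch.zeros(sum(category_nums))  # 7 tags in total
onehot[[0 + 1]] = 1.                      # action tag 1 at offset 0
onehot[[3 + 0, 3 + 2]] = 1.               # object tags 0 and 2 at offset 3
print(onehot)  # tensor([0., 1., 0., 1., 0., 1., 0.])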
def _get_train_clips(self, num_frames: int,
ori_clip_len: float) -> np.array:
"""Get clip offsets in train mode.
It will calculate the average interval for selected frames,
and randomly shift them within offsets between [0, avg_interval].
If the total number of frames is smaller than the number of clips
or the original clip length, it will return all-zero indices.
Args:
num_frames (int): Total number of frame in the video.
ori_clip_len (float): length of original sample clip.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
if self.keep_tail_frames:
avg_interval = (num_frames - ori_clip_len + 1) / float(
self.num_clips)
if num_frames > ori_clip_len - 1:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = (base_offsets + np.random.uniform(
0, avg_interval, self.num_clips)).astype(np.int32)
else:
clip_offsets = np.zeros((self.num_clips, ), dtype=np.int32)
else:
avg_interval = (num_frames - ori_clip_len + 1) // self.num_clips
if avg_interval > 0:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = base_offsets + np.random.randint(
avg_interval, size=self.num_clips)
elif num_frames > max(self.num_clips, ori_clip_len):
clip_offsets = np.sort(
np.random.randint(
num_frames - ori_clip_len + 1, size=self.num_clips))
elif avg_interval == 0:
ratio = (num_frames - ori_clip_len + 1.0) / self.num_clips
clip_offsets = np.around(np.arange(self.num_clips) * ratio)
else:
clip_offsets = np.zeros((self.num_clips, ), dtype=np.int32)
return clip_offsets | Get clip offsets in train mode.
It will calculate the average interval for selected frames,
and randomly shift them within offsets between [0, avg_interval].
If the total number of frames is smaller than the number of clips
or the original clip length, it will return all-zero indices.
Args:
num_frames (int): Total number of frame in the video.
ori_clip_len (float): length of original sample clip.
Returns:
np.ndarray: Sampled frame indices in train mode. | _get_train_clips | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
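A numeric sketch of the train-mode offsets above (the `avg_interval > 0` branch), with assumed sizes:

import numpy as np

np.random.seed(0)
num_frames, num_clips, ori_clip_len = 300, 3, 32
avg_interval = (num_frames - ori_clip_len + 1) // num_clips  # 89
base_offsets = np.arange(num_clips) * avg_interval           # [0, 89, 178]
clip_offsets = base_offsets + np.random.randint(avg_interval, size=num_clips)
# each clip start is jittered independently within its own 89-frame segment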
def _get_test_clips(self, num_frames: int,
ori_clip_len: float) -> np.array:
"""Get clip offsets in test mode.
If the total number of frames is
not enough, it will return all zero indices.
Args:
num_frames (int): Total number of frame in the video.
ori_clip_len (float): length of original sample clip.
Returns:
np.ndarray: Sampled frame indices in test mode.
"""
if self.clip_len == 1: # 2D recognizer
# assert self.frame_interval == 1
avg_interval = num_frames / float(self.num_clips)
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = base_offsets + avg_interval / 2.0
if self.twice_sample:
clip_offsets = np.concatenate([clip_offsets, base_offsets])
else: # 3D recognizer
max_offset = max(num_frames - ori_clip_len, 0)
if self.twice_sample:
num_clips = self.num_clips * 2
else:
num_clips = self.num_clips
if num_clips > 1:
num_segments = self.num_clips - 1
# align test sample strategy with `PySlowFast` repo
if self.target_fps is not None:
offset_between = np.floor(max_offset / float(num_segments))
clip_offsets = np.arange(num_clips) * offset_between
else:
offset_between = max_offset / float(num_segments)
clip_offsets = np.arange(num_clips) * offset_between
clip_offsets = np.round(clip_offsets)
else:
clip_offsets = np.array([max_offset // 2])
return clip_offsets | Get clip offsets in test mode.
If the total number of frames is
not enough, it will return all zero indices.
Args:
num_frames (int): Total number of frame in the video.
ori_clip_len (float): length of original sample clip.
Returns:
np.ndarray: Sampled frame indices in test mode. | _get_test_clips | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def _sample_clips(self, num_frames: int, ori_clip_len: float) -> np.array:
"""Choose clip offsets for the video in a given mode.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled clip offsets.
"""
if self.test_mode:
clip_offsets = self._get_test_clips(num_frames, ori_clip_len)
else:
clip_offsets = self._get_train_clips(num_frames, ori_clip_len)
return clip_offsets | Choose clip offsets for the video in a given mode.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled clip offsets. | _sample_clips | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def _get_ori_clip_len(self, fps_scale_ratio: float) -> float:
"""calculate length of clip segment for different strategy.
Args:
fps_scale_ratio (float): Scale ratio to adjust fps.
"""
if self.target_fps is not None:
# align test sample strategy with `PySlowFast` repo
ori_clip_len = self.clip_len * self.frame_interval
ori_clip_len = np.maximum(1, ori_clip_len * fps_scale_ratio)
elif self.test_mode:
ori_clip_len = (self.clip_len - 1) * self.frame_interval + 1
else:
ori_clip_len = self.clip_len * self.frame_interval
return ori_clip_len | Calculate the length of the clip segment for different strategies.
Args:
fps_scale_ratio (float): Scale ratio to adjust fps. | _get_ori_clip_len | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
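A quick numeric check of the branches above, with assumed parameters; note the off-by-one difference between the test-mode span and the train-mode span.
clip_len, frame_interval = 32, 2  # assumed example values
# test mode without target_fps: (32 - 1) * 2 + 1 = 63 frames are spanned
print((clip_len - 1) * frame_interval + 1)  # 63
# train mode (or with target_fps, before fps scaling): 32 * 2 = 64
print(clip_len * frame_interval)            # 64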
def transform(self, results: dict) -> dict:
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
# if fps is unavailable, keep fps_scale_ratio at 1.0 so that
# resampling to `target_fps` performs nothing
fps = results.get('avg_fps')
if self.target_fps is None or not fps:
fps_scale_ratio = 1.0
else:
fps_scale_ratio = fps / self.target_fps
ori_clip_len = self._get_ori_clip_len(fps_scale_ratio)
clip_offsets = self._sample_clips(total_frames, ori_clip_len)
if self.target_fps:
frame_inds = clip_offsets[:, None] + np.linspace(
0, ori_clip_len - 1, self.clip_len).astype(np.int32)
else:
frame_inds = clip_offsets[:, None] + np.arange(
self.clip_len)[None, :] * self.frame_interval
frame_inds = np.concatenate(frame_inds)
if self.temporal_jitter:
perframe_offsets = np.random.randint(
self.frame_interval, size=len(frame_inds))
frame_inds += perframe_offsets
frame_inds = frame_inds.reshape((-1, self.clip_len))
if self.out_of_bound_opt == 'loop':
frame_inds = np.mod(frame_inds, total_frames)
elif self.out_of_bound_opt == 'repeat_last':
safe_inds = frame_inds < total_frames
unsafe_inds = 1 - safe_inds
last_ind = np.max(safe_inds * frame_inds, axis=1)
new_inds = (safe_inds * frame_inds + (unsafe_inds.T * last_ind).T)
frame_inds = new_inds
else:
raise ValueError('Illegal out_of_bound option.')
start_index = results['start_index']
frame_inds = np.concatenate(frame_inds) + start_index
results['frame_inds'] = frame_inds.astype(np.int32)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = self.num_clips
return results | Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
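A hedged usage sketch of the transform above; SampleFrames is the registered mmaction2 transform, while the field values here are illustrative placeholders.
import numpy as np
from mmaction.datasets.transforms import SampleFrames

results = dict(total_frames=120, start_index=0)  # assumed clip metadata
sampler = SampleFrames(clip_len=8, frame_interval=2, num_clips=1)
results = sampler(results)
print(results['frame_inds'])  # 8 indices, spaced by frame_interval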
def _get_sample_clips(self, num_frames: int) -> np.ndarray:
"""To sample an n-frame clip from the video. UniformSample basically
divides the video into n segments of equal length and randomly samples
one frame from each segment. When the duration of video frames is
shorter than the desired length of the target clip, this approach will
duplicate the sampled frame instead of looping the sample in "loop"
mode. In the test mode, when we need to sample multiple clips,
specifically 'n' clips, this method will further divide the segments
based on the number of clips to be sampled. The 'i-th' clip will
sample the frame located at the position 'i * len(segment) / n'
within the segment.
Args:
num_frames (int): Total number of frames in the video.
Returns:
seq (np.ndarray): The indices of the frames sampled from the video.
"""
seg_size = float(num_frames - 1) / self.clip_len
inds = []
if not self.test_mode:
for i in range(self.clip_len):
start = int(np.round(seg_size * i))
end = int(np.round(seg_size * (i + 1)))
inds.append(np.random.randint(start, end + 1))
else:
duration = seg_size / (self.num_clips + 1)
for k in range(self.num_clips):
for i in range(self.clip_len):
start = int(np.round(seg_size * i))
frame_index = start + int(duration * (k + 1))
inds.append(frame_index)
return np.array(inds) | To sample an n-frame clip from the video. UniformSample basically
divides the video into n segments of equal length and randomly samples
one frame from each segment. When the duration of video frames is
shorter than the desired length of the target clip, this approach will
duplicate the sampled frame instead of looping the sample in "loop"
mode. In the test mode, when we need to sample multiple clips,
specifically 'n' clips, this method will further divide the segments
based on the number of clips to be sampled. The 'i-th' clip will
sample the frame located at the position 'i * len(segment) / n'
within the segment.
Args:
num_frames (int): Total number of frames in the video.
Returns:
seq (np.ndarray): The indices of the frames sampled from the video. | _get_sample_clips | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
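The train-mode branch above, replayed standalone with assumed values: one random frame is drawn from each of clip_len equal segments.
import numpy as np

num_frames, clip_len = 30, 8  # assumed example values
seg_size = float(num_frames - 1) / clip_len
inds = [np.random.randint(int(np.round(seg_size * i)),
                          int(np.round(seg_size * (i + 1))) + 1)
        for i in range(clip_len)]
print(inds)  # 8 indices, one drawn from each of the 8 segments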
def transform(self, results: Dict) -> Dict:
"""Perform the Uniform Sampling.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
num_frames = results['total_frames']
inds = self._get_sample_clips(num_frames)
start_index = results['start_index']
inds = inds + start_index
results['frame_inds'] = inds.astype(np.int32)
results['clip_len'] = self.clip_len
results['frame_interval'] = None
results['num_clips'] = self.num_clips
return results | Perform the Uniform Sampling.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
start_index = results['start_index']
clip_centers = np.arange(self.clip_interval // 2, total_frames,
self.clip_interval)
num_clips = clip_centers.shape[0]
frame_inds = clip_centers[:, None] + np.arange(
-(self.clip_len // 2 * self.frame_interval),
self.frame_interval *
(self.clip_len -
(self.clip_len // 2)), self.frame_interval)[None, :]
# clip frame_inds to legal range
frame_inds = np.clip(frame_inds, 0, total_frames - 1)
frame_inds = np.concatenate(frame_inds) + start_index
results['frame_inds'] = frame_inds.astype(np.int32)
results['clip_len'] = self.clip_len
results['clip_interval'] = self.clip_interval
results['frame_interval'] = self.frame_interval
results['num_clips'] = num_clips
return results | Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def _get_train_clips(self, num_frames: int) -> np.array:
"""Get clip offsets by dense sample strategy in train mode.
It will calculate a sample position and sample interval, and set the
start index to 0 when sample_pos == 1, or randomly choose it from
[0, sample_pos - 1]. Then it will shift the start index by each
base offset.
Args:
num_frames (int): Total number of frames in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
sample_position = max(1, 1 + num_frames - self.sample_range)
interval = self.sample_range // self.num_clips
start_idx = 0 if sample_position == 1 else np.random.randint(
0, sample_position - 1)
base_offsets = np.arange(self.num_clips) * interval
clip_offsets = (base_offsets + start_idx) % num_frames
return clip_offsets | Get clip offsets by dense sample strategy in train mode.
It will calculate a sample position and sample interval, and set the
start index to 0 when sample_pos == 1, or randomly choose it from
[0, sample_pos - 1]. Then it will shift the start index by each
base offset.
Args:
num_frames (int): Total number of frames in the video.
Returns:
np.ndarray: Sampled frame indices in train mode. | _get_train_clips | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
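A numeric walk-through of the dense train-mode sampling above; start_idx is fixed here, where the transform would draw it randomly.
import numpy as np

num_frames, sample_range, num_clips = 120, 64, 4  # assumed example values
sample_position = max(1, 1 + num_frames - sample_range)  # 57
interval = sample_range // num_clips                     # 16
start_idx = 10  # one possible draw from np.random.randint(0, 56)
base_offsets = np.arange(num_clips) * interval           # [ 0 16 32 48]
print((base_offsets + start_idx) % num_frames)           # [10 26 42 58]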
def _get_test_clips(self, num_frames: int) -> np.array:
"""Get clip offsets by dense sample strategy in test mode.
It will calculate a sample position and sample interval and evenly
sample several start indexes as start positions between
[0, sample_position-1]. Then it will shift each start index by the
base offsets.
Args:
num_frames (int): Total number of frames in the video.
Returns:
np.ndarray: Sampled frame indices in test mode.
"""
sample_position = max(1, 1 + num_frames - self.sample_range)
interval = self.sample_range // self.num_clips
start_list = np.linspace(
0, sample_position - 1, num=self.num_sample_positions, dtype=int)
base_offsets = np.arange(self.num_clips) * interval
clip_offsets = list()
for start_idx in start_list:
clip_offsets.extend((base_offsets + start_idx) % num_frames)
clip_offsets = np.array(clip_offsets)
return clip_offsets | Get clip offsets by dense sample strategy in test mode.
It will calculate a sample position and sample interval and evenly
sample several start indexes as start positions between
[0, sample_position-1]. Then it will shift each start index by the
base offsets.
Args:
num_frames (int): Total number of frames in the video.
Returns:
np.ndarray: Sampled frame indices in test mode. | _get_test_clips | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def _sample_clips(self, num_frames: int) -> np.array:
"""Choose clip offsets for the video in a given mode.
Args:
num_frames (int): Total number of frames in the video.
Returns:
np.ndarray: Sampled frame indices.
"""
if self.test_mode:
clip_offsets = self._get_test_clips(num_frames)
else:
clip_offsets = self._get_train_clips(num_frames)
return clip_offsets | Choose clip offsets for the video in a given mode.
Args:
num_frames (int): Total number of frames in the video.
Returns:
np.ndarray: Sampled frame indices. | _sample_clips | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results: dict) -> dict:
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
clip_offsets = self._sample_clips(total_frames)
frame_inds = clip_offsets[:, None] + np.arange(
self.clip_len)[None, :] * self.frame_interval
frame_inds = np.concatenate(frame_inds)
if self.temporal_jitter:
perframe_offsets = np.random.randint(
self.frame_interval, size=len(frame_inds))
frame_inds += perframe_offsets
frame_inds = frame_inds.reshape((-1, self.clip_len))
if self.out_of_bound_opt == 'loop':
frame_inds = np.mod(frame_inds, total_frames)
elif self.out_of_bound_opt == 'repeat_last':
safe_inds = frame_inds < total_frames
unsafe_inds = 1 - safe_inds
last_ind = np.max(safe_inds * frame_inds, axis=1)
new_inds = (safe_inds * frame_inds + (unsafe_inds.T * last_ind).T)
frame_inds = new_inds
else:
raise ValueError('Illegal out_of_bound option.')
start_index = results['start_index']
frame_inds = np.concatenate(frame_inds) + start_index
results['frame_inds'] = frame_inds.astype(np.int32)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = self.num_clips
return results | Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def _get_clips(self, center_index, skip_offsets, shot_info):
"""Get clip offsets."""
start = center_index - (self.clip_len // 2) * self.frame_interval
end = center_index + ((self.clip_len + 1) // 2) * self.frame_interval
frame_inds = list(range(start, end, self.frame_interval))
if not self.test_mode:
frame_inds = frame_inds + skip_offsets
frame_inds = np.clip(frame_inds, shot_info[0], shot_info[1] - 1)
return frame_inds | Get clip offsets. | _get_clips | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
fps = results['fps']
timestamp = results['timestamp']
timestamp_start = results['timestamp_start']
start_index = results.get('start_index', 0)
if results.get('total_frames') is not None:
shot_info = (0, results['total_frames'])
else:
shot_info = results['shot_info']
center_index = fps * (timestamp - timestamp_start) + start_index
skip_offsets = np.random.randint(
-self.frame_interval // 2, (self.frame_interval + 1) // 2,
size=self.clip_len)
frame_inds = self._get_clips(center_index, skip_offsets, shot_info)
frame_inds = np.array(frame_inds, dtype=np.int32) + start_index
results['frame_inds'] = frame_inds
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = 1
results['crop_quadruple'] = np.array([0, 0, 1, 1], dtype=np.float32)
return results | Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results):
"""Perform the PyAV initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import av
except ImportError:
raise ImportError('Please run "conda install av -c conda-forge" '
'or "pip install av" to install PyAV first.')
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
file_obj = io.BytesIO(self.file_client.get(results['filename']))
container = av.open(file_obj)
results['video_reader'] = container
results['total_frames'] = container.streams.video[0].frames
return results | Perform the PyAV initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def frame_generator(container, stream):
"""Frame generator for PyAV."""
for packet in container.demux(stream):
for frame in packet.decode():
if frame:
return frame.to_rgb().to_ndarray() | Return the next decodable frame from the container as an RGB ndarray. | frame_generator | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results):
"""Perform the PyAV decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if self.multi_thread:
container.streams.video[0].thread_type = 'AUTO'
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
if self.mode == 'accurate':
# set the max index to enable early stopping
max_inds = max(results['frame_inds'])
i = 0
for frame in container.decode(video=0):
if i > max_inds + 1:
break
imgs.append(frame.to_rgb().to_ndarray())
i += 1
# the number of frames available in PyAV may be less than the
# nominal length, which would otherwise raise an IndexError
results['imgs'] = [
imgs[i % len(imgs)] for i in results['frame_inds']
]
elif self.mode == 'efficient':
for frame in container.decode(video=0):
backup_frame = frame
break
stream = container.streams.video[0]
for idx in results['frame_inds']:
pts_scale = stream.average_rate * stream.time_base
frame_pts = int(idx / pts_scale)
container.seek(
frame_pts, any_frame=False, backward=True, stream=stream)
frame = self.frame_generator(container, stream)
if frame is not None:
imgs.append(frame)
backup_frame = frame
else:
imgs.append(backup_frame)
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
results['video_reader'] = None
del container
return results | Perform the PyAV decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results):
"""Perform the PIMS initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import pims
except ImportError:
raise ImportError('Please run "conda install pims -c conda-forge" '
'or "pip install pims" to install pims first.')
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
file_obj = io.BytesIO(self.file_client.get(results['filename']))
if self.mode == 'accurate':
container = pims.PyAVReaderIndexed(file_obj)
else:
container = pims.PyAVReaderTimed(file_obj)
results['video_reader'] = container
results['total_frames'] = len(container)
return results | Perform the PIMS initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results):
"""Perform the PIMS decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
frame_inds = results['frame_inds']
imgs = [container[idx] for idx in frame_inds]
results['video_reader'] = None
del container
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results | Perform the PIMS decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def _parse_vectors(mv, vectors, height, width):
"""Parse the returned vectors."""
(w, h, src_x, src_y, dst_x,
dst_y) = (vectors['w'], vectors['h'], vectors['src_x'],
vectors['src_y'], vectors['dst_x'], vectors['dst_y'])
val_x = dst_x - src_x
val_y = dst_y - src_y
start_x = dst_x - w // 2
start_y = dst_y - h // 2
end_x = start_x + w
end_y = start_y + h
for sx, ex, sy, ey, vx, vy in zip(start_x, end_x, start_y, end_y,
val_x, val_y):
if (sx >= 0 and ex < width and sy >= 0 and ey < height):
mv[sy:ey, sx:ex] = (vx, vy)
return mv | Parse the returned vectors. | _parse_vectors | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results):
"""Perform the PyAV motion vector decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if self.multi_thread:
container.streams.video[0].thread_type = 'AUTO'
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
# set the max index to enable early stopping
max_idx = max(results['frame_inds'])
i = 0
stream = container.streams.video[0]
codec_context = stream.codec_context
codec_context.options = {'flags2': '+export_mvs'}
for packet in container.demux(stream):
for frame in packet.decode():
if i > max_idx + 1:
break
i += 1
height = frame.height
width = frame.width
mv = np.zeros((height, width, 2), dtype=np.int8)
vectors = frame.side_data.get('MOTION_VECTORS')
if frame.key_frame:
# Key frames don't have motion vectors
assert vectors is None
if vectors is not None and len(vectors) > 0:
mv = self._parse_vectors(mv, vectors.to_ndarray(), height,
width)
imgs.append(mv)
results['video_reader'] = None
del container
# the number of frames available in PyAV may be less than the
# nominal length, which would otherwise raise an IndexError
results['motion_vectors'] = np.array(
[imgs[i % len(imgs)] for i in results['frame_inds']])
return results | Perform the PyAV motion vector decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results: Dict) -> Dict:
"""Perform the Decord initialization.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
container = self._get_video_reader(results['filename'])
results['total_frames'] = len(container)
results['video_reader'] = container
results['avg_fps'] = container.get_avg_fps()
return results | Perform the Decord initialization.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results: Dict) -> Dict:
"""Perform the Decord decoding.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
container = results['video_reader']
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
frame_inds = results['frame_inds']
imgs = self._decord_load_frames(container, frame_inds)
results['video_reader'] = None
del container
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
# we resize the gt_bboxes and proposals to their real scale
if 'gt_bboxes' in results:
h, w = results['img_shape']
scale_factor = np.array([w, h, w, h])
gt_bboxes = results['gt_bboxes']
gt_bboxes = (gt_bboxes * scale_factor).astype(np.float32)
results['gt_bboxes'] = gt_bboxes
if 'proposals' in results and results['proposals'] is not None:
proposals = results['proposals']
proposals = (proposals * scale_factor).astype(np.float32)
results['proposals'] = proposals
return results | Perform the Decord decoding.
Args:
results (dict): The result dict.
Returns:
dict: The result dict. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
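What the two Decord transforms above do, expressed directly against the decord package; 'demo.mp4' is a placeholder path.
from decord import VideoReader

vr = VideoReader('demo.mp4')              # placeholder video path
print(len(vr), vr.get_avg_fps())          # total_frames, avg_fps
frame_inds = [0, 8, 16, 24]
imgs = list(vr.get_batch(frame_inds).asnumpy())  # RGB frames, each H x W x 3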
def transform(self, results: dict) -> dict:
"""Perform the OpenCV initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if self.io_backend == 'disk':
new_path = results['filename']
else:
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
thread_id = get_thread_id()
# save files from the same thread to the same location
new_path = osp.join(self.tmp_folder, f'tmp_{thread_id}.mp4')
with open(new_path, 'wb') as f:
f.write(self.file_client.get(results['filename']))
container = mmcv.VideoReader(new_path)
results['new_path'] = new_path
results['video_reader'] = container
results['total_frames'] = len(container)
return results | Perform the OpenCV initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results: dict) -> dict:
"""Perform the OpenCV decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
for frame_ind in results['frame_inds']:
cur_frame = container[frame_ind]
# last frame may be None in OpenCV
while cur_frame is None:
frame_ind -= 1
cur_frame = container[frame_ind]
imgs.append(cur_frame)
results['video_reader'] = None
del container
imgs = np.array(imgs)
# The default channel order of OpenCV is BGR, thus we change it to RGB
imgs = imgs[:, :, :, ::-1]
results['imgs'] = list(imgs)
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results | Perform the OpenCV decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results: dict) -> dict:
"""Perform the ``RawFrameDecode`` to pick frames given indices.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
mmcv.use_backend(self.decoding_backend)
directory = results['frame_dir']
filename_tmpl = results['filename_tmpl']
modality = results['modality']
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
imgs = list()
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
offset = results.get('offset', 0)
cache = {}
for i, frame_idx in enumerate(results['frame_inds']):
# Avoid loading duplicated frames
if frame_idx in cache:
imgs.append(cp.deepcopy(imgs[cache[frame_idx]]))
continue
else:
cache[frame_idx] = i
frame_idx += offset
if modality == 'RGB':
filepath = osp.join(directory, filename_tmpl.format(frame_idx))
img_bytes = self.file_client.get(filepath)
# Get frame with channel order RGB directly.
cur_frame = mmcv.imfrombytes(img_bytes, channel_order='rgb')
imgs.append(cur_frame)
elif modality == 'Flow':
x_filepath = osp.join(directory,
filename_tmpl.format('x', frame_idx))
y_filepath = osp.join(directory,
filename_tmpl.format('y', frame_idx))
x_img_bytes = self.file_client.get(x_filepath)
x_frame = mmcv.imfrombytes(x_img_bytes, flag='grayscale')
y_img_bytes = self.file_client.get(y_filepath)
y_frame = mmcv.imfrombytes(y_img_bytes, flag='grayscale')
imgs.append(np.stack([x_frame, y_frame], axis=-1))
else:
raise NotImplementedError
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
# we resize the gt_bboxes and proposals to their real scale
if 'gt_bboxes' in results:
h, w = results['img_shape']
scale_factor = np.array([w, h, w, h])
gt_bboxes = results['gt_bboxes']
gt_bboxes = (gt_bboxes * scale_factor).astype(np.float32)
results['gt_bboxes'] = gt_bboxes
if 'proposals' in results and results['proposals'] is not None:
proposals = results['proposals']
proposals = (proposals * scale_factor).astype(np.float32)
results['proposals'] = proposals
return results | Perform the ``RawFrameDecode`` to pick frames given indices.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
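A hedged input sketch for the transform above; the frame directory is a placeholder, and the filename template follows the common mmaction2 rawframe layout.
import numpy as np
from mmaction.datasets.transforms import RawFrameDecode

results = dict(
    frame_dir='data/rawframes/video_0',  # placeholder directory
    filename_tmpl='img_{:05d}.jpg',
    modality='RGB',
    frame_inds=np.array([1, 5, 9]))
results = RawFrameDecode()(results)  # fills results['imgs'] with 3 RGB frames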
def transform(self, results):
"""Perform the ``RawFrameDecode`` to pick frames given indices.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
modality = results['modality']
array = results['array']
imgs = list()
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
offset = results.get('offset', 0)
for i, frame_idx in enumerate(results['frame_inds']):
frame_idx += offset
if modality == 'RGB':
imgs.append(array[frame_idx])
elif modality == 'Flow':
imgs.extend(
[array[frame_idx, ..., 0], array[frame_idx, ..., 1]])
else:
raise NotImplementedError
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results | Perform the ``RawFrameDecode`` to pick frames given indices.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results):
"""Perform the ``ImageDecode`` to load image given the file path.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
mmcv.use_backend(self.decoding_backend)
filename = results['filename']
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
imgs = list()
img_bytes = self.file_client.get(filename)
img = mmcv.imfrombytes(img_bytes, channel_order='rgb')
imgs.append(img)
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results | Perform the ``ImageDecode`` to load image given the file path.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def _zero_pad(shape: tuple) -> np.ndarray:
"""Zero padding method."""
return np.zeros(shape, dtype=np.float32) | Zero padding method. | _zero_pad | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def _random_pad(shape: tuple) -> np.ndarray:
"""Random padding method."""
# spectrogram is normalized into a distribution of 0~1
return np.random.rand(*shape).astype(np.float32) | Random padding method. | _random_pad | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results: Dict) -> Dict:
"""Perform the numpy loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if osp.exists(results['audio_path']):
feature_map = np.load(results['audio_path'])
else:
# Generate a random dummy 10s input
# Some videos do not have an audio stream
pad_func = getattr(self, f'_{self.pad_method}_pad')
feature_map = pad_func((640, 80))
results['length'] = feature_map.shape[0]
results['audios'] = feature_map
return results | Perform the numpy loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results):
"""Perform the building of pseudo clips.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
# the input should be one single image
assert len(results['imgs']) == 1
im = results['imgs'][0]
for _ in range(1, self.clip_len):
results['imgs'].append(np.copy(im))
results['clip_len'] = self.clip_len
results['num_clips'] = 1
return results | Perform the building of pseudo clips.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results: Dict) -> Dict:
"""Perform the ``AudioFeatureSelector`` to pick audio feature clips.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
audio = results['audios']
frame_inds = results['frame_inds']
num_clips = results['num_clips']
resampled_clips = list()
frame_inds = frame_inds.reshape(num_clips, -1)
for clip_idx in range(num_clips):
clip_frame_inds = frame_inds[clip_idx]
start_idx = max(
0,
int(
round((clip_frame_inds[0] + 1) / results['total_frames'] *
results['length'])))
end_idx = min(
results['length'],
int(
round((clip_frame_inds[-1] + 1) / results['total_frames'] *
results['length'])))
cropped_audio = audio[start_idx:end_idx, :]
if cropped_audio.shape[0] >= self.fixed_length:
truncated_audio = cropped_audio[:self.fixed_length, :]
else:
truncated_audio = np.pad(
cropped_audio,
((0, self.fixed_length - cropped_audio.shape[0]), (0, 0)),
mode='constant')
resampled_clips.append(truncated_audio)
results['audios'] = np.array(resampled_clips)
results['audios_shape'] = results['audios'].shape
return results | Perform the ``AudioFeatureSelector`` to pick audio feature clips.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
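The frame-to-spectrogram alignment above, in isolation with assumed values: frame indices are mapped proportionally onto rows of the audio feature map.
import numpy as np

total_frames, length = 300, 640  # assumed video frames vs. spectrogram rows
clip_frame_inds = np.array([30, 38, 46, 54])
start = max(0, int(round((clip_frame_inds[0] + 1) / total_frames * length)))
end = min(length, int(round((clip_frame_inds[-1] + 1) / total_frames * length)))
print(start, end)  # 66 117 -> feature rows selected for this clip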
def transform(self, results):
"""Perform the LoadLocalizationFeature loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
data_path = results['feature_path']
raw_feature = np.loadtxt(
data_path, dtype=np.float32, delimiter=',', skiprows=1)
results['raw_feature'] = np.transpose(raw_feature, (1, 0))
return results | Perform the LoadLocalizationFeature loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def transform(self, results):
"""Perform the GenerateLocalizationLabels loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
video_frame = results['duration_frame']
video_second = results['duration_second']
feature_frame = results['feature_frame']
corrected_second = float(feature_frame) / video_frame * video_second
annotations = results['annotations']
gt_bbox = []
for annotation in annotations:
current_start = max(
min(1, annotation['segment'][0] / corrected_second), 0)
current_end = max(
min(1, annotation['segment'][1] / corrected_second), 0)
gt_bbox.append([current_start, current_end])
gt_bbox = np.array(gt_bbox)
results['gt_bbox'] = gt_bbox
return results | Perform the GenerateLocalizationLabels loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
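A numeric example of the normalization above with an assumed annotation: segment boundaries in seconds become fractions of the feature duration, clipped to [0, 1].
video_frame, video_second, feature_frame = 300, 10.0, 288  # assumed values
corrected_second = feature_frame / video_frame * video_second  # 9.6
segment = [2.4, 7.2]  # an assumed action annotation in seconds
gt = [max(min(1, segment[0] / corrected_second), 0),
      max(min(1, segment[1] / corrected_second), 0)]
print(gt)  # [0.25, 0.75]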
def transform(self, results):
"""Perform the LoadProposals loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
video_name = results['video_name']
proposal_path = osp.join(self.pgm_proposals_dir,
video_name + self.proposal_ext)
if self.proposal_ext == '.csv':
pgm_proposals = np.loadtxt(
proposal_path, dtype=np.float32, delimiter=',', skiprows=1)
pgm_proposals = np.array(pgm_proposals[:self.top_k])
tmin = pgm_proposals[:, 0]
tmax = pgm_proposals[:, 1]
tmin_score = pgm_proposals[:, 2]
tmax_score = pgm_proposals[:, 3]
reference_temporal_iou = pgm_proposals[:, 5]
feature_path = osp.join(self.pgm_features_dir,
video_name + self.feature_ext)
if self.feature_ext == '.npy':
bsp_feature = np.load(feature_path).astype(np.float32)
bsp_feature = bsp_feature[:self.top_k, :]
results['bsp_feature'] = bsp_feature
results['tmin'] = tmin
results['tmax'] = tmax
results['tmin_score'] = tmin_score
results['tmax_score'] = tmax_score
results['reference_temporal_iou'] = reference_temporal_iou
return results | Perform the LoadProposals loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline. | transform | python | open-mmlab/mmaction2 | mmaction/datasets/transforms/loading.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py | Apache-2.0 |
def _register_hooks(self, layer_name: str) -> None:
"""Register forward and backward hook to a layer, given layer_name, to
obtain gradients and activations.
Args:
layer_name (str): name of the layer.
"""
def get_gradients(module, grad_input, grad_output):
self.target_gradients = grad_output[0].detach()
def get_activations(module, input, output):
self.target_activations = output.clone().detach()
layer_ls = layer_name.split('/')
prev_module = self.model
for layer in layer_ls:
prev_module = prev_module._modules[layer]
target_layer = prev_module
target_layer.register_forward_hook(get_activations)
target_layer.register_backward_hook(get_gradients) | Register forward and backward hooks to a layer, given layer_name, to
obtain gradients and activations.
Args:
layer_name (str): name of the layer. | _register_hooks | python | open-mmlab/mmaction2 | mmaction/utils/gradcam_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/gradcam_utils.py | Apache-2.0 |
def _calculate_localization_map(self,
data: dict,
use_labels: bool,
delta=1e-20) -> tuple:
"""Calculate localization map for all inputs with Grad-CAM.
Args:
data (dict): Model inputs, generated by the test pipeline.
use_labels (bool): Whether to use given labels to generate
localization map.
delta (float): used in localization map normalization,
must be small enough. Please make sure
`localization_map_max - localization_map_min >> delta`
Returns:
localization_map (torch.Tensor): the localization map for
input imgs.
preds (torch.Tensor): Model predictions with shape
(batch_size, num_classes).
"""
inputs = data['inputs']
# use score before softmax
self.model.cls_head.average_clips = 'score'
# model forward & backward
results = self.model.test_step(data)
preds = [result.pred_score for result in results]
preds = torch.stack(preds)
if use_labels:
labels = [result.gt_label for result in results]
labels = torch.stack(labels)
score = torch.gather(preds, dim=1, index=labels)
else:
score = torch.max(preds, dim=-1)[0]
self.model.zero_grad()
score = torch.sum(score)
score.backward()
imgs = torch.stack(inputs)
if self.is_recognizer2d:
# [batch_size, num_segments, 3, H, W]
b, t, _, h, w = imgs.size()
else:
# [batch_size, num_crops*num_clips, 3, clip_len, H, W]
b1, b2, _, t, h, w = imgs.size()
b = b1 * b2
gradients = self.target_gradients
activations = self.target_activations
if self.is_recognizer2d:
# [B*Tg, C', H', W']
b_tg, c, _, _ = gradients.size()
tg = b_tg // b
else:
# source shape: [B, C', Tg, H', W']
_, c, tg, _, _ = gradients.size()
# target shape: [B, Tg, C', H', W']
gradients = gradients.permute(0, 2, 1, 3, 4)
activations = activations.permute(0, 2, 1, 3, 4)
# calculate & resize to [B, 1, T, H, W]
weights = torch.mean(gradients.view(b, tg, c, -1), dim=3)
weights = weights.view(b, tg, c, 1, 1)
activations = activations.view([b, tg, c] +
list(activations.size()[-2:]))
localization_map = torch.sum(
weights * activations, dim=2, keepdim=True)
localization_map = F.relu(localization_map)
localization_map = localization_map.permute(0, 2, 1, 3, 4)
localization_map = F.interpolate(
localization_map,
size=(t, h, w),
mode='trilinear',
align_corners=False)
# Normalize the localization map.
localization_map_min, localization_map_max = (
torch.min(localization_map.view(b, -1), dim=-1, keepdim=True)[0],
torch.max(localization_map.view(b, -1), dim=-1, keepdim=True)[0])
localization_map_min = torch.reshape(
localization_map_min, shape=(b, 1, 1, 1, 1))
localization_map_max = torch.reshape(
localization_map_max, shape=(b, 1, 1, 1, 1))
localization_map = (localization_map - localization_map_min) / (
localization_map_max - localization_map_min + delta)
localization_map = localization_map.data
return localization_map.squeeze(dim=1), preds | Calculate localization map for all inputs with Grad-CAM.
Args:
data (dict): Model inputs, generated by the test pipeline.
use_labels (bool): Whether to use given labels to generate
localization map.
delta (float): used in localization map normalization,
must be small enough. Please make sure
`localization_map_max - localization_map_min >> delta`
Returns:
localization_map (torch.Tensor): the localization map for
input imgs.
preds (torch.Tensor): Model predictions with shape
(batch_size, num_classes). | _calculate_localization_map | python | open-mmlab/mmaction2 | mmaction/utils/gradcam_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/gradcam_utils.py | Apache-2.0 |
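The core Grad-CAM step above replayed on dummy tensors: per-channel weights from globally averaged gradients, then a ReLU-ed weighted sum over channels (all shapes assumed).
import torch
import torch.nn.functional as F

b, tg, c, h, w = 2, 4, 16, 7, 7  # assumed shapes
gradients = torch.randn(b, tg, c, h, w)
activations = torch.randn(b, tg, c, h, w)
weights = gradients.view(b, tg, c, -1).mean(dim=3).view(b, tg, c, 1, 1)
cam = F.relu((weights * activations).sum(dim=2, keepdim=True))
print(cam.shape)  # torch.Size([2, 4, 1, 7, 7])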
def _alpha_blending(self, localization_map: torch.Tensor,
input_imgs: torch.Tensor,
alpha: float) -> torch.Tensor:
"""Blend heatmaps and model input images and get visulization results.
Args:
localization_map (torch.Tensor): localization map for all inputs,
generated with Grad-CAM.
input_imgs (torch.Tensor): model inputs, raw images.
alpha (float): transparency level of the heatmap,
in the range [0, 1].
Returns:
torch.Tensor: blending results for localization map and input
images, with shape [B, T, H, W, 3] and pixel values in
RGB order within range [0, 1].
"""
# localization_map shape [B, T, H, W]
localization_map = localization_map.cpu()
# heatmap shape [B, T, H, W, 3] in RGB order
heatmap = self.colormap(localization_map.detach().numpy())
heatmap = heatmap[..., :3]
heatmap = torch.from_numpy(heatmap)
input_imgs = torch.stack(input_imgs)
# Permute input imgs to [B, T, H, W, 3], like heatmap
if self.is_recognizer2d:
# Recognizer2D input (B, T, C, H, W)
curr_inp = input_imgs.permute(0, 1, 3, 4, 2)
else:
# Recognizer3D input (B', num_clips*num_crops, C, T, H, W)
# B = B' * num_clips * num_crops
curr_inp = input_imgs.view([-1] + list(input_imgs.size()[2:]))
curr_inp = curr_inp.permute(0, 2, 3, 4, 1)
# renormalize input imgs to [0, 1]
curr_inp = curr_inp.cpu().float()
curr_inp /= 255.
# alpha blending
blended_imgs = alpha * heatmap + (1 - alpha) * curr_inp
return blended_imgs | Blend heatmaps and model input images and get visualization results.
Args:
localization_map (torch.Tensor): localization map for all inputs,
generated with Grad-CAM.
input_imgs (torch.Tensor): model inputs, raw images.
alpha (float): transparency level of the heatmap,
in the range [0, 1].
Returns:
torch.Tensor: blending results for localization map and input
images, with shape [B, T, H, W, 3] and pixel values in
RGB order within range [0, 1]. | _alpha_blending | python | open-mmlab/mmaction2 | mmaction/utils/gradcam_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/gradcam_utils.py | Apache-2.0 |
def __call__(self,
data: dict,
use_labels: bool = False,
alpha: float = 0.5) -> tuple:
"""Visualize the localization maps on their corresponding inputs as
heatmap, using Grad-CAM.
Generate visualization results for **ALL CROPS**.
For example, for an I3D model, if `clip_len=32, num_clips=10` and
`ThreeCrop` is used in the test pipeline, then for every model input
there are 960 (32*10*3) images generated.
Args:
data (dict): model inputs, generated by test pipeline.
use_labels (bool): Whether to use given labels to generate
localization map.
alpha (float): transparency level of the heatmap,
in the range [0, 1].
Returns:
blended_imgs (torch.Tensor): Visualization results, blended by
localization maps and model inputs.
preds (torch.Tensor): Model predictions for inputs.
"""
# localization_map shape [B, T, H, W]
# preds shape [batch_size, num_classes]
localization_map, preds = self._calculate_localization_map(
data, use_labels=use_labels)
# blended_imgs shape [B, T, H, W, 3]
blended_imgs = self._alpha_blending(localization_map, data['inputs'],
alpha)
# blended_imgs shape [B, T, H, W, 3]
# preds shape [batch_size, num_classes]
# Recognizer2D: B = batch_size, T = num_segments
# Recognizer3D: B = batch_size * num_crops * num_clips, T = clip_len
return blended_imgs, preds | Visualize the localization maps on their corresponding inputs as
heatmap, using Grad-CAM.
Generate visualization results for **ALL CROPS**.
For example, for an I3D model, if `clip_len=32, num_clips=10` and
`ThreeCrop` is used in the test pipeline, then for every model input
there are 960 (32*10*3) images generated.
Args:
data (dict): model inputs, generated by test pipeline.
use_labels (bool): Whether to use given labels to generate
localization map.
alpha (float): transparency level of the heatmap,
in the range [0, 1].
Returns:
blended_imgs (torch.Tensor): Visualization results, blended by
localization maps and model inputs.
preds (torch.Tensor): Model predictions for inputs. | __call__ | python | open-mmlab/mmaction2 | mmaction/utils/gradcam_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/gradcam_utils.py | Apache-2.0 |
def register_all_modules(init_default_scope: bool = True) -> None:
"""Register all modules in mmaction into the registries.
Args:
init_default_scope (bool): Whether initialize the mmaction default
scope. If True, the global default scope will be set to `mmaction`,
and all registries will build modules from mmaction's registry
node. To understand more about the registry, please refer to
https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md
Defaults to True.
"""
import mmaction.datasets # noqa: F401,F403
import mmaction.engine # noqa: F401,F403
import mmaction.evaluation # noqa: F401,F403
import mmaction.models # noqa: F401,F403
import mmaction.structures # noqa: F401,F403
import mmaction.visualization # noqa: F401,F403
if init_default_scope:
never_created = DefaultScope.get_current_instance() is None \
or not DefaultScope.check_instance_created('mmaction')
if never_created:
DefaultScope.get_instance('mmaction', scope_name='mmaction')
return
current_scope = DefaultScope.get_current_instance()
if current_scope.scope_name != 'mmaction':
warnings.warn('The current default scope '
f'"{current_scope.scope_name}" is not "mmaction", '
'`register_all_modules` will forcibly set the current '
'default scope to "mmaction". If this is not as '
'expected, please set `init_default_scope=False`.')
# avoid name conflict
new_instance_name = f'mmaction-{datetime.datetime.now()}'
DefaultScope.get_instance(new_instance_name, scope_name='mmaction') | Register all modules in mmaction into the registries.
Args:
init_default_scope (bool): Whether initialize the mmaction default
scope. If True, the global default scope will be set to `mmaction`,
and all registries will build modules from mmaction's registry
node. To understand more about the registry, please refer to
https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md
Defaults to True. | register_all_modules | python | open-mmlab/mmaction2 | mmaction/utils/setup_env.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/setup_env.py | Apache-2.0 |
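Typical use of the helper above, called once before building datasets or models from a config:
from mmaction.utils import register_all_modules

register_all_modules(init_default_scope=True)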
def require(dep, install=None):
"""A wrapper of function for extra package requirements.
Args:
dep (str): The dependency package name, like ``transformers``
or ``transformers>=4.28.0``.
install (str, optional): The installation command hint. Defaults
to None, which means to use "pip install dep".
"""
def wrapper(fn):
assert isfunction(fn)
@wraps(fn)
def ask_install(*args, **kwargs):
name = fn.__qualname__.replace('.__init__', '')
ins = install or f'pip install "{dep}"'
raise ImportError(
f'{name} requires {dep}, please install it by `{ins}`.')
if satisfy_requirement(dep):
fn._verify_require = getattr(fn, '_verify_require', lambda: None)
return fn
ask_install._verify_require = ask_install
return ask_install
return wrapper | A decorator for functions with extra package requirements.
Args:
dep (str): The dependency package name, like ``transformers``
or ``transformers>=4.28.0``.
install (str, optional): The installation command hint. Defaults
to None, which means to use "pip install dep". | require | python | open-mmlab/mmaction2 | mmaction/utils/dependency.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/dependency.py | Apache-2.0 |
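A hedged usage sketch of the decorator above; `fancy_pkg` is a made-up dependency, so without it installed, calling the function raises ImportError carrying the suggested install command.
@require('fancy_pkg>=1.0', install='pip install fancy-pkg')
def fancy_feature():
    import fancy_pkg  # hypothetical package, named for illustration only
    return fancy_pkg.run()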
def get_random_string(length: int = 15) -> str:
"""Get random string with letters and digits.
Args:
length (int): Length of random string. Defaults to 15.
"""
return ''.join(
random.choice(string.ascii_letters + string.digits)
for _ in range(length)) | Get random string with letters and digits.
Args:
length (int): Length of random string. Defaults to 15. | get_random_string | python | open-mmlab/mmaction2 | mmaction/utils/misc.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/misc.py | Apache-2.0 |
def get_thread_id() -> int:
"""Get current thread id."""
# use ctypes to issue the raw gettid syscall (186 on x86-64 Linux)
thread_id = ctypes.CDLL('libc.so.6').syscall(186)
return thread_id | Get current thread id. | get_thread_id | python | open-mmlab/mmaction2 | mmaction/utils/misc.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/misc.py | Apache-2.0 |
def get_shm_dir() -> str:
"""Get shm dir for temporary usage."""
return '/dev/shm' | Get shm dir for temporary usage. | get_shm_dir | python | open-mmlab/mmaction2 | mmaction/utils/misc.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/misc.py | Apache-2.0 |
def frame_extract(video_path: str,
short_side: Optional[int] = None,
out_dir: str = './tmp'):
"""Extract frames given video_path.
Args:
video_path (str): The video path.
short_side (int): Target short-side of the output image.
Defaults to None, means keeping original shape.
out_dir (str): The output directory. Defaults to ``'./tmp'``.
"""
# Load the video, extract frames into OUT_DIR/video_name
target_dir = osp.join(out_dir, osp.basename(osp.splitext(video_path)[0]))
os.makedirs(target_dir, exist_ok=True)
# Should be able to handle videos up to several hours
frame_tmpl = osp.join(target_dir, 'img_{:06d}.jpg')
assert osp.exists(video_path), f'file does not exist: {video_path}'
vid = cv2.VideoCapture(video_path)
frames = []
frame_paths = []
flag, frame = vid.read()
cnt = 0
new_h, new_w = None, None
while flag:
if short_side is not None:
if new_h is None:
h, w, _ = frame.shape
new_w, new_h = mmcv.rescale_size((w, h), (short_side, np.inf))
frame = mmcv.imresize(frame, (new_w, new_h))
frames.append(frame)
frame_path = frame_tmpl.format(cnt + 1)
frame_paths.append(frame_path)
cv2.imwrite(frame_path, frame)
cnt += 1
flag, frame = vid.read()
return frame_paths, frames | Extract frames given video_path.
Args:
video_path (str): The video path.
short_side (int): Target short-side of the output image.
Defaults to None, means keeping original shape.
out_dir (str): The output directory. Defaults to ``'./tmp'``. | frame_extract | python | open-mmlab/mmaction2 | mmaction/utils/misc.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/misc.py | Apache-2.0 |
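Using the helper above on a placeholder video; extracted frames land in ./tmp/<video name>/img_000001.jpg and onwards.
frame_paths, frames = frame_extract(
    'demo.mp4', short_side=480, out_dir='./tmp')  # placeholder video path
print(len(frame_paths), frames[0].shape)  # frame count, (H, W, 3)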
def get_str_type(module: Union[str, ModuleType, FunctionType]) -> str:
"""Return the string type name of module.
Args:
module (str | ModuleType | FunctionType):
The target module class
Returns:
Class name of the module
"""
if isinstance(module, str):
str_type = module
elif inspect.isclass(module) or inspect.isfunction(module):
str_type = module.__name__
else:
return None
return str_type | Return the string type name of module.
Args:
module (str | ModuleType | FunctionType):
The target module class
Returns:
Class name of the module | get_str_type | python | open-mmlab/mmaction2 | mmaction/utils/misc.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/misc.py | Apache-2.0 |
def collect_env():
"""Collect the information of the running environments."""
env_info = collect_basic_env()
env_info['MMAction2'] = (
mmaction.__version__ + '+' + get_git_hash(digits=7))
env_info['MMCV'] = (mmcv.__version__)
try:
import mmdet
env_info['MMDetection'] = (mmdet.__version__)
except ImportError:
pass
try:
import mmpose
env_info['MMPose'] = (mmpose.__version__)
except ImportError:
pass
return env_info | Collect the information of the running environments. | collect_env | python | open-mmlab/mmaction2 | mmaction/utils/collect_env.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/collect_env.py | Apache-2.0 |
def init_recognizer(config: Union[str, Path, mmengine.Config],
checkpoint: Optional[str] = None,
device: Union[str, torch.device] = 'cuda:0') -> nn.Module:
"""Initialize a recognizer from config file.
Args:
config (str or :obj:`Path` or :obj:`mmengine.Config`): Config file
path, :obj:`Path` or the config object.
checkpoint (str, optional): Checkpoint path/url. If set to None,
the model will not load any weights. Defaults to None.
device (str | torch.device): The desired device of returned
tensor. Defaults to ``'cuda:0'``.
Returns:
nn.Module: The constructed recognizer.
"""
if isinstance(config, (str, Path)):
config = mmengine.Config.fromfile(config)
elif not isinstance(config, mmengine.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
init_default_scope(config.get('default_scope', 'mmaction'))
if hasattr(config.model, 'backbone') and config.model.backbone.get(
'pretrained', None):
config.model.backbone.pretrained = None
model = MODELS.build(config.model)
if checkpoint is not None:
load_checkpoint(model, checkpoint, map_location='cpu')
model.cfg = config
model.to(device)
model.eval()
return model | Initialize a recognizer from config file.
Args:
config (str or :obj:`Path` or :obj:`mmengine.Config`): Config file
path, :obj:`Path` or the config object.
checkpoint (str, optional): Checkpoint path/url. If set to None,
the model will not load any weights. Defaults to None.
device (str | torch.device): The desired device of returned
tensor. Defaults to ``'cuda:0'``.
Returns:
nn.Module: The constructed recognizer. | init_recognizer | python | open-mmlab/mmaction2 | mmaction/apis/inference.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inference.py | Apache-2.0 |
def inference_recognizer(model: nn.Module,
video: Union[str, dict],
test_pipeline: Optional[Compose] = None
) -> ActionDataSample:
"""Inference a video with the recognizer.
Args:
model (nn.Module): The loaded recognizer.
video (Union[str, dict]): The video file path or the results
dictionary (the input of pipeline).
test_pipeline (:obj:`Compose`, optional): The test pipeline.
If not specified, the test pipeline in the config will be
used. Defaults to None.
Returns:
:obj:`ActionDataSample`: The inference results. Specifically, the
predicted scores are saved at ``result.pred_score``.
"""
if test_pipeline is None:
cfg = model.cfg
init_default_scope(cfg.get('default_scope', 'mmaction'))
test_pipeline_cfg = cfg.test_pipeline
test_pipeline = Compose(test_pipeline_cfg)
input_flag = None
if isinstance(video, dict):
input_flag = 'dict'
elif isinstance(video, str) and osp.exists(video):
if video.endswith('.npy'):
input_flag = 'audio'
else:
input_flag = 'video'
else:
raise RuntimeError(f'The type of argument `video` is not supported: '
f'{type(video)}')
if input_flag == 'dict':
data = video
if input_flag == 'video':
data = dict(filename=video, label=-1, start_index=0, modality='RGB')
if input_flag == 'audio':
data = dict(
audio_path=video,
total_frames=len(np.load(video)),
start_index=0,
label=-1)
data = test_pipeline(data)
data = pseudo_collate([data])
# Forward the model
with torch.no_grad():
result = model.test_step(data)[0]
return result | Inference a video with the recognizer.
Args:
model (nn.Module): The loaded recognizer.
video (Union[str, dict]): The video file path or the results
dictionary (the input of pipeline).
test_pipeline (:obj:`Compose`, optional): The test pipeline.
If not specified, the test pipeline in the config will be
used. Defaults to None.
Returns:
:obj:`ActionDataSample`: The inference results. Specifically, the
predicted scores are saved at ``result.pred_score``. | inference_recognizer | python | open-mmlab/mmaction2 | mmaction/apis/inference.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inference.py | Apache-2.0 |
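End-to-end use of the two APIs above; the config, checkpoint and video paths are placeholders to be replaced with real files.
from mmaction.apis import inference_recognizer, init_recognizer

model = init_recognizer('path/to/config.py', 'path/to/checkpoint.pth',
                        device='cpu')  # placeholder paths
result = inference_recognizer(model, 'path/to/video.mp4')
print(result.pred_score.argmax().item())  # index of the top-1 class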
def inference_skeleton(model: nn.Module,
pose_results: List[dict],
img_shape: Tuple[int],
test_pipeline: Optional[Compose] = None
) -> ActionDataSample:
"""Inference a pose results with the skeleton recognizer.
Args:
model (nn.Module): The loaded recognizer.
        pose_results (List[dict]): The pose estimation results, one
            dictionary per frame (the output of `pose_inference`).
        img_shape (Tuple[int]): The original image shape used when running
            the skeleton recognizer.
test_pipeline (:obj:`Compose`, optional): The test pipeline.
If not specified, the test pipeline in the config will be
used. Defaults to None.
Returns:
:obj:`ActionDataSample`: The inference results. Specifically, the
predicted scores are saved at ``result.pred_score``.
"""
if test_pipeline is None:
cfg = model.cfg
init_default_scope(cfg.get('default_scope', 'mmaction'))
test_pipeline_cfg = cfg.test_pipeline
test_pipeline = Compose(test_pipeline_cfg)
h, w = img_shape
num_keypoint = pose_results[0]['keypoints'].shape[1]
num_frame = len(pose_results)
num_person = max([len(x['keypoints']) for x in pose_results])
fake_anno = dict(
        frame_dir='',
label=-1,
img_shape=(h, w),
origin_shape=(h, w),
start_index=0,
modality='Pose',
total_frames=num_frame)
keypoint = np.zeros((num_frame, num_person, num_keypoint, 2),
dtype=np.float16)
keypoint_score = np.zeros((num_frame, num_person, num_keypoint),
dtype=np.float16)
for f_idx, frm_pose in enumerate(pose_results):
frm_num_persons = frm_pose['keypoints'].shape[0]
for p_idx in range(frm_num_persons):
keypoint[f_idx, p_idx] = frm_pose['keypoints'][p_idx]
keypoint_score[f_idx, p_idx] = frm_pose['keypoint_scores'][p_idx]
fake_anno['keypoint'] = keypoint.transpose((1, 0, 2, 3))
fake_anno['keypoint_score'] = keypoint_score.transpose((1, 0, 2))
return inference_recognizer(model, fake_anno, test_pipeline) | Inference a pose results with the skeleton recognizer.
Args:
model (nn.Module): The loaded recognizer.
    pose_results (List[dict]): The pose estimation results, one
        dictionary per frame (the output of `pose_inference`).
    img_shape (Tuple[int]): The original image shape used when running
        the skeleton recognizer.
test_pipeline (:obj:`Compose`, optional): The test pipeline.
If not specified, the test pipeline in the config will be
used. Defaults to None.
Returns:
:obj:`ActionDataSample`: The inference results. Specifically, the
predicted scores are saved at ``result.pred_score``. | inference_skeleton | python | open-mmlab/mmaction2 | mmaction/apis/inference.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inference.py | Apache-2.0 |
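A sketch of `inference_skeleton` with fabricated pose results (two frames, one person, 17 COCO keypoints); real inputs would come from `pose_inference`, and the skeleton config/checkpoint paths are placeholders.

import numpy as np

from mmaction.apis import inference_skeleton, init_recognizer

# Placeholder skeleton-model config/checkpoint.
model = init_recognizer('configs/skeleton/posec3d/posec3d_ntu60.py',
                        'checkpoints/posec3d_ntu60.pth', device='cpu')

# Fabricated pose results: 2 frames, 1 person, 17 keypoints each.
pose_results = [
    dict(
        keypoints=np.random.rand(1, 17, 2).astype(np.float32),
        keypoint_scores=np.ones((1, 17), dtype=np.float32))
    for _ in range(2)
]
result = inference_skeleton(model, pose_results, img_shape=(480, 640))
print(result.pred_score)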
def detection_inference(det_config: Union[str, Path, mmengine.Config,
nn.Module],
det_checkpoint: str,
frame_paths: List[str],
det_score_thr: float = 0.9,
det_cat_id: int = 0,
device: Union[str, torch.device] = 'cuda:0',
with_score: bool = False) -> tuple:
"""Detect human boxes given frame paths.
Args:
det_config (Union[str, :obj:`Path`, :obj:`mmengine.Config`,
:obj:`torch.nn.Module`]):
Det config file path or Detection model object. It can be
a :obj:`Path`, a config object, or a module object.
        det_checkpoint (str): Checkpoint path/url.
frame_paths (List[str]): The paths of frames to do detection inference.
det_score_thr (float): The threshold of human detection score.
Defaults to 0.9.
det_cat_id (int): The category id for human detection. Defaults to 0.
        device (Union[str, torch.device]): The device on which to run the
            detector. Defaults to ``'cuda:0'``.
        with_score (bool): Whether to append the detection score after the
            box. Defaults to False.
Returns:
List[np.ndarray]: List of detected human boxes.
List[:obj:`DetDataSample`]: List of data samples, generally used
to visualize data.
"""
try:
from mmdet.apis import inference_detector, init_detector
from mmdet.structures import DetDataSample
    except (ImportError, ModuleNotFoundError):
        raise ImportError('Failed to import `inference_detector` and '
                          '`init_detector` from `mmdet.apis`. These APIs '
                          'are required by this inference API!')
if isinstance(det_config, nn.Module):
model = det_config
else:
model = init_detector(
config=det_config, checkpoint=det_checkpoint, device=device)
results = []
data_samples = []
print('Performing Human Detection for each frame')
for frame_path in track_iter_progress(frame_paths):
det_data_sample: DetDataSample = inference_detector(model, frame_path)
pred_instance = det_data_sample.pred_instances.cpu().numpy()
bboxes = pred_instance.bboxes
scores = pred_instance.scores
        # We only keep human detection bboxes with scores larger
        # than `det_score_thr` and category ids equal to `det_cat_id`.
valid_idx = np.logical_and(pred_instance.labels == det_cat_id,
pred_instance.scores > det_score_thr)
bboxes = bboxes[valid_idx]
scores = scores[valid_idx]
if with_score:
bboxes = np.concatenate((bboxes, scores[:, None]), axis=-1)
results.append(bboxes)
data_samples.append(det_data_sample)
return results, data_samples | Detect human boxes given frame paths.
Args:
det_config (Union[str, :obj:`Path`, :obj:`mmengine.Config`,
:obj:`torch.nn.Module`]):
Det config file path or Detection model object. It can be
a :obj:`Path`, a config object, or a module object.
    det_checkpoint (str): Checkpoint path/url.
frame_paths (List[str]): The paths of frames to do detection inference.
det_score_thr (float): The threshold of human detection score.
Defaults to 0.9.
det_cat_id (int): The category id for human detection. Defaults to 0.
    device (Union[str, torch.device]): The device on which to run the
        detector. Defaults to ``'cuda:0'``.
    with_score (bool): Whether to append the detection score after the
        box. Defaults to False.
Returns:
List[np.ndarray]: List of detected human boxes.
List[:obj:`DetDataSample`]: List of data samples, generally used
to visualize data. | detection_inference | python | open-mmlab/mmaction2 | mmaction/apis/inference.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inference.py | Apache-2.0 |
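A hedged call sketch for `detection_inference`; the MMDetection config, checkpoint, and frame paths are placeholders, and `mmdet` must be installed.

from mmaction.apis import detection_inference

det_config = 'demo/demo_configs/faster-rcnn_r50_fpn_2x_coco_infer.py'  # placeholder
det_ckpt = 'checkpoints/faster_rcnn_r50_fpn_2x_coco.pth'  # placeholder
frame_paths = ['frames/img_00001.jpg', 'frames/img_00002.jpg']  # placeholders

bboxes, det_samples = detection_inference(
    det_config, det_ckpt, frame_paths, det_score_thr=0.9, device='cpu')
# `bboxes` holds one (N, 4) array per frame, or (N, 5) if with_score=True.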
def pose_inference(pose_config: Union[str, Path, mmengine.Config, nn.Module],
pose_checkpoint: str,
frame_paths: List[str],
det_results: List[np.ndarray],
device: Union[str, torch.device] = 'cuda:0') -> tuple:
"""Perform Top-Down pose estimation.
Args:
pose_config (Union[str, :obj:`Path`, :obj:`mmengine.Config`,
:obj:`torch.nn.Module`]): Pose config file path or
pose model object. It can be a :obj:`Path`, a config object,
or a module object.
        pose_checkpoint (str): Checkpoint path/url.
frame_paths (List[str]): The paths of frames to do pose inference.
det_results (List[np.ndarray]): List of detected human boxes.
        device (Union[str, torch.device]): The device on which to run the
            pose estimator. Defaults to ``'cuda:0'``.
Returns:
        List[Dict[str, np.ndarray]]: List of pose estimation results, one
            dictionary per frame.
List[:obj:`PoseDataSample`]: List of data samples, generally used
to visualize data.
"""
try:
from mmpose.apis import inference_topdown, init_model
from mmpose.structures import PoseDataSample, merge_data_samples
    except (ImportError, ModuleNotFoundError):
        raise ImportError('Failed to import `inference_topdown` and '
                          '`init_model` from `mmpose.apis`. These APIs '
                          'are required by this inference API!')
if isinstance(pose_config, nn.Module):
model = pose_config
else:
model = init_model(pose_config, pose_checkpoint, device)
results = []
data_samples = []
print('Performing Human Pose Estimation for each frame')
for f, d in track_iter_progress(list(zip(frame_paths, det_results))):
pose_data_samples: List[PoseDataSample] \
= inference_topdown(model, f, d[..., :4], bbox_format='xyxy')
pose_data_sample = merge_data_samples(pose_data_samples)
pose_data_sample.dataset_meta = model.dataset_meta
        # Create an empty placeholder `pred_instances` when no person is
        # detected in the frame.
        if not hasattr(pose_data_sample, 'pred_instances'):
            num_keypoints = model.dataset_meta['num_keypoints']
            pred_instances_data = dict(
                keypoints=np.empty(shape=(0, num_keypoints, 2)),
                keypoint_scores=np.empty(
                    shape=(0, num_keypoints), dtype=np.float32),
                bboxes=np.empty(shape=(0, 4), dtype=np.float32),
                bbox_scores=np.empty(shape=(0, ), dtype=np.float32))
pose_data_sample.pred_instances = InstanceData(
**pred_instances_data)
poses = pose_data_sample.pred_instances.to_dict()
results.append(poses)
data_samples.append(pose_data_sample)
return results, data_samples | Perform Top-Down pose estimation.
Args:
pose_config (Union[str, :obj:`Path`, :obj:`mmengine.Config`,
:obj:`torch.nn.Module`]): Pose config file path or
pose model object. It can be a :obj:`Path`, a config object,
or a module object.
    pose_checkpoint (str): Checkpoint path/url.
frame_paths (List[str]): The paths of frames to do pose inference.
det_results (List[np.ndarray]): List of detected human boxes.
    device (Union[str, torch.device]): The device on which to run the
        pose estimator. Defaults to ``'cuda:0'``.
Returns:
    List[Dict[str, np.ndarray]]: List of pose estimation results, one
        dictionary per frame.
List[:obj:`PoseDataSample`]: List of data samples, generally used
to visualize data. | pose_inference | python | open-mmlab/mmaction2 | mmaction/apis/inference.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inference.py | Apache-2.0 |
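A sketch chaining `detection_inference` into `pose_inference`; all config, checkpoint, and frame paths are placeholders, and both `mmdet` and `mmpose` are required.

from mmaction.apis import detection_inference, pose_inference

frame_paths = ['frames/img_00001.jpg', 'frames/img_00002.jpg']  # placeholders
bboxes, _ = detection_inference(
    'demo/demo_configs/faster-rcnn_r50_fpn_2x_coco_infer.py',  # placeholder
    'checkpoints/faster_rcnn_r50_fpn_2x_coco.pth',  # placeholder
    frame_paths, device='cpu')
pose_results, pose_samples = pose_inference(
    'demo/demo_configs/td-hm_hrnet-w32_coco-256x192_infer.py',  # placeholder
    'checkpoints/hrnet_w32_coco_256x192.pth',  # placeholder
    frame_paths, bboxes, device='cpu')
# Each entry of `pose_results` covers one frame, with `keypoints` of shape
# (N, K, 2) and `keypoint_scores` of shape (N, K).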
def __call__(self,
inputs: InputsType,
return_datasamples: bool = False,
batch_size: int = 1,
return_vis: bool = False,
show: bool = False,
wait_time: int = 0,
draw_pred: bool = True,
vid_out_dir: str = '',
out_type: str = 'video',
print_result: bool = False,
pred_out_file: str = '',
target_resolution: Optional[Tuple[int]] = None,
**kwargs) -> dict:
"""Call the inferencer.
Args:
inputs (InputsType): Inputs for the inferencer.
return_datasamples (bool): Whether to return results as
:obj:`BaseDataElement`. Defaults to False.
            batch_size (int): Inference batch size. Defaults to 1.
            return_vis (bool): Whether to return the visualization result.
                Defaults to False.
show (bool): Whether to display the visualization results in a
popup window. Defaults to False.
wait_time (float): The interval of show (s). Defaults to 0.
            draw_pred (bool): Whether to draw prediction labels.
                Defaults to True.
vid_out_dir (str): Output directory of visualization results.
If left as empty, no file will be saved. Defaults to ''.
out_type (str): Output type of visualization results.
Defaults to 'video'.
            print_result (bool): Whether to print the inference result w/o
                visualization to the console. Defaults to False.
            pred_out_file (str): File to save the inference results w/o
                visualization. If left as empty, no file will be saved.
                Defaults to ''.
            target_resolution (Tuple[int], optional): Set to
                (desired_width, desired_height) to resize the output frames.
                If either dimension is None, the frames are resized while
                keeping the existing aspect ratio. Defaults to None.
**kwargs: Other keyword arguments passed to :meth:`preprocess`,
:meth:`forward`, :meth:`visualize` and :meth:`postprocess`.
Each key in kwargs should be in the corresponding set of
``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs``
and ``postprocess_kwargs``.
Returns:
dict: Inference and visualization results.
"""
return super().__call__(
inputs,
return_datasamples,
batch_size,
return_vis=return_vis,
show=show,
wait_time=wait_time,
draw_pred=draw_pred,
vid_out_dir=vid_out_dir,
print_result=print_result,
pred_out_file=pred_out_file,
out_type=out_type,
target_resolution=target_resolution,
**kwargs) | Call the inferencer.
Args:
inputs (InputsType): Inputs for the inferencer.
return_datasamples (bool): Whether to return results as
:obj:`BaseDataElement`. Defaults to False.
    batch_size (int): Inference batch size. Defaults to 1.
    return_vis (bool): Whether to return the visualization result.
        Defaults to False.
show (bool): Whether to display the visualization results in a
popup window. Defaults to False.
wait_time (float): The interval of show (s). Defaults to 0.
    draw_pred (bool): Whether to draw prediction labels.
        Defaults to True.
vid_out_dir (str): Output directory of visualization results.
If left as empty, no file will be saved. Defaults to ''.
out_type (str): Output type of visualization results.
Defaults to 'video'.
    print_result (bool): Whether to print the inference result w/o
        visualization to the console. Defaults to False.
    pred_out_file (str): File to save the inference results w/o
        visualization. If left as empty, no file will be saved.
        Defaults to ''.
    target_resolution (Tuple[int], optional): Set to
        (desired_width, desired_height) to resize the output frames.
        If either dimension is None, the frames are resized while
        keeping the existing aspect ratio. Defaults to None.
**kwargs: Other keyword arguments passed to :meth:`preprocess`,
:meth:`forward`, :meth:`visualize` and :meth:`postprocess`.
Each key in kwargs should be in the corresponding set of
``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs``
and ``postprocess_kwargs``.
Returns:
dict: Inference and visualization results. | __call__ | python | open-mmlab/mmaction2 | mmaction/apis/inferencers/actionrecog_inferencer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/actionrecog_inferencer.py | Apache-2.0 |
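A usage sketch for the inferencer's `__call__`, assuming `ActionRecogInferencer` is importable from `mmaction.apis.inferencers` and accepts a model alias or config path as its first argument; the alias and video path below are placeholders.

from mmaction.apis.inferencers import ActionRecogInferencer

inferencer = ActionRecogInferencer(
    'tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb')  # placeholder alias
results = inferencer(
    'demo/demo.mp4',  # placeholder video
    vid_out_dir='outputs/',
    print_result=True)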
def _inputs_to_list(self, inputs: InputsType) -> list:
"""Preprocess the inputs to a list. The main difference from mmengine
version is that we don't list a directory cause input could be a frame
folder.
Preprocess inputs to a list according to its type:
- list or tuple: return inputs
- str: return a list containing the string. The string
could be a path to file, a url or other types of string according
to the task.
Args:
inputs (InputsType): Inputs for the inferencer.
Returns:
list: List of input for the :meth:`preprocess`.
"""
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
return list(inputs) | Preprocess the inputs to a list. The main difference from mmengine
version is that we do not expand a directory into its files, because the
input could be a frame folder.
Preprocess inputs to a list according to its type:
- list or tuple: return inputs
- str: return a list containing the string. The string
could be a path to file, a url or other types of string according
to the task.
Args:
inputs (InputsType): Inputs for the inferencer.
Returns:
list: List of input for the :meth:`preprocess`. | _inputs_to_list | python | open-mmlab/mmaction2 | mmaction/apis/inferencers/actionrecog_inferencer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/actionrecog_inferencer.py | Apache-2.0 |
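A standalone mirror of `_inputs_to_list` (renamed, with `self` dropped) to show the wrapping behavior:

def inputs_to_list(inputs):
    # Wrap a single input; pass sequences through as lists.
    if not isinstance(inputs, (list, tuple)):
        inputs = [inputs]
    return list(inputs)

assert inputs_to_list('demo/frames_dir') == ['demo/frames_dir']
assert inputs_to_list(('a.mp4', 'b.mp4')) == ['a.mp4', 'b.mp4']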
def _init_pipeline(self, cfg: ConfigType) -> Compose:
"""Initialize the test pipeline."""
test_pipeline = cfg.test_dataloader.dataset.pipeline
# Alter data pipelines for decode
if self.input_format == 'array':
for i in range(len(test_pipeline)):
if 'Decode' in get_str_type(test_pipeline[i]['type']):
test_pipeline[i] = dict(type='ArrayDecode')
test_pipeline = [
x for x in test_pipeline if 'Init' not in x['type']
]
elif self.input_format == 'video':
if 'Init' not in get_str_type(test_pipeline[0]['type']):
test_pipeline = [dict(type='DecordInit')] + test_pipeline
else:
test_pipeline[0] = dict(type='DecordInit')
for i in range(len(test_pipeline)):
if 'Decode' in get_str_type(test_pipeline[i]['type']):
test_pipeline[i] = dict(type='DecordDecode')
elif self.input_format == 'rawframes':
if 'Init' in get_str_type(test_pipeline[0]['type']):
test_pipeline = test_pipeline[1:]
for i in range(len(test_pipeline)):
if 'Decode' in get_str_type(test_pipeline[i]['type']):
test_pipeline[i] = dict(type='RawFrameDecode')
        # Alter data pipelines to disable TTA and avoid OOM:
        # use center crop instead of multiple crops
for i in range(len(test_pipeline)):
if get_str_type(
test_pipeline[i]['type']) in ['ThreeCrop', 'TenCrop']:
test_pipeline[i]['type'] = 'CenterCrop'
# Use single clip for `Recognizer3D`
if cfg.model.type == 'Recognizer3D':
for i in range(len(test_pipeline)):
if get_str_type(test_pipeline[i]['type']) == 'SampleFrames':
test_pipeline[i]['num_clips'] = 1
# Pack multiple types of input format
test_pipeline.insert(
0,
dict(
type='InferencerPackInput',
input_format=self.input_format,
**self.pack_cfg))
return Compose(test_pipeline) | Initialize the test pipeline. | _init_pipeline | python | open-mmlab/mmaction2 | mmaction/apis/inferencers/actionrecog_inferencer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/actionrecog_inferencer.py | Apache-2.0 |
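A toy, self-contained sketch of the substitutions `_init_pipeline` performs for the 'video' input format; the starting pipeline is fabricated, and simple string checks stand in for `get_str_type`.

test_pipeline = [
    dict(type='OpenCVInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25),
    dict(type='OpenCVDecode'),
    dict(type='TenCrop', crop_size=224),
]

test_pipeline[0] = dict(type='DecordInit')  # swap the init step
for i, step in enumerate(test_pipeline):
    if 'Decode' in step['type']:
        test_pipeline[i] = dict(type='DecordDecode')  # swap the decode step
    elif step['type'] in ('ThreeCrop', 'TenCrop'):
        test_pipeline[i]['type'] = 'CenterCrop'  # disable TTA to avoid OOM

# Result: DecordInit -> SampleFrames -> DecordDecode -> CenterCrop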
def visualize(
self,
inputs: InputsType,
preds: PredType,
return_vis: bool = False,
show: bool = False,
wait_time: int = 0,
draw_pred: bool = True,
fps: int = 30,
out_type: str = 'video',
target_resolution: Optional[Tuple[int]] = None,
vid_out_dir: str = '',
) -> Union[List[np.ndarray], None]:
"""Visualize predictions.
Args:
inputs (List[Union[str, np.ndarray]]): Inputs for the inferencer.
preds (List[Dict]): Predictions of the model.
return_vis (bool): Whether to return the visualization result.
Defaults to False.
show (bool): Whether to display the image in a popup window.
Defaults to False.
wait_time (float): The interval of show (s). Defaults to 0.
draw_pred (bool): Whether to draw prediction labels.
Defaults to True.
            fps (int): Frames per second for saving video. Defaults to 30.
            out_type (str): Output format type, choose from 'img', 'gif',
                'video'. Defaults to ``'video'``.
target_resolution (Tuple[int], optional): Set to
                (desired_width, desired_height) to have resized frames. If
either dimension is None, the frames are resized by keeping
the existing aspect ratio. Defaults to None.
vid_out_dir (str): Output directory of visualization results.
If left as empty, no file will be saved. Defaults to ''.
Returns:
List[np.ndarray] or None: Returns visualization results only if
applicable.
"""
        if not show and vid_out_dir == '' and not return_vis:
            return None
        if self.visualizer is None:
            raise ValueError('Visualization needs the "visualizer" term '
                             'defined in the config, but got None.')
results = []
for single_input, pred in zip(inputs, preds):
if isinstance(single_input, str):
frames = single_input
video_name = osp.basename(single_input)
elif isinstance(single_input, np.ndarray):
frames = single_input.copy()
video_num = str(self.num_visualized_vids).zfill(8)
video_name = f'{video_num}.mp4'
else:
raise ValueError('Unsupported input type: '
f'{type(single_input)}')
out_path = osp.join(vid_out_dir, video_name) if vid_out_dir != '' \
else None
visualization = self.visualizer.add_datasample(
video_name,
frames,
pred,
show_frames=show,
wait_time=wait_time,
draw_gt=False,
draw_pred=draw_pred,
fps=fps,
out_type=out_type,
out_path=out_path,
target_resolution=target_resolution,
)
results.append(visualization)
self.num_visualized_vids += 1
return results | Visualize predictions.
Args:
inputs (List[Union[str, np.ndarray]]): Inputs for the inferencer.
preds (List[Dict]): Predictions of the model.
return_vis (bool): Whether to return the visualization result.
Defaults to False.
show (bool): Whether to display the image in a popup window.
Defaults to False.
wait_time (float): The interval of show (s). Defaults to 0.
draw_pred (bool): Whether to draw prediction labels.
Defaults to True.
    fps (int): Frames per second for saving video. Defaults to 30.
    out_type (str): Output format type, choose from 'img', 'gif',
        'video'. Defaults to ``'video'``.
target_resolution (Tuple[int], optional): Set to
        (desired_width, desired_height) to have resized frames. If
either dimension is None, the frames are resized by keeping
the existing aspect ratio. Defaults to None.
vid_out_dir (str): Output directory of visualization results.
If left as empty, no file will be saved. Defaults to ''.
Returns:
List[np.ndarray] or None: Returns visualization results only if
applicable. | visualize | python | open-mmlab/mmaction2 | mmaction/apis/inferencers/actionrecog_inferencer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/actionrecog_inferencer.py | Apache-2.0 |
def postprocess(
self,
preds: PredType,
visualization: Optional[List[np.ndarray]] = None,
return_datasample: bool = False,
print_result: bool = False,
pred_out_file: str = '',
) -> Union[ResType, Tuple[ResType, np.ndarray]]:
"""Process the predictions and visualization results from ``forward``
and ``visualize``.
This method should be responsible for the following tasks:
1. Convert datasamples into a json-serializable dict if needed.
2. Pack the predictions and visualization results and return them.
3. Dump or log the predictions.
Args:
preds (List[Dict]): Predictions of the model.
visualization (Optional[np.ndarray]): Visualized predictions.
return_datasample (bool): Whether to use Datasample to store
inference results. If False, dict will be used.
print_result (bool): Whether to print the inference result w/o
visualization to the console. Defaults to False.
            pred_out_file (str): File to save the inference results w/o
visualization. If left as empty, no file will be saved.
Defaults to ''.
Returns:
dict: Inference and visualization results with key ``predictions``
and ``visualization``.
- ``visualization`` (Any): Returned by :meth:`visualize`.
- ``predictions`` (dict or DataSample): Returned by
:meth:`forward` and processed in :meth:`postprocess`.
If ``return_datasample=False``, it usually should be a
json-serializable dict containing only basic data elements such
as strings and numbers.
"""
result_dict = {}
results = preds
if not return_datasample:
results = []
for pred in preds:
result = self.pred2dict(pred)
results.append(result)
        result_dict['predictions'] = results
        if print_result:
            print(result_dict)
        if pred_out_file != '':
            mmengine.dump(result_dict, pred_out_file)
        # Attach the visualization only after printing and dumping, since
        # raw visualization arrays are not JSON-serializable.
        result_dict['visualization'] = visualization
return result_dict | Process the predictions and visualization results from ``forward``
and ``visualize``.
This method should be responsible for the following tasks:
1. Convert datasamples into a json-serializable dict if needed.
2. Pack the predictions and visualization results and return them.
3. Dump or log the predictions.
Args:
preds (List[Dict]): Predictions of the model.
visualization (Optional[np.ndarray]): Visualized predictions.
return_datasample (bool): Whether to use Datasample to store
inference results. If False, dict will be used.
print_result (bool): Whether to print the inference result w/o
visualization to the console. Defaults to False.
    pred_out_file (str): File to save the inference results w/o
visualization. If left as empty, no file will be saved.
Defaults to ''.
Returns:
dict: Inference and visualization results with key ``predictions``
and ``visualization``.
- ``visualization`` (Any): Returned by :meth:`visualize`.
- ``predictions`` (dict or DataSample): Returned by
:meth:`forward` and processed in :meth:`postprocess`.
If ``return_datasample=False``, it usually should be a
json-serializable dict containing only basic data elements such
as strings and numbers. | postprocess | python | open-mmlab/mmaction2 | mmaction/apis/inferencers/actionrecog_inferencer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/actionrecog_inferencer.py | Apache-2.0 |
def pred2dict(self, data_sample: ActionDataSample) -> Dict:
"""Extract elements necessary to represent a prediction into a
        dictionary. The dictionary should contain only basic data elements
        such as strings and numbers to guarantee it is JSON-serializable.
Args:
data_sample (ActionDataSample): The data sample to be converted.
Returns:
dict: The output dictionary.
"""
result = {}
result['pred_labels'] = data_sample.pred_label.tolist()
result['pred_scores'] = data_sample.pred_score.tolist()
return result | Extract elements necessary to represent a prediction into a
dictionary. The dictionary should contain only basic data elements
such as strings and numbers to guarantee it is JSON-serializable.
Args:
data_sample (ActionDataSample): The data sample to be converted.
Returns:
dict: The output dictionary. | pred2dict | python | open-mmlab/mmaction2 | mmaction/apis/inferencers/actionrecog_inferencer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/actionrecog_inferencer.py | Apache-2.0 |
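An illustrative round-trip for `pred2dict`, assuming `ActionDataSample` exposes the `set_pred_score`/`set_pred_label` setters of current MMAction2; the scores are fabricated.

import torch

from mmaction.structures import ActionDataSample

sample = ActionDataSample()
sample.set_pred_score(torch.tensor([0.1, 0.7, 0.2]))
sample.set_pred_label(torch.tensor([1]))
# pred2dict(sample) would then return a JSON-serializable dict like:
# {'pred_labels': [1], 'pred_scores': [0.1, 0.7, 0.2]}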
def forward(self, inputs: InputType, batch_size: int,
**forward_kwargs) -> PredType:
"""Forward the inputs to the model.
Args:
inputs (InputsType): The inputs to be forwarded.
batch_size (int): Batch size. Defaults to 1.
Returns:
            Dict: The prediction results, possibly containing the key 'rec'.
"""
result = {}
if self.mode == 'rec':
predictions = self.actionrecog_inferencer(
inputs,
return_datasamples=True,
batch_size=batch_size,
**forward_kwargs)['predictions']
result['rec'] = [[p] for p in predictions]
return result | Forward the inputs to the model.
Args:
inputs (InputsType): The inputs to be forwarded.
batch_size (int): Batch size. Defaults to 1.
Returns:
    Dict: The prediction results, possibly containing the key 'rec'.