Build a dataset, defined by `dataset_name`. Args: dataset_name (str): the name of the dataset to be constructed. cfg (CfgNode): configs. Details can be found in slowfast/config/defaults.py split (str): the split of the data loader. Options include `train`, `val`, and `test`. Returns: Dataset: a constructed dataset specified by dataset_name.
def build_dataset(dataset_name, cfg, split): """ Build a dataset, defined by `dataset_name`. Args: dataset_name (str): the name of the dataset to be constructed. cfg (CfgNode): configs. Details can be found in slowfast/config/defaults.py split (str): the split of the data loader. Options include `train`, `val`, and `test`. Returns: Dataset: a constructed dataset specified by dataset_name. """ # Capitalize the first letter of the dataset_name since the dataset_name # in configs may be in lowercase but the name of the dataset class should always # start with an uppercase letter. name = dataset_name.capitalize() return DATASET_REGISTRY.get(name)(cfg, split)
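For context, a minimal sketch of the registry pattern that `build_dataset` relies on, assuming the fvcore-style `DATASET_REGISTRY` used by this codebase; the `Mydataset` class and the `cfg` object are hypothetical placeholders, not part of the library.

from fvcore.common.registry import Registry

# Normally defined once in the datasets package; repeated here only for illustration.
DATASET_REGISTRY = Registry("DATASET")

@DATASET_REGISTRY.register()
class Mydataset:  # class name must equal dataset_name.capitalize()
    def __init__(self, cfg, split):
        self.cfg, self.split = cfg, split

# build_dataset("mydataset", cfg, "train") would then resolve to Mydataset(cfg, "train").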
Clip the boxes with the height and width of the image size. Args: boxes (ndarray): bounding boxes to clip. The dimension is `num boxes` x 4. height (int): the height of the image. width (int): the width of the image. Returns: boxes (ndarray): clipped bounding boxes.
def clip_boxes_to_image(boxes, height, width): """ Clip the boxes with the height and width of the image size. Args: boxes (ndarray): bounding boxes to clip. The dimension is `num boxes` x 4. height (int): the height of the image. width (int): the width of the image. Returns: boxes (ndarray): clipped bounding boxes. """ boxes[:, [0, 2]] = np.minimum( width - 1.0, np.maximum(0.0, boxes[:, [0, 2]]) ) boxes[:, [1, 3]] = np.minimum( height - 1.0, np.maximum(0.0, boxes[:, [1, 3]]) ) return boxes
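A small, self-contained illustration of the clipping behaviour on a synthetic box (assuming the helpers in this section are importable):

import numpy as np

boxes = np.array([[-5.0, 10.0, 300.0, 150.0]])  # x1, y1, x2, y2
clipped = clip_boxes_to_image(boxes.copy(), height=128, width=256)
print(clipped)  # [[  0.  10. 255. 127.]] -- x clamped to [0, width-1], y to [0, height-1]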
Perform a spatial short scale jittering on the given images and corresponding boxes. Args: images (list): list of images to perform scale jitter. Dimension is `height` x `width` x `channel`. min_size (int): the minimal size to scale the frames. max_size (int): the maximal size to scale the frames. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: (list): the list of scaled images with dimension of `new height` x `new width` x `channel`. (ndarray or None): the scaled boxes with dimension of `num boxes` x 4.
def random_short_side_scale_jitter_list(images, min_size, max_size, boxes=None): """ Perform a spatial short scale jittering on the given images and corresponding boxes. Args: images (list): list of images to perform scale jitter. Dimension is `height` x `width` x `channel`. min_size (int): the minimal size to scale the frames. max_size (int): the maximal size to scale the frames. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: (list): the list of scaled images with dimension of `new height` x `new width` x `channel`. (ndarray or None): the scaled boxes with dimension of `num boxes` x 4. """ size = int(round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size))) height = images[0].shape[0] width = images[0].shape[1] if (width <= height and width == size) or ( height <= width and height == size ): return images, boxes new_width = size new_height = size if width < height: new_height = int(math.floor((float(height) / width) * size)) if boxes is not None: boxes = [ proposal * float(new_height) / height for proposal in boxes ] else: new_width = int(math.floor((float(width) / height) * size)) if boxes is not None: boxes = [proposal * float(new_width) / width for proposal in boxes] return ( [ cv2.resize( image, (new_width, new_height), interpolation=cv2.INTER_LINEAR ).astype(np.float32) for image in images ], boxes, )
Scale the short side of the image to size. Args: size (int): size to scale the image. image (array): image to perform short side scale. Dimension is `height` x `width` x `channel`. Returns: (ndarray): the scaled image with dimension of `height` x `width` x `channel`.
def scale(size, image): """ Scale the short side of the image to size. Args: size (int): size to scale the image. image (array): image to perform short side scale. Dimension is `height` x `width` x `channel`. Returns: (ndarray): the scaled image with dimension of `height` x `width` x `channel`. """ height = image.shape[0] width = image.shape[1] if (width <= height and width == size) or ( height <= width and height == size ): return image new_width = size new_height = size if width < height: new_height = int(math.floor((float(height) / width) * size)) else: new_width = int(math.floor((float(width) / height) * size)) img = cv2.resize( image, (new_width, new_height), interpolation=cv2.INTER_LINEAR ) return img.astype(np.float32)
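For example, scaling the short side of a synthetic 240x320 image to 256 (assuming `scale` and its cv2/numpy dependencies are importable):

import numpy as np

img = np.zeros((240, 320, 3), dtype=np.uint8)  # height x width x channel
out = scale(256, img)
print(out.shape)  # (256, 341, 3): short side becomes 256, aspect ratio preserved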
Scale the short side of the box to size. Args: size (int): size to scale the image. boxes (ndarray): bounding boxes to scale. The dimension is `num boxes` x 4. height (int): the height of the image. width (int): the width of the image. Returns: boxes (ndarray): scaled bounding boxes.
def scale_boxes(size, boxes, height, width): """ Scale the short side of the box to size. Args: size (int): size to scale the image. boxes (ndarray): bounding boxes to scale. The dimension is `num boxes` x 4. height (int): the height of the image. width (int): the width of the image. Returns: boxes (ndarray): scaled bounding boxes. """ if (width <= height and width == size) or ( height <= width and height == size ): return boxes new_width = size new_height = size if width < height: new_height = int(math.floor((float(height) / width) * size)) boxes *= float(new_height) / height else: new_width = int(math.floor((float(width) / height) * size)) boxes *= float(new_width) / width return boxes
Horizontally flip the list of images and optional boxes. Args: prob (float): probability to flip. images (list): list of images to flip. Dimension is `height` x `width` x `channel` or `channel` x `height` x `width`. order (str): order of the `height`, `channel` and `width`. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: (list): the flipped list of images with dimension of `height` x `width` x `channel`. (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4.
def horizontal_flip_list(prob, images, order="CHW", boxes=None): """ Horizontally flip the list of images and optional boxes. Args: prob (float): probability to flip. images (list): list of images to flip. Dimension is `height` x `width` x `channel` or `channel` x `height` x `width`. order (str): order of the `height`, `channel` and `width`. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: (list): the flipped list of images with dimension of `height` x `width` x `channel`. (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. """ _, width, _ = images[0].shape if np.random.uniform() < prob: if boxes is not None: boxes = [flip_boxes(proposal, width) for proposal in boxes] if order == "CHW": out_images = [] for image in images: image = np.asarray(image).swapaxes(2, 0) image = image[::-1] out_images.append(image.swapaxes(0, 2)) return out_images, boxes elif order == "HWC": return [cv2.flip(image, 1) for image in images], boxes return images, boxes
Perform left, center, or right crop of the given list of images. Args: size (int): size to crop. images (list): list of images to crop. Dimension is `height` x `width` x `channel` or `channel` x `height` x `width`. spatial_shift_pos (int): option includes 0 (left), 1 (middle), and 2 (right) crop. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: cropped (list): the cropped list of images with dimension of `height` x `width` x `channel`. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4.
def spatial_shift_crop_list(size, images, spatial_shift_pos, boxes=None): """ Perform left, center, or right crop of the given list of images. Args: size (int): size to crop. images (list): list of images to crop. Dimension is `height` x `width` x `channel` or `channel` x `height` x `width`. spatial_shift_pos (int): option includes 0 (left), 1 (middle), and 2 (right) crop. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: cropped (list): the cropped list of images with dimension of `height` x `width` x `channel`. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. """ assert spatial_shift_pos in [0, 1, 2] height = images[0].shape[0] width = images[0].shape[1] y_offset = int(math.ceil((height - size) / 2)) x_offset = int(math.ceil((width - size) / 2)) if height > width: if spatial_shift_pos == 0: y_offset = 0 elif spatial_shift_pos == 2: y_offset = height - size else: if spatial_shift_pos == 0: x_offset = 0 elif spatial_shift_pos == 2: x_offset = width - size cropped = [ image[y_offset : y_offset + size, x_offset : x_offset + size, :] for image in images ] assert cropped[0].shape[0] == size, "Image height not cropped properly" assert cropped[0].shape[1] == size, "Image width not cropped properly" if boxes is not None: for i in range(len(boxes)): boxes[i][:, [0, 2]] -= x_offset boxes[i][:, [1, 3]] -= y_offset return cropped, boxes
Transpose the dimension from `channel` x `height` x `width` to `height` x `width` x `channel`. Args: image (array): image to transpose. Returns (array): transposed image.
def CHW2HWC(image): """ Transpose the dimension from `channel` x `height` x `width` to `height` x `width` x `channel`. Args: image (array): image to transpose. Returns (array): transposed image. """ return image.transpose([1, 2, 0])
Transpose the dimension from `height` x `width` x `channel` to `channel` x `height` x `width`. Args: image (array): image to transpose. Returns (array): transposed image.
def HWC2CHW(image): """ Transpose the dimension from `height` x `width` x `channel` to `channel` x `height` x `width`. Args: image (array): image to transpose. Returns (array): transposed image. """ return image.transpose([2, 0, 1])
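The two transposes are inverses of each other, e.g.:

import numpy as np

hwc = np.random.rand(224, 224, 3)       # height x width x channel
chw = HWC2CHW(hwc)                      # (3, 224, 224)
assert CHW2HWC(chw).shape == hwc.shape  # round trip restores height x width x channel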
Perform color jitter on the list of images. Args: images (list): list of images to perform color jitter. img_brightness (float): jitter ratio for brightness. img_contrast (float): jitter ratio for contrast. img_saturation (float): jitter ratio for saturation. Returns: images (list): the jittered list of images.
def color_jitter_list( images, img_brightness=0, img_contrast=0, img_saturation=0 ): """ Perform color jitter on the list of images. Args: images (list): list of images to perform color jitter. img_brightness (float): jitter ratio for brightness. img_contrast (float): jitter ratio for contrast. img_saturation (float): jitter ratio for saturation. Returns: images (list): the jittered list of images. """ jitter = [] if img_brightness != 0: jitter.append("brightness") if img_contrast != 0: jitter.append("contrast") if img_saturation != 0: jitter.append("saturation") if len(jitter) > 0: order = np.random.permutation(np.arange(len(jitter))) for idx in range(0, len(jitter)): if jitter[order[idx]] == "brightness": images = brightness_list(img_brightness, images) elif jitter[order[idx]] == "contrast": images = contrast_list(img_contrast, images) elif jitter[order[idx]] == "saturation": images = saturation_list(img_saturation, images) return images
Perform AlexNet-style PCA jitter on the given list of images. Args: images (list): list of images to perform lighting jitter. alphastd (float): jitter ratio for PCA jitter. eigval (list): eigenvalues for PCA jitter. eigvec (list[list]): eigenvectors for PCA jitter. Returns: out_images (list): the list of jittered images.
def lighting_list(imgs, alphastd, eigval, eigvec, alpha=None): """ Perform AlexNet-style PCA jitter on the given list of images. Args: images (list): list of images to perform lighting jitter. alphastd (float): jitter ratio for PCA jitter. eigval (list): eigenvalues for PCA jitter. eigvec (list[list]): eigenvectors for PCA jitter. Returns: out_images (list): the list of jittered images. """ if alphastd == 0: return imgs # generate alpha1, alpha2, alpha3 alpha = np.random.normal(0, alphastd, size=(1, 3)) eig_vec = np.array(eigvec) eig_val = np.reshape(eigval, (1, 3)) rgb = np.sum( eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0), axis=1, ) out_images = [] for img in imgs: for idx in range(img.shape[0]): img[idx] = img[idx] + rgb[2 - idx] out_images.append(img) return out_images
Perform color normalization on the image with the given mean and stddev. Args: image (array): image to perform color normalization. mean (list): mean values to subtract. stddev (list): stddev values to divide by.
def color_normalization(image, mean, stddev): """ Perform color normalization on the image with the given mean and stddev. Args: image (array): image to perform color normalization. mean (list): mean values to subtract. stddev (list): stddev values to divide by. """ # Input image should be in CHW format. assert len(mean) == image.shape[0], "channel mean not computed properly" assert len(stddev) == image.shape[0], "channel stddev not computed properly" for idx in range(image.shape[0]): image[idx] = image[idx] - mean[idx] image[idx] = image[idx] / stddev[idx] return image
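A quick sanity check with synthetic per-channel statistics (note the function modifies the CHW image in place):

import numpy as np

image = np.full((3, 4, 4), 120.0, dtype=np.float32)
out = color_normalization(image, mean=[110.0, 120.0, 130.0], stddev=[1.0, 2.0, 5.0])
print(out[:, 0, 0])  # [10.  0. -2.]: (value - mean) / stddev per channel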
Pad the given image with the size of pad_size. Args: image (array): image to pad. pad_size (int): size to pad. order (str): order of the `height`, `channel` and `width`. Returns: img (array): padded image.
def pad_image(image, pad_size, order="CHW"): """ Pad the given image with the size of pad_size. Args: image (array): image to pad. pad_size (int): size to pad. order (str): order of the `height`, `channel` and `width`. Returns: img (array): padded image. """ if order == "CHW": img = np.pad( image, ((0, 0), (pad_size, pad_size), (pad_size, pad_size)), mode=str("constant"), ) elif order == "HWC": img = np.pad( image, ((pad_size, pad_size), (pad_size, pad_size), (0, 0)), mode=str("constant"), ) return img
Horizontally flip the image. Args: prob (float): probability to flip. image (array): image to flip. order (str): order of the `height`, `channel` and `width`. Returns: img (array): flipped image.
def horizontal_flip(prob, image, order="CHW"): """ Horizontally flip the image. Args: prob (float): probability to flip. image (array): image to flip. order (str): order of the `height`, `channel` and `width`. Returns: img (array): flipped image. """ assert order in ["CHW", "HWC"], "order {} is not supported".format(order) if np.random.uniform() < prob: if order == "CHW": image = image[:, :, ::-1] elif order == "HWC": image = image[:, ::-1, :] else: raise NotImplementedError("Unknown order {}".format(order)) return image
Horizontally flip the boxes. Args: boxes (array): box to flip. im_width (int): width of the image. Returns: boxes_flipped (array): flipped box.
def flip_boxes(boxes, im_width): """ Horizontally flip the boxes. Args: boxes (array): box to flip. im_width (int): width of the image. Returns: boxes_flipped (array): flipped box. """ boxes_flipped = boxes.copy() boxes_flipped[:, 0::4] = im_width - boxes[:, 2::4] - 1 boxes_flipped[:, 2::4] = im_width - boxes[:, 0::4] - 1 return boxes_flipped
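For instance, flipping a box horizontally inside a 100-pixel-wide image:

import numpy as np

boxes = np.array([[10.0, 5.0, 30.0, 40.0]])  # x1, y1, x2, y2
flipped = flip_boxes(boxes, im_width=100)
print(flipped)  # [[69.  5. 89. 40.]]: x1' = W - x2 - 1, x2' = W - x1 - 1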
Crop the boxes given the offsets. Args: boxes (array): boxes to crop. x_offset (int): offset on x. y_offset (int): offset on y.
def crop_boxes(boxes, x_offset, y_offset): """ Crop the boxes given the offsets. Args: boxes (array): boxes to crop. x_offset (int): offset on x. y_offset (int): offset on y. """ boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset return boxes
Perform random crop on a list of images. Args: images (list): list of images to perform random crop. size (int): size to crop. pad_size (int): padding size. order (str): order of the `height`, `channel` and `width`. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: cropped (ndarray): the cropped list of images with dimension of `height` x `width` x `channel`. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4.
def random_crop_list(images, size, pad_size=0, order="CHW", boxes=None): """ Perform random crop on a list of images. Args: images (list): list of images to perform random crop. size (int): size to crop. pad_size (int): padding size. order (str): order of the `height`, `channel` and `width`. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: cropped (ndarray): the cropped list of images with dimension of `height` x `width` x `channel`. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. """ # explicitly dealing processing per image order to avoid flipping images. if pad_size > 0: images = [ pad_image(pad_size=pad_size, image=image, order=order) for image in images ] # image format should be CHW. if order == "CHW": if images[0].shape[1] == size and images[0].shape[2] == size: return images, boxes height = images[0].shape[1] width = images[0].shape[2] y_offset = 0 if height > size: y_offset = int(np.random.randint(0, height - size)) x_offset = 0 if width > size: x_offset = int(np.random.randint(0, width - size)) cropped = [ image[:, y_offset : y_offset + size, x_offset : x_offset + size] for image in images ] assert cropped[0].shape[1] == size, "Image not cropped properly" assert cropped[0].shape[2] == size, "Image not cropped properly" elif order == "HWC": if images[0].shape[0] == size and images[0].shape[1] == size: return images, boxes height = images[0].shape[0] width = images[0].shape[1] y_offset = 0 if height > size: y_offset = int(np.random.randint(0, height - size)) x_offset = 0 if width > size: x_offset = int(np.random.randint(0, width - size)) cropped = [ image[y_offset : y_offset + size, x_offset : x_offset + size, :] for image in images ] assert cropped[0].shape[0] == size, "Image not cropped properly" assert cropped[0].shape[1] == size, "Image not cropped properly" if boxes is not None: boxes = [crop_boxes(proposal, x_offset, y_offset) for proposal in boxes] return cropped, boxes
Perform center crop on input images. Args: size (int): size of the cropped height and width. image (array): the image to perform center crop.
def center_crop(size, image): """ Perform center crop on input images. Args: size (int): size of the cropped height and width. image (array): the image to perform center crop. """ height = image.shape[0] width = image.shape[1] y_offset = int(math.ceil((height - size) / 2)) x_offset = int(math.ceil((width - size) / 2)) cropped = image[y_offset : y_offset + size, x_offset : x_offset + size, :] assert cropped.shape[0] == size, "Image height not cropped properly" assert cropped.shape[1] == size, "Image width not cropped properly" return cropped
Perform ResNet style random scale jittering: randomly select the scale from [1/max_size, 1/min_size]. Args: image (array): image to perform random scale. min_size (int): min size to scale. max_size (int): max size to scale. Returns: image (array): scaled image.
def random_scale_jitter(image, min_size, max_size): """ Perform ResNet style random scale jittering: randomly select the scale from [1/max_size, 1/min_size]. Args: image (array): image to perform random scale. min_size (int): min size to scale. max_size (int): max size to scale. Returns: image (array): scaled image. """ img_scale = int( round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size)) ) image = scale(img_scale, image) return image
Perform ResNet style random scale jittering on a list of images: randomly select the scale from [1/max_size, 1/min_size]. Note that all the images will share the same scale. Args: images (list): list of images to perform random scale. min_size (int): min size to scale. max_size (int): max size to scale. Returns: images (list): list of scaled images.
def random_scale_jitter_list(images, min_size, max_size): """ Perform ResNet style random scale jittering on a list of images: randomly select the scale from [1/max_size, 1/min_size]. Note that all the images will share the same scale. Args: images (list): list of images to perform random scale. min_size (int): min size to scale. max_size (int): max size to scale. Returns: images (list): list of scaled images. """ img_scale = int( round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size)) ) return [scale(img_scale, image) for image in images]
Perform random sized cropping on the given image. Random crop with size 8% - 100% of the image area and aspect ratio in [3/4, 4/3]. Args: image (array): image to crop. size (int): size to crop. area_frac (float): minimum fraction of the image area to crop. Returns: (array): cropped image.
def random_sized_crop(image, size, area_frac=0.08): """ Perform random sized cropping on the given image. Random crop with size 8% - 100% of the image area and aspect ratio in [3/4, 4/3]. Args: image (array): image to crop. size (int): size to crop. area_frac (float): minimum fraction of the image area to crop. Returns: (array): cropped image. """ for _ in range(0, 10): height = image.shape[0] width = image.shape[1] area = height * width target_area = np.random.uniform(area_frac, 1.0) * area aspect_ratio = np.random.uniform(3.0 / 4.0, 4.0 / 3.0) w = int(round(math.sqrt(float(target_area) * aspect_ratio))) h = int(round(math.sqrt(float(target_area) / aspect_ratio))) if np.random.uniform() < 0.5: w, h = h, w if h <= height and w <= width: if height == h: y_offset = 0 else: y_offset = np.random.randint(0, height - h) if width == w: x_offset = 0 else: x_offset = np.random.randint(0, width - w) y_offset = int(y_offset) x_offset = int(x_offset) cropped = image[y_offset : y_offset + h, x_offset : x_offset + w, :] assert ( cropped.shape[0] == h and cropped.shape[1] == w ), "Wrong crop size" cropped = cv2.resize( cropped, (size, size), interpolation=cv2.INTER_LINEAR ) return cropped.astype(np.float32) return center_crop(size, scale(size, image))
Perform AlexNet-style PCA jitter on the given image. Args: image (array): image to perform lighting jitter. alphastd (float): jitter ratio for PCA jitter. eigval (array): eigenvalues for PCA jitter. eigvec (list): eigenvectors for PCA jitter. Returns: img (array): the jittered image.
def lighting(img, alphastd, eigval, eigvec): """ Perform AlexNet-style PCA jitter on the given image. Args: image (array): image to perform lighting jitter. alphastd (float): jitter ratio for PCA jitter. eigval (array): eigenvalues for PCA jitter. eigvec (list): eigenvectors for PCA jitter. Returns: img (array): the jittered image. """ if alphastd == 0: return img # generate alpha1, alpha2, alpha3. alpha = np.random.normal(0, alphastd, size=(1, 3)) eig_vec = np.array(eigvec) eig_val = np.reshape(eigval, (1, 3)) rgb = np.sum( eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0), axis=1, ) for idx in range(img.shape[0]): img[idx] = img[idx] + rgb[2 - idx] return img
Perform random sized cropping on the given list of images. Random crop with size 8% - 100% of the image area and aspect ratio in [3/4, 4/3]. Args: images (list): list of images to crop. size (int): size to crop. crop_area_fraction (float): minimum fraction of the image area to crop. Returns: (list): list of cropped images.
def random_sized_crop_list(images, size, crop_area_fraction=0.08): """ Perform random sized cropping on the given list of images. Random crop with size 8% - 100% of the image area and aspect ratio in [3/4, 4/3]. Args: images (list): list of images to crop. size (int): size to crop. crop_area_fraction (float): minimum fraction of the image area to crop. Returns: (list): list of cropped images. """ for _ in range(0, 10): height = images[0].shape[0] width = images[0].shape[1] area = height * width target_area = np.random.uniform(crop_area_fraction, 1.0) * area aspect_ratio = np.random.uniform(3.0 / 4.0, 4.0 / 3.0) w = int(round(math.sqrt(float(target_area) * aspect_ratio))) h = int(round(math.sqrt(float(target_area) / aspect_ratio))) if np.random.uniform() < 0.5: w, h = h, w if h <= height and w <= width: if height == h: y_offset = 0 else: y_offset = np.random.randint(0, height - h) if width == w: x_offset = 0 else: x_offset = np.random.randint(0, width - w) y_offset = int(y_offset) x_offset = int(x_offset) cropped_images = [] for image in images: cropped = image[ y_offset : y_offset + h, x_offset : x_offset + w, : ] assert ( cropped.shape[0] == h and cropped.shape[1] == w ), "Wrong crop size" cropped = cv2.resize( cropped, (size, size), interpolation=cv2.INTER_LINEAR ) cropped_images.append(cropped.astype(np.float32)) return cropped_images return [center_crop(size, scale(size, image)) for image in images]
Convert the image to gray scale. Args: image (tensor): image to convert to gray scale. Dimension is `channel` x `height` x `width`. Returns: img_gray (tensor): image in gray scale.
def grayscale(image): """ Convert the image to gray scale. Args: image (tensor): image to convert to gray scale. Dimension is `channel` x `height` x `width`. Returns: img_gray (tensor): image in gray scale. """ # R -> 0.299, G -> 0.587, B -> 0.114. img_gray = np.copy(image) gray_channel = 0.299 * image[2] + 0.587 * image[1] + 0.114 * image[0] img_gray[0] = gray_channel img_gray[1] = gray_channel img_gray[2] = gray_channel return img_gray
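A worked example with a synthetic CHW image; the weights are applied assuming BGR channel order (the 0.299 red weight multiplies channel 2), which matches the cv2-loaded images used elsewhere in this section:

import numpy as np

img = np.stack([np.full((2, 2), 10.0),   # B
                np.full((2, 2), 20.0),   # G
                np.full((2, 2), 30.0)])  # R
gray = grayscale(img)
print(gray[0, 0, 0])  # 0.299*30 + 0.587*20 + 0.114*10 = 21.85, replicated on all channels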
Perform color saturation on the given image. Args: var (float): variance. image (array): image to perform color saturation. Returns: (array): image that performed color saturation.
def saturation(var, image): """ Perform color saturation on the given image. Args: var (float): variance. image (array): image to perform color saturation. Returns: (array): image that performed color saturation. """ img_gray = grayscale(image) alpha = 1.0 + np.random.uniform(-var, var) return blend(image, img_gray, alpha)
Perform color brightness on the given image. Args: var (float): variance. image (array): image to perform color brightness. Returns: (array): image that performed color brightness.
def brightness(var, image): """ Perform color brightness on the given image. Args: var (float): variance. image (array): image to perform color brightness. Returns: (array): image that performed color brightness. """ img_bright = np.zeros(image.shape).astype(image.dtype) alpha = 1.0 + np.random.uniform(-var, var) return blend(image, img_bright, alpha)
Perform color contrast on the given image. Args: var (float): variance. image (array): image to perform color contrast. Returns: (array): image that performed color contrast.
def contrast(var, image): """ Perform color contrast on the given image. Args: var (float): variance. image (array): image to perform color contrast. Returns: (array): image that performed color contrast. """ img_gray = grayscale(image) img_gray.fill(np.mean(img_gray[0])) alpha = 1.0 + np.random.uniform(-var, var) return blend(image, img_gray, alpha)
Perform color saturation on the list of given images. Args: var (float): variance. images (list): list of images to perform color saturation. Returns: (list): list of images that performed color saturation.
def saturation_list(var, images): """ Perform color saturation on the list of given images. Args: var (float): variance. images (list): list of images to perform color saturation. Returns: (list): list of images that performed color saturation. """ alpha = 1.0 + np.random.uniform(-var, var) out_images = [] for image in images: img_gray = grayscale(image) out_images.append(blend(image, img_gray, alpha)) return out_images
Perform color brightness on the given list of images. Args: var (float): variance. images (list): list of images to perform color brightness. Returns: (list): list of images that performed color brightness.
def brightness_list(var, images): """ Perform color brightness on the given list of images. Args: var (float): variance. images (list): list of images to perform color brightness. Returns: (list): list of images that performed color brightness. """ alpha = 1.0 + np.random.uniform(-var, var) out_images = [] for image in images: img_bright = np.zeros(image.shape).astype(image.dtype) out_images.append(blend(image, img_bright, alpha)) return out_images
Perform color contrast on the given list of images. Args: var (float): variance. images (list): list of images to perform color contrast. Returns: (list): list of images that performed color contrast.
def contrast_list(var, images): """ Perform color contrast on the given list of images. Args: var (float): variance. images (list): list of images to perform color contrast. Returns: (list): list of images that performed color contrast. """ alpha = 1.0 + np.random.uniform(-var, var) out_images = [] for image in images: img_gray = grayscale(image) img_gray.fill(np.mean(img_gray[0])) out_images.append(blend(image, img_gray, alpha)) return out_images
Perform color jitter on the given image. Args: image (array): image to perform color jitter. img_brightness (float): jitter ratio for brightness. img_contrast (float): jitter ratio for contrast. img_saturation (float): jitter ratio for saturation. Returns: image (array): the jittered image.
def color_jitter(image, img_brightness=0, img_contrast=0, img_saturation=0): """ Perform color jitter on the given image. Args: image (array): image to perform color jitter. img_brightness (float): jitter ratio for brightness. img_contrast (float): jitter ratio for contrast. img_saturation (float): jitter ratio for saturation. Returns: image (array): the jittered image. """ jitter = [] if img_brightness != 0: jitter.append("brightness") if img_contrast != 0: jitter.append("contrast") if img_saturation != 0: jitter.append("saturation") if len(jitter) > 0: order = np.random.permutation(np.arange(len(jitter))) for idx in range(0, len(jitter)): if jitter[order[idx]] == "brightness": image = brightness(img_brightness, image) elif jitter[order[idx]] == "contrast": image = contrast(img_contrast, image) elif jitter[order[idx]] == "saturation": image = saturation(img_saturation, image) return image
Revert scaled input boxes to match the original image size. Args: size (int): size of the cropped image. boxes (array): shape (num_boxes, 4). img_height (int): height of original image. img_width (int): width of original image. Returns: reverted_boxes (array): boxes scaled back to the original image size.
def revert_scaled_boxes(size, boxes, img_height, img_width): """ Revert scaled input boxes to match the original image size. Args: size (int): size of the cropped image. boxes (array): shape (num_boxes, 4). img_height (int): height of original image. img_width (int): width of original image. Returns: reverted_boxes (array): boxes scaled back to the original image size. """ scaled_aspect = np.min([img_height, img_width]) scale_ratio = scaled_aspect / size reverted_boxes = boxes * scale_ratio return reverted_boxes
Given the start and end frame index, sample num_samples frames between the start and end with equal interval. Args: frames (tensor): a tensor of video frames, dimension is `num video frames` x `channel` x `height` x `width`. start_idx (int): the index of the start frame. end_idx (int): the index of the end frame. num_samples (int): number of frames to sample. Returns: frames (tensor): a tensor of temporally sampled video frames, dimension is `num clip frames` x `channel` x `height` x `width`.
def temporal_sampling(frames, start_idx, end_idx, num_samples): """ Given the start and end frame index, sample num_samples frames between the start and end with equal interval. Args: frames (tensor): a tensor of video frames, dimension is `num video frames` x `channel` x `height` x `width`. start_idx (int): the index of the start frame. end_idx (int): the index of the end frame. num_samples (int): number of frames to sample. Returns: frames (tensor): a tensor of temporally sampled video frames, dimension is `num clip frames` x `channel` x `height` x `width`. """ index = torch.linspace(start_idx, end_idx, num_samples) index = torch.clamp(index, 0, frames.shape[0] - 1).long() frames = torch.index_select(frames, 0, index) return frames
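For example, picking 4 evenly spaced frames out of a 10-frame synthetic clip:

import torch

frames = torch.arange(10).view(10, 1, 1, 1).float()  # num frames x channel x height x width
clip = temporal_sampling(frames, start_idx=0, end_idx=9, num_samples=4)
print(clip.flatten().tolist())  # [0.0, 3.0, 6.0, 9.0]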
Sample a clip of size clip_size from a video of size video_size and return the indices of the first and last frame of the clip. If clip_idx is -1, the clip is randomly sampled, otherwise uniformly split the video to num_clips clips, and select the start and end index of clip_idx-th video clip. Args: video_size (int): number of overall frames. clip_size (int): size of the clip to sample from the frames. clip_idx (int): if clip_idx is -1, perform random jitter sampling. If clip_idx is larger than -1, uniformly split the video to num_clips clips, and select the start and end index of the clip_idx-th video clip. num_clips (int): overall number of clips to uniformly sample from the given video for testing. Returns: start_idx (int): the start frame index. end_idx (int): the end frame index.
def get_start_end_idx(video_size, clip_size, clip_idx, num_clips): """ Sample a clip of size clip_size from a video of size video_size and return the indices of the first and last frame of the clip. If clip_idx is -1, the clip is randomly sampled, otherwise uniformly split the video to num_clips clips, and select the start and end index of clip_idx-th video clip. Args: video_size (int): number of overall frames. clip_size (int): size of the clip to sample from the frames. clip_idx (int): if clip_idx is -1, perform random jitter sampling. If clip_idx is larger than -1, uniformly split the video to num_clips clips, and select the start and end index of the clip_idx-th video clip. num_clips (int): overall number of clips to uniformly sample from the given video for testing. Returns: start_idx (int): the start frame index. end_idx (int): the end frame index. """ delta = max(video_size - clip_size, 0) if clip_idx == -1: # Random temporal sampling. start_idx = random.uniform(0, delta) else: # Uniformly sample the clip with the given index. start_idx = delta * clip_idx / num_clips end_idx = start_idx + clip_size - 1 return start_idx, end_idx
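A worked example: splitting a 300-frame video into 10 test clips of 64 frames each.

# delta = 300 - 64 = 236; clip k starts at delta * k / num_clips.
start, end = get_start_end_idx(video_size=300, clip_size=64, clip_idx=0, num_clips=10)
print(start, end)  # 0.0 63.0
start, end = get_start_end_idx(video_size=300, clip_size=64, clip_idx=9, num_clips=10)
print(start, end)  # 212.4 275.4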
Decode the video with PyAV decoder. Args: container (container): PyAV container. start_pts (int): the starting Presentation TimeStamp to fetch the video frames. end_pts (int): the ending Presentation TimeStamp of the decoded frames. stream (stream): PyAV stream. stream_name (dict): a dictionary of streams. For example, {"video": 0} means video stream at stream index 0. buffer_size (int): number of additional frames to decode beyond end_pts. Returns: result (list): list of frames decoded. max_pts (int): max Presentation TimeStamp of the video sequence.
def pyav_decode_stream( container, start_pts, end_pts, stream, stream_name, buffer_size=0 ): """ Decode the video with PyAV decoder. Args: container (container): PyAV container. start_pts (int): the starting Presentation TimeStamp to fetch the video frames. end_pts (int): the ending Presentation TimeStamp of the decoded frames. stream (stream): PyAV stream. stream_name (dict): a dictionary of streams. For example, {"video": 0} means video stream at stream index 0. buffer_size (int): number of additional frames to decode beyond end_pts. Returns: result (list): list of frames decoded. max_pts (int): max Presentation TimeStamp of the video sequence. """ # Seeking in the stream is imprecise. Thus, seek to an earlier PTS by a # margin pts. margin = 1024 seek_offset = max(start_pts - margin, 0) container.seek(seek_offset, any_frame=False, backward=True, stream=stream) frames = {} buffer_count = 0 max_pts = 0 for frame in container.decode(**stream_name): max_pts = max(max_pts, frame.pts) if frame.pts < start_pts: continue if frame.pts <= end_pts: frames[frame.pts] = frame else: buffer_count += 1 frames[frame.pts] = frame if buffer_count >= buffer_size: break result = [frames[pts] for pts in sorted(frames)] return result, max_pts
If video_meta is not empty, perform temporal selective decoding to sample a clip from the video with the TorchVision decoder. If video_meta is empty, decode the entire video and update the video_meta. Args: video_handle (bytes): raw bytes of the video file. sampling_rate (int): frame sampling rate (interval between two sampled frames). num_frames (int): number of frames to sample. clip_idx (int): if clip_idx is -1, perform random temporal sampling. If clip_idx is larger than -1, uniformly split the video to num_clips clips, and select the clip_idx-th video clip. video_meta (dict): a dict containing VideoMetaData. Details can be found at `pytorch/vision/torchvision/io/_video_opt.py`. num_clips (int): overall number of clips to uniformly sample from the given video. target_fps (int): the input video may have a different fps, convert it to the target video fps. modalities (tuple): tuple of modalities to decode. Currently only supports `visual`, planning to support `acoustic` soon. max_spatial_scale (int): the maximal resolution of the spatial shorter edge size during decoding. Returns: frames (tensor): decoded frames from the video. fps (float): the number of frames per second of the video. decode_all_video (bool): if True, the entire video was decoded.
def torchvision_decode( video_handle, sampling_rate, num_frames, clip_idx, video_meta, num_clips=10, target_fps=30, modalities=("visual",), max_spatial_scale=0, ): """ If video_meta is not empty, perform temporal selective decoding to sample a clip from the video with the TorchVision decoder. If video_meta is empty, decode the entire video and update the video_meta. Args: video_handle (bytes): raw bytes of the video file. sampling_rate (int): frame sampling rate (interval between two sampled frames). num_frames (int): number of frames to sample. clip_idx (int): if clip_idx is -1, perform random temporal sampling. If clip_idx is larger than -1, uniformly split the video to num_clips clips, and select the clip_idx-th video clip. video_meta (dict): a dict containing VideoMetaData. Details can be found at `pytorch/vision/torchvision/io/_video_opt.py`. num_clips (int): overall number of clips to uniformly sample from the given video. target_fps (int): the input video may have a different fps, convert it to the target video fps. modalities (tuple): tuple of modalities to decode. Currently only supports `visual`, planning to support `acoustic` soon. max_spatial_scale (int): the maximal resolution of the spatial shorter edge size during decoding. Returns: frames (tensor): decoded frames from the video. fps (float): the number of frames per second of the video. decode_all_video (bool): if True, the entire video was decoded. """ # Convert the bytes to a tensor. video_tensor = torch.from_numpy(np.frombuffer(video_handle, dtype=np.uint8)) decode_all_video = True video_start_pts, video_end_pts = 0, -1 # If video_meta is empty, fetch the meta data from the raw video. if len(video_meta) == 0: # Tracking the meta info for selective decoding in the future. meta = io._probe_video_from_memory(video_tensor) # Using the information from video_meta to perform selective decoding. video_meta["video_timebase"] = meta.video_timebase video_meta["video_numerator"] = meta.video_timebase.numerator video_meta["video_denominator"] = meta.video_timebase.denominator video_meta["has_video"] = meta.has_video video_meta["video_duration"] = meta.video_duration video_meta["video_fps"] = meta.video_fps video_meta["audio_timebase"] = meta.audio_timebase video_meta["audio_numerator"] = meta.audio_timebase.numerator video_meta["audio_denominator"] = meta.audio_timebase.denominator video_meta["has_audio"] = meta.has_audio video_meta["audio_duration"] = meta.audio_duration video_meta["audio_sample_rate"] = meta.audio_sample_rate fps = video_meta["video_fps"] if ( video_meta["has_video"] and video_meta["video_denominator"] > 0 and video_meta["video_duration"] > 0 ): # try selective decoding. decode_all_video = False clip_size = sampling_rate * num_frames / target_fps * fps start_idx, end_idx = get_start_end_idx( fps * video_meta["video_duration"], clip_size, clip_idx, num_clips ) # Convert frame index to pts. pts_per_frame = video_meta["video_denominator"] / fps video_start_pts = int(start_idx * pts_per_frame) video_end_pts = int(end_idx * pts_per_frame) # Decode the raw video with the TorchVision decoder. 
v_frames, _ = io._read_video_from_memory( video_tensor, seek_frame_margin=1.0, read_video_stream="visual" in modalities, video_width=0, video_height=0, video_min_dimension=max_spatial_scale, video_pts_range=(video_start_pts, video_end_pts), video_timebase_numerator=video_meta["video_numerator"], video_timebase_denominator=video_meta["video_denominator"], ) if v_frames.shape == torch.Size([0]): # failed selective decoding decode_all_video = True video_start_pts, video_end_pts = 0, -1 v_frames, _ = io._read_video_from_memory( video_tensor, seek_frame_margin=1.0, read_video_stream="visual" in modalities, video_width=0, video_height=0, video_min_dimension=max_spatial_scale, video_pts_range=(video_start_pts, video_end_pts), video_timebase_numerator=video_meta["video_numerator"], video_timebase_denominator=video_meta["video_denominator"], ) return v_frames, fps, decode_all_video
Convert the video from its original fps to the target_fps. If the video supports selective decoding (i.e., contains decoding information in the video header), perform temporal selective decoding and sample a clip from the video with the PyAV decoder. If the video does not support selective decoding, decode the entire video. Args: container (container): pyav container. sampling_rate (int): frame sampling rate (interval between two sampled frames). num_frames (int): number of frames to sample. clip_idx (int): if clip_idx is -1, perform random temporal sampling. If clip_idx is larger than -1, uniformly split the video to num_clips clips, and select the clip_idx-th video clip. num_clips (int): overall number of clips to uniformly sample from the given video. target_fps (int): the input video may have a different fps, convert it to the target video fps before frame sampling. Returns: frames (tensor): decoded frames from the video. Return None if no video stream was found. fps (float): the number of frames per second of the video. decode_all_video (bool): If True, the entire video was decoded.
def pyav_decode( container, sampling_rate, num_frames, clip_idx, num_clips=10, target_fps=30, start=None, end=None, duration=None, frames_length=None): """ Convert the video from its original fps to the target_fps. If the video supports selective decoding (i.e., contains decoding information in the video header), perform temporal selective decoding and sample a clip from the video with the PyAV decoder. If the video does not support selective decoding, decode the entire video. Args: container (container): pyav container. sampling_rate (int): frame sampling rate (interval between two sampled frames). num_frames (int): number of frames to sample. clip_idx (int): if clip_idx is -1, perform random temporal sampling. If clip_idx is larger than -1, uniformly split the video to num_clips clips, and select the clip_idx-th video clip. num_clips (int): overall number of clips to uniformly sample from the given video. target_fps (int): the input video may have a different fps, convert it to the target video fps before frame sampling. Returns: frames (tensor): decoded frames from the video. Return None if no video stream was found. fps (float): the number of frames per second of the video. decode_all_video (bool): If True, the entire video was decoded. """ # Try to fetch the decoding information from the video header. Some videos # do not support fetching the decoding information; in that case the duration # is None. fps = float(container.streams.video[0].average_rate) orig_duration = duration tb = float(container.streams.video[0].time_base) frames_length = container.streams.video[0].frames duration = container.streams.video[0].duration if duration is None and orig_duration is not None: duration = orig_duration / tb if duration is None: # If failed to fetch the decoding information, decode the entire video. decode_all_video = True video_start_pts, video_end_pts = 0, math.inf else: # Perform selective decoding. decode_all_video = False start_idx, end_idx = get_start_end_idx( frames_length, sampling_rate * num_frames / target_fps * fps, clip_idx, num_clips, ) timebase = duration / frames_length video_start_pts = int(start_idx * timebase) video_end_pts = int(end_idx * timebase) if start is not None and end is not None: decode_all_video = False frames = None # If a video stream was found, fetch video frames from the video. if container.streams.video: if start is None and end is None: video_frames, max_pts = pyav_decode_stream( container, video_start_pts, video_end_pts, container.streams.video[0], {"video": 0}, ) else: timebase = duration / frames_length start_i = start end_i = end video_frames, max_pts = pyav_decode_stream( container, start_i, end_i, container.streams.video[0], {"video": 0}, ) container.close() frames = [frame.to_rgb().to_ndarray() for frame in video_frames] frames = torch.as_tensor(np.stack(frames)) return frames, fps, decode_all_video
Decode the video and perform temporal sampling. Args: container (container): pyav container. sampling_rate (int): frame sampling rate (interval between two sampled frames). num_frames (int): number of frames to sample. clip_idx (int): if clip_idx is -1, perform random temporal sampling. If clip_idx is larger than -1, uniformly split the video to num_clips clips, and select the clip_idx-th video clip. num_clips (int): overall number of clips to uniformly sample from the given video. video_meta (dict): a dict containing VideoMetaData. Details can be found at `pytorch/vision/torchvision/io/_video_opt.py`. target_fps (int): the input video may have a different fps, convert it to the target video fps before frame sampling. backend (str): decoding backend includes `pyav` and `torchvision`. The default one is `pyav`. max_spatial_scale (int): keep the aspect ratio and resize the frame so that shorter edge size is max_spatial_scale. Only used in `torchvision` backend. Returns: frames (tensor): decoded frames from the video.
def decode( container, sampling_rate, num_frames, clip_idx=-1, num_clips=10, video_meta=None, target_fps=30, backend="pyav", max_spatial_scale=0, start=None, end=None, duration=None, frames_length=None, ): """ Decode the video and perform temporal sampling. Args: container (container): pyav container. sampling_rate (int): frame sampling rate (interval between two sampled frames). num_frames (int): number of frames to sample. clip_idx (int): if clip_idx is -1, perform random temporal sampling. If clip_idx is larger than -1, uniformly split the video to num_clips clips, and select the clip_idx-th video clip. num_clips (int): overall number of clips to uniformly sample from the given video. video_meta (dict): a dict containing VideoMetaData. Details can be found at `pytorch/vision/torchvision/io/_video_opt.py`. target_fps (int): the input video may have a different fps, convert it to the target video fps before frame sampling. backend (str): decoding backend includes `pyav` and `torchvision`. The default one is `pyav`. max_spatial_scale (int): keep the aspect ratio and resize the frame so that shorter edge size is max_spatial_scale. Only used in `torchvision` backend. Returns: frames (tensor): decoded frames from the video. """ # Currently supports two decoders: 1) PyAV, and 2) TorchVision. assert clip_idx >= -1, "Not valid clip_idx {}".format(clip_idx) try: if backend == "pyav": frames, fps, decode_all_video = pyav_decode( container, sampling_rate, num_frames, clip_idx, num_clips, target_fps, start, end, duration, frames_length, ) elif backend == "torchvision": frames, fps, decode_all_video = torchvision_decode( container, sampling_rate, num_frames, clip_idx, video_meta, num_clips, target_fps, ("visual",), max_spatial_scale, ) else: raise NotImplementedError( "Unknown decoding backend {}".format(backend) ) except Exception as e: print("Failed to decode by {} with exception: {}".format(backend, e)) return None # Return None if the frames were not decoded successfully. if frames is None or frames.size(0) == 0: return None clip_sz = sampling_rate * num_frames / target_fps * fps start_idx, end_idx = get_start_end_idx( frames.shape[0], clip_sz, clip_idx if decode_all_video else 0, num_clips if decode_all_video else 1, ) # Perform temporal sampling from the decoded video. frames = temporal_sampling(frames, start_idx, end_idx, num_frames) return frames
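A hedged usage sketch of the PyAV path, assuming the `av` package is installed and `path/to/video.mp4` stands in for a real file:

import av

container = av.open("path/to/video.mp4")  # hypothetical path
# Randomly sample (clip_idx=-1) a clip of 8 frames with sampling rate 8, resampled toward 30 fps.
frames = decode(container, sampling_rate=8, num_frames=8, clip_idx=-1, num_clips=1, backend="pyav")
if frames is not None:
    print(frames.shape)  # (8, H, W, 3): num frames x height x width x channel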
Collate function for detection task. Concatenate bboxes, labels and metadata from different samples in the first dimension instead of stacking them to have a batch-size dimension. Args: batch (tuple or list): data batch to collate. Returns: (tuple): collated detection data batch.
def detection_collate(batch): """ Collate function for detection task. Concatenate bboxes, labels and metadata from different samples in the first dimension instead of stacking them to have a batch-size dimension. Args: batch (tuple or list): data batch to collate. Returns: (tuple): collated detection data batch. """ inputs, labels, video_idx, extra_data = zip(*batch) inputs, video_idx = default_collate(inputs), default_collate(video_idx) labels = torch.tensor(np.concatenate(labels, axis=0)).float() collated_extra_data = {} for key in extra_data[0].keys(): data = [d[key] for d in extra_data] if key == "boxes" or key == "ori_boxes": # Append idx info to the bboxes before concatenating them. bboxes = [ np.concatenate( [np.full((data[i].shape[0], 1), float(i)), data[i]], axis=1 ) for i in range(len(data)) ] bboxes = np.concatenate(bboxes, axis=0) collated_extra_data[key] = torch.tensor(bboxes).float() elif key == "metadata": collated_extra_data[key] = torch.tensor( list(itertools.chain(*data)) ).view(-1, 2) else: collated_extra_data[key] = default_collate(data) return inputs, labels, video_idx, collated_extra_data
Constructs the data loader for the given dataset. Args: cfg (CfgNode): configs. Details can be found in slowfast/config/defaults.py split (str): the split of the data loader. Options include `train`, `val`, and `test`.
def construct_loader(cfg, split, is_precise_bn=False): """ Constructs the data loader for the given dataset. Args: cfg (CfgNode): configs. Details can be found in slowfast/config/defaults.py split (str): the split of the data loader. Options include `train`, `val`, and `test`. """ assert split in ["train", "val", "test"] if split in ["train"]: dataset_name = cfg.TRAIN.DATASET batch_size = int(cfg.TRAIN.BATCH_SIZE / max(1, cfg.NUM_GPUS)) shuffle = True drop_last = True elif split in ["val"]: dataset_name = cfg.TRAIN.DATASET batch_size = int(cfg.TRAIN.BATCH_SIZE / max(1, cfg.NUM_GPUS)) shuffle = False drop_last = False elif split in ["test"]: dataset_name = cfg.TEST.DATASET batch_size = int(cfg.TEST.BATCH_SIZE / max(1, cfg.NUM_GPUS)) shuffle = False drop_last = False # Construct the dataset dataset = build_dataset(dataset_name, cfg, split) if cfg.MULTIGRID.SHORT_CYCLE and split in ["train"] and not is_precise_bn: # Create a sampler for multi-process training sampler = utils.create_sampler(dataset, shuffle, cfg) batch_sampler = ShortCycleBatchSampler( sampler, batch_size=batch_size, drop_last=drop_last, cfg=cfg ) # Create a loader loader = torch.utils.data.DataLoader( dataset, batch_sampler=batch_sampler, num_workers=cfg.DATA_LOADER.NUM_WORKERS, pin_memory=cfg.DATA_LOADER.PIN_MEMORY, worker_init_fn=utils.loader_worker_init_fn(dataset), ) else: # Create a sampler for multi-process training sampler = utils.create_sampler(dataset, shuffle, cfg) # Create a loader loader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, shuffle=(False if sampler else shuffle), sampler=sampler, num_workers=cfg.DATA_LOADER.NUM_WORKERS, pin_memory=cfg.DATA_LOADER.PIN_MEMORY, drop_last=drop_last, collate_fn=detection_collate if cfg.DETECTION.ENABLE else None, worker_init_fn=utils.loader_worker_init_fn(dataset), ) return loader
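A hedged sketch of building a training loader, assuming the SlowFast config helper `get_cfg` and a YAML config path that are not shown in this section:

from slowfast.config.defaults import get_cfg  # assumed helper from the same codebase

cfg = get_cfg()
cfg.merge_from_file("configs/Kinetics/SLOWFAST_4x16_R50.yaml")  # hypothetical config file
train_loader = construct_loader(cfg, "train")
batch = next(iter(train_loader))  # batch structure depends on the dataset and cfg.DETECTION.ENABLE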
" Shuffles the data. Args: loader (loader): data loader to perform shuffle. cur_epoch (int): number of the current epoch.
def shuffle_dataset(loader, cur_epoch): """ Shuffles the data. Args: loader (loader): data loader to perform shuffle. cur_epoch (int): number of the current epoch. """ sampler = ( loader.batch_sampler.sampler if isinstance(loader.batch_sampler, ShortCycleBatchSampler) else loader.sampler ) assert isinstance( sampler, (RandomSampler, DistributedSampler) ), "Sampler type '{}' not supported".format(type(sampler)) # RandomSampler handles shuffling automatically if isinstance(sampler, DistributedSampler): # DistributedSampler shuffles data based on epoch sampler.set_epoch(cur_epoch)
Perform a spatial short scale jittering on the given images and corresponding boxes. Args: images (tensor): images to perform scale jitter. Dimension is `num frames` x `channel` x `height` x `width`. min_size (int): the minimal size to scale the frames. max_size (int): the maximal size to scale the frames. boxes (ndarray): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. inverse_uniform_sampling (bool): if True, sample uniformly in [1 / max_scale, 1 / min_scale] and take a reciprocal to get the scale. If False, take a uniform sample from [min_scale, max_scale]. Returns: (tensor): the scaled images with dimension of `num frames` x `channel` x `new height` x `new width`. (ndarray or None): the scaled boxes with dimension of `num boxes` x 4.
def random_short_side_scale_jitter( images, min_size, max_size, boxes=None, inverse_uniform_sampling=False ): """ Perform a spatial short scale jittering on the given images and corresponding boxes. Args: images (tensor): images to perform scale jitter. Dimension is `num frames` x `channel` x `height` x `width`. min_size (int): the minimal size to scale the frames. max_size (int): the maximal size to scale the frames. boxes (ndarray): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. inverse_uniform_sampling (bool): if True, sample uniformly in [1 / max_scale, 1 / min_scale] and take a reciprocal to get the scale. If False, take a uniform sample from [min_scale, max_scale]. Returns: (tensor): the scaled images with dimension of `num frames` x `channel` x `new height` x `new width`. (ndarray or None): the scaled boxes with dimension of `num boxes` x 4. """ if inverse_uniform_sampling: size = int( round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size)) ) else: size = int(round(np.random.uniform(min_size, max_size))) height = images.shape[2] width = images.shape[3] if (width <= height and width == size) or ( height <= width and height == size ): return images, boxes new_width = size new_height = size if width < height: new_height = int(math.floor((float(height) / width) * size)) if boxes is not None: boxes = boxes * float(new_height) / height else: new_width = int(math.floor((float(width) / height) * size)) if boxes is not None: boxes = boxes * float(new_width) / width return ( torch.nn.functional.interpolate( images, size=(new_height, new_width), mode="bilinear", align_corners=False, ), boxes, )
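For example, jittering a synthetic clip tensor so its short side lands in [256, 320]:

import torch

clip = torch.rand(8, 3, 240, 320)  # num frames x channel x height x width
scaled, _ = random_short_side_scale_jitter(clip, min_size=256, max_size=320)
print(scaled.shape)  # e.g. torch.Size([8, 3, 256, 341]); short side in [256, 320], aspect ratio kept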
Perform crop on the bounding boxes given the offsets. Args: boxes (ndarray or None): bounding boxes to perform crop. The dimension is `num boxes` x 4. x_offset (int): cropping offset in the x axis. y_offset (int): cropping offset in the y axis. Returns: cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4.
def crop_boxes(boxes, x_offset, y_offset): """ Perform crop on the bounding boxes given the offsets. Args: boxes (ndarray or None): bounding boxes to perform crop. The dimension is `num boxes` x 4. x_offset (int): cropping offset in the x axis. y_offset (int): cropping offset in the y axis. Returns: cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4. """ cropped_boxes = boxes.copy() cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset return cropped_boxes
Perform random spatial crop on the given images and corresponding boxes. Args: images (tensor): images to perform random crop. The dimension is `num frames` x `channel` x `height` x `width`. size (int): the size of height and width to crop on the image. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: cropped (tensor): cropped images with dimension of `num frames` x `channel` x `size` x `size`. cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4.
def random_crop(images, size, boxes=None): """ Perform random spatial crop on the given images and corresponding boxes. Args: images (tensor): images to perform random crop. The dimension is `num frames` x `channel` x `height` x `width`. size (int): the size of height and width to crop on the image. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: cropped (tensor): cropped images with dimension of `num frames` x `channel` x `size` x `size`. cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4. """ if images.shape[2] == size and images.shape[3] == size: return images, None height = images.shape[2] width = images.shape[3] y_offset = 0 if height > size: y_offset = int(np.random.randint(0, height - size)) x_offset = 0 if width > size: x_offset = int(np.random.randint(0, width - size)) cropped = images[ :, :, y_offset : y_offset + size, x_offset : x_offset + size ] cropped_boxes = ( crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None ) return cropped, cropped_boxes
Perform horizontal flip on the given images and corresponding boxes.
Args:
    prob (float): probability to flip the images.
    images (tensor): images to perform horizontal flip, the dimension is
        `num frames` x `channel` x `height` x `width`.
    boxes (ndarray or None): optional. Corresponding boxes to images.
        Dimension is `num boxes` x 4.
Returns:
    images (tensor): images with dimension of
        `num frames` x `channel` x `height` x `width`.
    flipped_boxes (ndarray or None): the flipped boxes with dimension of
        `num boxes` x 4.
def horizontal_flip(prob, images, boxes=None):
    """
    Perform horizontal flip on the given images and corresponding boxes.
    Args:
        prob (float): probability to flip the images.
        images (tensor): images to perform horizontal flip, the dimension is
            `num frames` x `channel` x `height` x `width`.
        boxes (ndarray or None): optional. Corresponding boxes to images.
            Dimension is `num boxes` x 4.
    Returns:
        images (tensor): images with dimension of
            `num frames` x `channel` x `height` x `width`.
        flipped_boxes (ndarray or None): the flipped boxes with dimension of
            `num boxes` x 4.
    """
    if boxes is None:
        flipped_boxes = None
    else:
        flipped_boxes = boxes.copy()

    if np.random.uniform() < prob:
        images = images.flip((-1))

        width = images.shape[3]
        if boxes is not None:
            flipped_boxes[:, [0, 2]] = width - boxes[:, [2, 0]] - 1

    return images, flipped_boxes
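A small sketch of the flip with boxes; prob=1.0 forces the flip so the box transform is easy to inspect (the shapes and box values are illustrative).

import numpy as np
import torch

clip = torch.rand(8, 3, 224, 224)
boxes = np.array([[10.0, 20.0, 100.0, 120.0]])
flipped, flipped_boxes = horizontal_flip(1.0, clip, boxes=boxes)
# With prob=1.0 the flip always fires; x coordinates become width - x - 1.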
Perform uniform spatial sampling on the images and corresponding boxes.
Args:
    images (tensor): images to perform uniform crop. The dimension is
        `num frames` x `channel` x `height` x `width`.
    size (int): size of height and width to crop the images.
    spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
        is larger than height. Or 0, 1, or 2 for top, center, and bottom
        crop if height is larger than width.
    boxes (ndarray or None): optional. Corresponding boxes to images.
        Dimension is `num boxes` x 4.
Returns:
    cropped (tensor): images with dimension of
        `num frames` x `channel` x `size` x `size`.
    cropped_boxes (ndarray or None): the cropped boxes with dimension of
        `num boxes` x 4.
def uniform_crop(images, size, spatial_idx, boxes=None): """ Perform uniform spatial sampling on the images and corresponding boxes. Args: images (tensor): images to perform uniform crop. The dimension is `num frames` x `channel` x `height` x `width`. size (int): size of height and weight to crop the images. spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width is larger than height. Or 0, 1, or 2 for top, center, and bottom crop if height is larger than width. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: cropped (tensor): images with dimension of `num frames` x `channel` x `size` x `size`. cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4. """ assert spatial_idx in [0, 1, 2] height = images.shape[2] width = images.shape[3] y_offset = int(math.ceil((height - size) / 2)) x_offset = int(math.ceil((width - size) / 2)) if height > width: if spatial_idx == 0: y_offset = 0 elif spatial_idx == 2: y_offset = height - size else: if spatial_idx == 0: x_offset = 0 elif spatial_idx == 2: x_offset = width - size cropped = images[ :, :, y_offset : y_offset + size, x_offset : x_offset + size ] cropped_boxes = ( crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None ) return cropped, cropped_boxes
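A sketch of the standard three-crop testing pattern built on uniform_crop; the landscape clip shape is illustrative.

import torch

clip = torch.rand(8, 3, 256, 320)
crops = [uniform_crop(clip, 256, idx)[0] for idx in range(3)]
# Left, center, and right 256x256 crops of a landscape clip.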
Perform uniform spatial sampling on the images and corresponding boxes,
taking one of two crops placed along the longer spatial side.
Args:
    images (tensor): images to perform uniform crop. The dimension is
        `num frames` x `channel` x `height` x `width`.
    size (int): size of height and width to crop the images.
    spatial_idx (int): 0 or 1, selecting the first or the second of the two
        crops taken along the longer spatial dimension.
    boxes (ndarray or None): optional. Corresponding boxes to images.
        Dimension is `num boxes` x 4.
Returns:
    cropped (tensor): images with dimension of
        `num frames` x `channel` x `size` x `size`.
    cropped_boxes (ndarray or None): the cropped boxes with dimension of
        `num boxes` x 4.
def uniform_crop_2crops(images, size, spatial_idx, boxes=None): """ Perform uniform spatial sampling on the images and corresponding boxes. Args: images (tensor): images to perform uniform crop. The dimension is `num frames` x `channel` x `height` x `width`. size (int): size of height and weight to crop the images. spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width is larger than height. Or 0, 1, or 2 for top, center, and bottom crop if height is larger than width. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: cropped (tensor): images with dimension of `num frames` x `channel` x `size` x `size`. cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4. """ assert spatial_idx in [0, 1, 2] height = images.shape[2] width = images.shape[3] if height > width: x_offset = 0 if height > size * 2: if spatial_idx == 0: y_offset = int((height - size * 2) // 2) elif spatial_idx == 1: y_offset = int(height - size - ((height - size * 2) // 2)) else: if spatial_idx == 0: y_offset = 0 elif spatial_idx == 1: y_offset = height - size else: y_offset = 0 if width > size * 2: if spatial_idx == 0: x_offset = int((width - size * 2) // 2) elif spatial_idx == 1: x_offset = int(width - size - ((width - size * 2) // 2)) else: if spatial_idx == 0: x_offset = 0 elif spatial_idx == 1: x_offset = width - size cropped = images[ :, :, y_offset : y_offset + size, x_offset : x_offset + size ] cropped_boxes = ( crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None ) return cropped, cropped_boxes
Clip an array of boxes to an image with the given height and width. Args: boxes (ndarray): bounding boxes to perform clipping. Dimension is `num boxes` x 4. height (int): given image height. width (int): given image width. Returns: clipped_boxes (ndarray): the clipped boxes with dimension of `num boxes` x 4.
def clip_boxes_to_image(boxes, height, width): """ Clip an array of boxes to an image with the given height and width. Args: boxes (ndarray): bounding boxes to perform clipping. Dimension is `num boxes` x 4. height (int): given image height. width (int): given image width. Returns: clipped_boxes (ndarray): the clipped boxes with dimension of `num boxes` x 4. """ clipped_boxes = boxes.copy() clipped_boxes[:, [0, 2]] = np.minimum( width - 1.0, np.maximum(0.0, boxes[:, [0, 2]]) ) clipped_boxes[:, [1, 3]] = np.minimum( height - 1.0, np.maximum(0.0, boxes[:, [1, 3]]) ) return clipped_boxes
Blend two images with a given weight alpha. Args: images1 (tensor): the first images to be blended, the dimension is `num frames` x `channel` x `height` x `width`. images2 (tensor): the second images to be blended, the dimension is `num frames` x `channel` x `height` x `width`. alpha (float): the blending weight. Returns: (tensor): blended images, the dimension is `num frames` x `channel` x `height` x `width`.
def blend(images1, images2, alpha): """ Blend two images with a given weight alpha. Args: images1 (tensor): the first images to be blended, the dimension is `num frames` x `channel` x `height` x `width`. images2 (tensor): the second images to be blended, the dimension is `num frames` x `channel` x `height` x `width`. alpha (float): the blending weight. Returns: (tensor): blended images, the dimension is `num frames` x `channel` x `height` x `width`. """ return images1 * alpha + images2 * (1 - alpha)
Get the grayscale for the input images. The channels of images should be in
order BGR.
Args:
    images (tensor): the input images for getting grayscale. Dimension is
        `num frames` x `channel` x `height` x `width`.
Returns:
    img_gray (tensor): grayscale images, the dimension is
        `num frames` x `channel` x `height` x `width`.
def grayscale(images):
    """
    Get the grayscale for the input images. The channels of images should be
    in order BGR.
    Args:
        images (tensor): the input images for getting grayscale. Dimension is
            `num frames` x `channel` x `height` x `width`.
    Returns:
        img_gray (tensor): grayscale images, the dimension is
            `num frames` x `channel` x `height` x `width`.
    """
    # R -> 0.299, G -> 0.587, B -> 0.114.
    # Clone the input so the original frames are left untouched.
    img_gray = images.clone()
    gray_channel = (
        0.299 * images[:, 2] + 0.587 * images[:, 1] + 0.114 * images[:, 0]
    )
    img_gray[:, 0] = gray_channel
    img_gray[:, 1] = gray_channel
    img_gray[:, 2] = gray_channel
    return img_gray
Perform a color jittering on the input images. The channels of images
should be in order BGR.
Args:
    images (tensor): images to perform color jitter. Dimension is
        `num frames` x `channel` x `height` x `width`.
    img_brightness (float): jitter ratio for brightness.
    img_contrast (float): jitter ratio for contrast.
    img_saturation (float): jitter ratio for saturation.
Returns:
    images (tensor): the jittered images, the dimension is
        `num frames` x `channel` x `height` x `width`.
def color_jitter(images, img_brightness=0, img_contrast=0, img_saturation=0): """ Perfrom a color jittering on the input images. The channels of images should be in order BGR. Args: images (tensor): images to perform color jitter. Dimension is `num frames` x `channel` x `height` x `width`. img_brightness (float): jitter ratio for brightness. img_contrast (float): jitter ratio for contrast. img_saturation (float): jitter ratio for saturation. Returns: images (tensor): the jittered images, the dimension is `num frames` x `channel` x `height` x `width`. """ jitter = [] if img_brightness != 0: jitter.append("brightness") if img_contrast != 0: jitter.append("contrast") if img_saturation != 0: jitter.append("saturation") if len(jitter) > 0: order = np.random.permutation(np.arange(len(jitter))) for idx in range(0, len(jitter)): if jitter[order[idx]] == "brightness": images = brightness_jitter(img_brightness, images) elif jitter[order[idx]] == "contrast": images = contrast_jitter(img_contrast, images) elif jitter[order[idx]] == "saturation": images = saturation_jitter(img_saturation, images) return images
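A sketch of applying the combined color jitter; the pixel range and the jitter ratios are illustrative.

import torch

clip = torch.rand(8, 3, 224, 224) * 255.0  # BGR frames in [0, 255]
jittered = color_jitter(
    clip, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4
)
# Brightness, contrast, and saturation jitter are applied in a random order.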
Perform brightness jittering on the input images. The channels of images
should be in order BGR.
Args:
    var (float): jitter ratio for brightness.
    images (tensor): images to perform color jitter. Dimension is
        `num frames` x `channel` x `height` x `width`.
Returns:
    images (tensor): the jittered images, the dimension is
        `num frames` x `channel` x `height` x `width`.
def brightness_jitter(var, images):
    """
    Perform brightness jittering on the input images. The channels of images
    should be in order BGR.
    Args:
        var (float): jitter ratio for brightness.
        images (tensor): images to perform color jitter. Dimension is
            `num frames` x `channel` x `height` x `width`.
    Returns:
        images (tensor): the jittered images, the dimension is
            `num frames` x `channel` x `height` x `width`.
    """
    alpha = 1.0 + np.random.uniform(-var, var)

    img_bright = torch.zeros(images.shape)
    images = blend(images, img_bright, alpha)

    return images
Perform contrast jittering on the input images. The channels of images
should be in order BGR.
Args:
    var (float): jitter ratio for contrast.
    images (tensor): images to perform color jitter. Dimension is
        `num frames` x `channel` x `height` x `width`.
Returns:
    images (tensor): the jittered images, the dimension is
        `num frames` x `channel` x `height` x `width`.
def contrast_jitter(var, images):
    """
    Perform contrast jittering on the input images. The channels of images
    should be in order BGR.
    Args:
        var (float): jitter ratio for contrast.
        images (tensor): images to perform color jitter. Dimension is
            `num frames` x `channel` x `height` x `width`.
    Returns:
        images (tensor): the jittered images, the dimension is
            `num frames` x `channel` x `height` x `width`.
    """
    alpha = 1.0 + np.random.uniform(-var, var)

    img_gray = grayscale(images)
    img_gray[:] = torch.mean(img_gray, dim=(1, 2, 3), keepdim=True)
    images = blend(images, img_gray, alpha)

    return images
Perform saturation jittering on the input images. The channels of images
should be in order BGR.
Args:
    var (float): jitter ratio for saturation.
    images (tensor): images to perform color jitter. Dimension is
        `num frames` x `channel` x `height` x `width`.
Returns:
    images (tensor): the jittered images, the dimension is
        `num frames` x `channel` x `height` x `width`.
def saturation_jitter(var, images):
    """
    Perform saturation jittering on the input images. The channels of images
    should be in order BGR.
    Args:
        var (float): jitter ratio for saturation.
        images (tensor): images to perform color jitter. Dimension is
            `num frames` x `channel` x `height` x `width`.
    Returns:
        images (tensor): the jittered images, the dimension is
            `num frames` x `channel` x `height` x `width`.
    """
    alpha = 1.0 + np.random.uniform(-var, var)
    img_gray = grayscale(images)
    images = blend(images, img_gray, alpha)

    return images
Perform AlexNet-style PCA jitter on the given images. Args: images (tensor): images to perform lighting jitter. Dimension is `num frames` x `channel` x `height` x `width`. alphastd (float): jitter ratio for PCA jitter. eigval (list): eigenvalues for PCA jitter. eigvec (list[list]): eigenvectors for PCA jitter. Returns: out_images (tensor): the jittered images, the dimension is `num frames` x `channel` x `height` x `width`.
def lighting_jitter(images, alphastd, eigval, eigvec): """ Perform AlexNet-style PCA jitter on the given images. Args: images (tensor): images to perform lighting jitter. Dimension is `num frames` x `channel` x `height` x `width`. alphastd (float): jitter ratio for PCA jitter. eigval (list): eigenvalues for PCA jitter. eigvec (list[list]): eigenvectors for PCA jitter. Returns: out_images (tensor): the jittered images, the dimension is `num frames` x `channel` x `height` x `width`. """ if alphastd == 0: return images # generate alpha1, alpha2, alpha3. alpha = np.random.normal(0, alphastd, size=(1, 3)) eig_vec = np.array(eigvec) eig_val = np.reshape(eigval, (1, 3)) rgb = np.sum( eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0), axis=1, ) out_images = torch.zeros_like(images) for idx in range(images.shape[1]): out_images[:, idx] = images[:, idx] + rgb[2 - idx] return out_images
Perform color normalization on the given images.
Args:
    images (tensor): images to perform color normalization. Dimension is
        `num frames` x `channel` x `height` x `width`.
    mean (list): mean values for normalization.
    stddev (list): standard deviations for normalization.
Returns:
    out_images (tensor): the normalized images, the dimension is
        `num frames` x `channel` x `height` x `width`.
def color_normalization(images, mean, stddev):
    """
    Perform color normalization on the given images.
    Args:
        images (tensor): images to perform color normalization. Dimension is
            `num frames` x `channel` x `height` x `width`.
        mean (list): mean values for normalization.
        stddev (list): standard deviations for normalization.
    Returns:
        out_images (tensor): the normalized images, the dimension is
            `num frames` x `channel` x `height` x `width`.
    """
    assert len(mean) == images.shape[1], "channel mean not computed properly"
    assert (
        len(stddev) == images.shape[1]
    ), "channel stddev not computed properly"

    out_images = torch.zeros_like(images)
    for idx in range(len(mean)):
        out_images[:, idx] = (images[:, idx] - mean[idx]) / stddev[idx]

    return out_images
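A sketch of the per-channel normalization; the mean/stddev values below are illustrative, not taken from a specific config.

import torch

clip = torch.rand(8, 3, 224, 224)
normalized = color_normalization(
    clip, mean=[0.45, 0.45, 0.45], stddev=[0.225, 0.225, 0.225]
)
# Each channel is shifted by its mean and divided by its stddev.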
Load images with support for retrying on failed loads.

Args:
    image_paths (list): paths of the images to be loaded.
    retry (int, optional): maximum number of load retries. Defaults to 10.
    backend (str): `pytorch` or `cv2`.

Returns:
    imgs (list): list of loaded images.
def retry_load_images(image_paths, retry=10, backend="pytorch"):
    """
    Load images with support for retrying on failed loads.
    Args:
        image_paths (list): paths of the images to be loaded.
        retry (int, optional): maximum number of load retries. Defaults to 10.
        backend (str): `pytorch` or `cv2`.
    Returns:
        imgs (list): list of loaded images.
    """
    for i in range(retry):
        imgs = []
        for image_path in image_paths:
            with PathManager.open(image_path, "rb") as f:
                img_str = np.frombuffer(f.read(), np.uint8)
                img = cv2.imdecode(img_str, flags=cv2.IMREAD_COLOR)
            imgs.append(img)

        if all(img is not None for img in imgs):
            if backend == "pytorch":
                imgs = torch.as_tensor(np.stack(imgs))
            return imgs
        else:
            logger.warning("Reading failed. Will retry.")
            time.sleep(1.0)
        if i == retry - 1:
            raise Exception("Failed to load images {}".format(image_paths))
Sample frames among the corresponding clip. Args: center_idx (int): center frame idx for current clip half_len (int): half of the clip length sample_rate (int): sampling rate for sampling frames inside of the clip num_frames (int): number of expected sampled frames Returns: seq (list): list of indexes of sampled frames in this clip.
def get_sequence(center_idx, half_len, sample_rate, num_frames): """ Sample frames among the corresponding clip. Args: center_idx (int): center frame idx for current clip half_len (int): half of the clip length sample_rate (int): sampling rate for sampling frames inside of the clip num_frames (int): number of expected sampled frames Returns: seq (list): list of indexes of sampled frames in this clip. """ seq = list(range(center_idx - half_len, center_idx + half_len, sample_rate)) for seq_idx in range(len(seq)): if seq[seq_idx] < 0: seq[seq_idx] = 0 elif seq[seq_idx] >= num_frames: seq[seq_idx] = num_frames - 1 return seq
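A worked example of the index clamping in get_sequence; the numbers are illustrative.

seq = get_sequence(center_idx=2, half_len=8, sample_rate=2, num_frames=20)
# seq == [0, 0, 0, 0, 2, 4, 6, 8]; indices before frame 0 are clamped to 0,
# and indices past num_frames - 1 would be clamped to 19.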
Prepare output as a list of tensors. Each tensor corresponds to a
unique pathway.
Args:
    frames (tensor): frames of images sampled from the video. The
        dimension is `channel` x `num frames` x `height` x `width`.
Returns:
    frame_list (list): list of tensors with the dimension of
        `channel` x `num frames` x `height` x `width`.
def pack_pathway_output(cfg, frames): """ Prepare output as a list of tensors. Each tensor corresponding to a unique pathway. Args: frames (tensor): frames of images sampled from the video. The dimension is `channel` x `num frames` x `height` x `width`. Returns: frame_list (list): list of tensors with the dimension of `channel` x `num frames` x `height` x `width`. """ if cfg.DATA.REVERSE_INPUT_CHANNEL: frames = frames[[2, 1, 0], :, :, :] if cfg.MODEL.ARCH in cfg.MODEL.SINGLE_PATHWAY_ARCH: frame_list = [frames] elif cfg.MODEL.ARCH in cfg.MODEL.MULTI_PATHWAY_ARCH: fast_pathway = frames # Perform temporal sampling from the fast pathway. slow_pathway = torch.index_select( frames, 1, torch.linspace( 0, frames.shape[1] - 1, frames.shape[1] // cfg.SLOWFAST.ALPHA ).long(), ) frame_list = [slow_pathway, fast_pathway] else: raise NotImplementedError( "Model arch {} is not in {}".format( cfg.MODEL.ARCH, cfg.MODEL.SINGLE_PATHWAY_ARCH + cfg.MODEL.MULTI_PATHWAY_ARCH, ) ) return frame_list
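A hedged sketch of the pathway packing, using a hand-built SimpleNamespace as a stand-in for the real config node (which comes from slowfast/config/defaults.py); the arch names and the ALPHA value are illustrative assumptions.

from types import SimpleNamespace
import torch

cfg = SimpleNamespace(
    DATA=SimpleNamespace(REVERSE_INPUT_CHANNEL=False),
    MODEL=SimpleNamespace(
        ARCH="slowfast",               # assumed multi-pathway arch name
        SINGLE_PATHWAY_ARCH=["slow"],
        MULTI_PATHWAY_ARCH=["slowfast"],
    ),
    SLOWFAST=SimpleNamespace(ALPHA=4),
)
frames = torch.rand(3, 32, 224, 224)  # channel x num frames x height x width
slow, fast = pack_pathway_output(cfg, frames)
# slow keeps 32 // 4 = 8 temporally subsampled frames; fast keeps all 32.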
Perform spatial sampling on the given video frames. If spatial_idx is
-1, perform random scale, random crop, and random flip on the given
frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling
with the given spatial_idx.
Args:
    frames (tensor): frames of images sampled from the video. The
        dimension is `num frames` x `height` x `width` x `channel`.
    spatial_idx (int): if -1, perform random spatial sampling. If 0, 1,
        or 2, perform left, center, right crop if width is larger than
        height, and perform top, center, bottom crop if height is larger
        than width.
    min_scale (int): the minimal size of scaling.
    max_scale (int): the maximal size of scaling.
    crop_size (int): the size of height and width used to crop the
        frames.
    inverse_uniform_sampling (bool): if True, sample uniformly in
        [1 / max_scale, 1 / min_scale] and take a reciprocal to get the
        scale. If False, take a uniform sample from [min_scale,
        max_scale].
Returns:
    frames (tensor): spatially sampled frames.
def spatial_sampling( frames, spatial_idx=-1, min_scale=256, max_scale=320, crop_size=224, random_horizontal_flip=True, inverse_uniform_sampling=False, ): """ Perform spatial sampling on the given video frames. If spatial_idx is -1, perform random scale, random crop, and random flip on the given frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling with the given spatial_idx. Args: frames (tensor): frames of images sampled from the video. The dimension is `num frames` x `height` x `width` x `channel`. spatial_idx (int): if -1, perform random spatial sampling. If 0, 1, or 2, perform left, center, right crop if width is larger than height, and perform top, center, buttom crop if height is larger than width. min_scale (int): the minimal size of scaling. max_scale (int): the maximal size of scaling. crop_size (int): the size of height and width used to crop the frames. inverse_uniform_sampling (bool): if True, sample uniformly in [1 / max_scale, 1 / min_scale] and take a reciprocal to get the scale. If False, take a uniform sample from [min_scale, max_scale]. Returns: frames (tensor): spatially sampled frames. """ assert spatial_idx in [-1, 0, 1, 2] if spatial_idx == -1: frames, _ = transform.random_short_side_scale_jitter( images=frames, min_size=min_scale, max_size=max_scale, inverse_uniform_sampling=inverse_uniform_sampling, ) frames, _ = transform.random_crop(frames, crop_size) if random_horizontal_flip: frames, _ = transform.horizontal_flip(0.5, frames) else: # The testing is deterministic and no jitter should be performed. # min_scale, max_scale, and crop_size are expect to be the same. #assert len({min_scale, max_scale, crop_size}) == 1 frames, _ = transform.random_short_side_scale_jitter( frames, min_scale, max_scale ) frames, _ = transform.uniform_crop(frames, crop_size, spatial_idx) return frames
Perform spatial sampling on the given video frames. If spatial_idx is
-1, perform random scale, random crop, and random flip on the given
frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling
with the given spatial_idx.
Args:
    frames (tensor): frames of images sampled from the video. The
        dimension is `num frames` x `height` x `width` x `channel`.
    spatial_idx (int): if -1, perform random spatial sampling. If 0, 1,
        or 2, perform left, center, right crop if width is larger than
        height, and perform top, center, bottom crop if height is larger
        than width.
    min_scale (int): the minimal size of scaling.
    max_scale (int): the maximal size of scaling.
    crop_size (int): the size of height and width used to crop the
        frames.
    inverse_uniform_sampling (bool): if True, sample uniformly in
        [1 / max_scale, 1 / min_scale] and take a reciprocal to get the
        scale. If False, take a uniform sample from [min_scale,
        max_scale].
Returns:
    frames (tensor): spatially sampled frames.
def spatial_sampling_2crops( frames, spatial_idx=-1, min_scale=256, max_scale=320, crop_size=224, random_horizontal_flip=True, inverse_uniform_sampling=False, ): """ Perform spatial sampling on the given video frames. If spatial_idx is -1, perform random scale, random crop, and random flip on the given frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling with the given spatial_idx. Args: frames (tensor): frames of images sampled from the video. The dimension is `num frames` x `height` x `width` x `channel`. spatial_idx (int): if -1, perform random spatial sampling. If 0, 1, or 2, perform left, center, right crop if width is larger than height, and perform top, center, buttom crop if height is larger than width. min_scale (int): the minimal size of scaling. max_scale (int): the maximal size of scaling. crop_size (int): the size of height and width used to crop the frames. inverse_uniform_sampling (bool): if True, sample uniformly in [1 / max_scale, 1 / min_scale] and take a reciprocal to get the scale. If False, take a uniform sample from [min_scale, max_scale]. Returns: frames (tensor): spatially sampled frames. """ assert spatial_idx in [-1, 0, 1, 2] if spatial_idx == -1: frames, _ = transform.random_short_side_scale_jitter( images=frames, min_size=min_scale, max_size=max_scale, inverse_uniform_sampling=inverse_uniform_sampling, ) frames, _ = transform.random_crop(frames, crop_size) if random_horizontal_flip: frames, _ = transform.horizontal_flip(0.5, frames) else: # The testing is deterministic and no jitter should be performed. # min_scale, max_scale, and crop_size are expect to be the same. #assert len({min_scale, max_scale, crop_size}) == 1 frames, _ = transform.random_short_side_scale_jitter( frames, min_scale, max_scale ) frames, _ = transform.uniform_crop_2crops(frames, crop_size, spatial_idx) return frames
Construct binary label vector given a list of label indices. Args: labels (list): The input label list. num_classes (int): Number of classes of the label vector. Returns: labels (numpy array): the resulting binary vector.
def as_binary_vector(labels, num_classes): """ Construct binary label vector given a list of label indices. Args: labels (list): The input label list. num_classes (int): Number of classes of the label vector. Returns: labels (numpy array): the resulting binary vector. """ label_arr = np.zeros((num_classes,)) for lbl in set(labels): label_arr[lbl] = 1.0 return label_arr
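A quick example of the binary label vector; the label indices are illustrative.

vec = as_binary_vector([1, 3, 3], num_classes=5)
# array([0., 1., 0., 1., 0.]) -- duplicate labels are only set once.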
Join a list of label lists.
Args:
    label_list (list): The input list of label lists.
Returns:
    labels (list): The joined list of all labels in the input, with
        duplicates removed.
def aggregate_labels(label_list):
    """
    Join a list of label lists.
    Args:
        label_list (list): The input list of label lists.
    Returns:
        labels (list): The joined list of all labels in the input, with
            duplicates removed.
    """
    all_labels = []
    for labels in label_list:
        for l in labels:
            all_labels.append(l)
    return list(set(all_labels))
Aggregate annotations from all frames of a video to form video-level labels. Args: labels (list): The input label list. Returns: labels (list): Same as input, but with each label replaced by a video-level one.
def convert_to_video_level_labels(labels): """ Aggregate annotations from all frames of a video to form video-level labels. Args: labels (list): The input label list. Returns: labels (list): Same as input, but with each label replaced by a video-level one. """ for video_id in range(len(labels)): video_level_labels = aggregate_labels(labels[video_id]) for i in range(len(labels[video_id])): labels[video_id][i] = video_level_labels return labels
Load image paths and labels from a "frame list". Each line of the frame list contains: `original_vido_id video_id frame_id path labels` Args: frame_list_file (string): path to the frame list. prefix (str): the prefix for the path. return_list (bool): if True, return a list. If False, return a dict. Returns: image_paths (list or dict): list of list containing path to each frame. If return_list is False, then return in a dict form. labels (list or dict): list of list containing label of each frame. If return_list is False, then return in a dict form.
def load_image_lists(frame_list_file, prefix="", return_list=False): """ Load image paths and labels from a "frame list". Each line of the frame list contains: `original_vido_id video_id frame_id path labels` Args: frame_list_file (string): path to the frame list. prefix (str): the prefix for the path. return_list (bool): if True, return a list. If False, return a dict. Returns: image_paths (list or dict): list of list containing path to each frame. If return_list is False, then return in a dict form. labels (list or dict): list of list containing label of each frame. If return_list is False, then return in a dict form. """ image_paths = defaultdict(list) labels = defaultdict(list) with PathManager.open(frame_list_file, "r") as f: assert f.readline().startswith("original_vido_id") for line in f: row = line.split() # original_vido_id video_id frame_id path labels assert len(row) == 5 video_name = row[0] if prefix == "": path = row[3] else: path = os.path.join(prefix, row[3]) image_paths[video_name].append(path) frame_labels = row[-1].replace('"', "") if frame_labels != "": labels[video_name].append( [int(x) for x in frame_labels.split(",")] ) else: labels[video_name].append([]) if return_list: keys = image_paths.keys() image_paths = [image_paths[key] for key in keys] labels = [labels[key] for key in keys] return image_paths, labels return dict(image_paths), dict(labels)
Normalize a given tensor by subtracting the mean and dividing the std. Args: tensor (tensor): tensor to normalize. mean (tensor or list): mean value to subtract. std (tensor or list): std to divide.
def tensor_normalize(tensor, mean, std): """ Normalize a given tensor by subtracting the mean and dividing the std. Args: tensor (tensor): tensor to normalize. mean (tensor or list): mean value to subtract. std (tensor or list): std to divide. """ if tensor.dtype == torch.uint8: tensor = tensor.float() tensor = tensor / 255.0 if type(mean) == list: mean = torch.tensor(mean) if type(std) == list: std = torch.tensor(std) tensor = tensor - mean tensor = tensor / std return tensor
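A sketch of normalizing raw uint8 frames; the channel-last shape and the mean/std values are illustrative.

import torch

frames = torch.randint(0, 256, (8, 224, 224, 3), dtype=torch.uint8)
frames = tensor_normalize(frames, [0.45, 0.45, 0.45], [0.225, 0.225, 0.225])
# uint8 input is first scaled to [0, 1], then the mean/std broadcast over the
# trailing channel dimension.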
When multigrid training uses a fewer number of frames, we randomly increase the sampling rate so that some clips cover the original span.
def get_random_sampling_rate(long_cycle_sampling_rate, sampling_rate): """ When multigrid training uses a fewer number of frames, we randomly increase the sampling rate so that some clips cover the original span. """ if long_cycle_sampling_rate > 0: assert long_cycle_sampling_rate >= sampling_rate return random.randint(sampling_rate, long_cycle_sampling_rate) else: return sampling_rate
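A small example of the multigrid sampling-rate jitter; the rates are illustrative.

rate = get_random_sampling_rate(long_cycle_sampling_rate=8, sampling_rate=2)
# rate is drawn uniformly from {2, ..., 8}; with long_cycle_sampling_rate == 0
# the original sampling_rate is returned unchanged.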
Revert normalization for a given tensor by multiplying by the std and adding the mean. Args: tensor (tensor): tensor to revert normalization. mean (tensor or list): mean value to add. std (tensor or list): std to multiply.
def revert_tensor_normalize(tensor, mean, std): """ Revert normalization for a given tensor by multiplying by the std and adding the mean. Args: tensor (tensor): tensor to revert normalization. mean (tensor or list): mean value to add. std (tensor or list): std to multiply. """ if type(mean) == list: mean = torch.tensor(mean) if type(std) == list: std = torch.tensor(std) tensor = tensor * std tensor = tensor + mean return tensor
Create sampler for the given dataset. Args: dataset (torch.utils.data.Dataset): the given dataset. shuffle (bool): set to ``True`` to have the data reshuffled at every epoch. cfg (CfgNode): configs. Details can be found in slowfast/config/defaults.py Returns: sampler (Sampler): the created sampler.
def create_sampler(dataset, shuffle, cfg): """ Create sampler for the given dataset. Args: dataset (torch.utils.data.Dataset): the given dataset. shuffle (bool): set to ``True`` to have the data reshuffled at every epoch. cfg (CfgNode): configs. Details can be found in slowfast/config/defaults.py Returns: sampler (Sampler): the created sampler. """ sampler = DistributedSampler(dataset) if cfg.NUM_GPUS > 1 else None return sampler
Create init function passed to pytorch data loader. Args: dataset (torch.utils.data.Dataset): the given dataset.
def loader_worker_init_fn(dataset): """ Create init function passed to pytorch data loader. Args: dataset (torch.utils.data.Dataset): the given dataset. """ return None
Given the path to the video, return the pyav video container. Args: path_to_vid (str): path to the video. multi_thread_decode (bool): if True, perform multi-thread decoding. backend (str): decoder backend, options include `pyav` and `torchvision`, default is `pyav`. Returns: container (container): video container.
def get_video_container(path_to_vid, multi_thread_decode=False, backend="pyav"):
    """
    Given the path to the video, return the pyav video container.
    Args:
        path_to_vid (str): path to the video.
        multi_thread_decode (bool): if True, perform multi-thread decoding.
        backend (str): decoder backend, options include `pyav` and
            `torchvision`, default is `pyav`.
    Returns:
        container (container): video container.
    """
    if backend == "torchvision":
        with open(path_to_vid, "rb") as fp:
            container = fp.read()
        return container
    elif backend == "pyav":
        container = av.open(path_to_vid)
        if multi_thread_decode:
            # Enable multiple threads for decoding.
            container.streams.video[0].thread_type = "AUTO"
        return container
    else:
        raise NotImplementedError("Unknown backend {}".format(backend))
Args: cfg (CfgNode): model building configs, details are in the comments of the config file. Returns: nn.Module: the normalization layer.
def get_norm(cfg): """ Args: cfg (CfgNode): model building configs, details are in the comments of the config file. Returns: nn.Module: the normalization layer. """ if cfg.BN.NORM_TYPE == "batchnorm": return nn.BatchNorm3d elif cfg.BN.NORM_TYPE == "sub_batchnorm": return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS) elif cfg.BN.NORM_TYPE == "sync_batchnorm": return partial( NaiveSyncBatchNorm3d, num_sync_devices=cfg.BN.NUM_SYNC_DEVICES ) else: raise NotImplementedError( "Norm type {} is not supported".format(cfg.BN.NORM_TYPE) )
Builds the video model. Args: cfg (configs): configs that contains the hyper-parameters to build the backbone. Details can be seen in slowfast/config/defaults.py. gpu_id (Optional[int]): specify the gpu index to build model.
def build_model(cfg, gpu_id=None): """ Builds the video model. Args: cfg (configs): configs that contains the hyper-parameters to build the backbone. Details can be seen in slowfast/config/defaults.py. gpu_id (Optional[int]): specify the gpu index to build model. """ if torch.cuda.is_available(): assert ( cfg.NUM_GPUS <= torch.cuda.device_count() ), "Cannot use more GPU devices than available" else: assert ( cfg.NUM_GPUS == 0 ), "Cuda is not available. Please set `NUM_GPUS: 0 for running on CPUs." # Construct the model name = cfg.MODEL.MODEL_NAME model = MODEL_REGISTRY.get(name)(cfg) if cfg.NUM_GPUS: if gpu_id is None: # Determine the GPU used by the current process cur_device = torch.cuda.current_device() else: cur_device = gpu_id # Transfer the model to the current GPU device model = model.cuda(device=cur_device) # Use multi-process data parallel model in the multi-gpu setting if cfg.NUM_GPUS > 1: # Make model replica operate on the current device model = torch.nn.parallel.DistributedDataParallel( module=model, device_ids=[cur_device], output_device=cur_device ) return model
Retrieve the loss given the loss name.
Args:
    loss_name (str): the name of the loss to use.
def get_loss_func(loss_name):
    """
    Retrieve the loss given the loss name.
    Args:
        loss_name (str): the name of the loss to use.
    """
    if loss_name not in _LOSSES.keys():
        raise NotImplementedError("Loss {} is not supported".format(loss_name))
    return _LOSSES[loss_name]
Construct an SGD, Adam, or AdamW optimizer.
Details can be found in:
Herbert Robbins, and Sutton Monro. "A stochastic approximation method."
and
Diederik P. Kingma, and Jimmy Ba.
"Adam: A Method for Stochastic Optimization."

Args:
    model (model): model to perform stochastic gradient descent
    optimization or Adam optimization.
    cfg (config): configs of hyper-parameters of SGD or Adam, including
    base learning rate, momentum, weight_decay, dampening, etc.
def construct_optimizer(model, cfg): """ Construct a stochastic gradient descent or ADAM optimizer with momentum. Details can be found in: Herbert Robbins, and Sutton Monro. "A stochastic approximation method." and Diederik P.Kingma, and Jimmy Ba. "Adam: A Method for Stochastic Optimization." Args: model (model): model to perform stochastic gradient descent optimization or ADAM optimization. cfg (config): configs of hyper-parameters of SGD or ADAM, includes base learning rate, momentum, weight_decay, dampening, and etc. """ # Batchnorm parameters. bn_params = [] # Non-batchnorm parameters. non_bn_parameters = [] for name, p in model.named_parameters(): if "bn" in name: bn_params.append(p) else: non_bn_parameters.append(p) # Apply different weight decay to Batchnorm and non-batchnorm parameters. # In Caffe2 classification codebase the weight decay for batchnorm is 0.0. # Having a different weight decay on batchnorm might cause a performance # drop. optim_params = [ {"params": bn_params, "weight_decay": cfg.BN.WEIGHT_DECAY}, {"params": non_bn_parameters, "weight_decay": cfg.SOLVER.WEIGHT_DECAY}, ] # Check all parameters will be passed into optimizer. assert len(list(model.parameters())) == len(non_bn_parameters) + len( bn_params ), "parameter size does not match: {} + {} != {}".format( len(non_bn_parameters), len(bn_params), len(list(model.parameters())) ) if cfg.SOLVER.OPTIMIZING_METHOD == "sgd": return torch.optim.SGD( optim_params, lr=cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM, weight_decay=cfg.SOLVER.WEIGHT_DECAY, dampening=cfg.SOLVER.DAMPENING, nesterov=cfg.SOLVER.NESTEROV, ) elif cfg.SOLVER.OPTIMIZING_METHOD == "adam": return torch.optim.Adam( optim_params, lr=cfg.SOLVER.BASE_LR, betas=(0.9, 0.999), eps=1e-08, weight_decay=cfg.SOLVER.WEIGHT_DECAY, ) elif cfg.SOLVER.OPTIMIZING_METHOD == "adamw": return torch.optim.AdamW( optim_params, lr=cfg.SOLVER.BASE_LR, betas=(0.9, 0.999), eps=1e-08, weight_decay=cfg.SOLVER.WEIGHT_DECAY, ) else: raise NotImplementedError( "Does not support {} optimizer".format(cfg.SOLVER.OPTIMIZING_METHOD) )
Retrieves the lr for the given epoch (as specified by the lr policy). Args: cfg (config): configs of hyper-parameters of ADAM, includes base learning rate, betas, and weight decays. cur_epoch (float): the number of epoch of the current training stage.
def get_epoch_lr(cur_epoch, cfg): """ Retrieves the lr for the given epoch (as specified by the lr policy). Args: cfg (config): configs of hyper-parameters of ADAM, includes base learning rate, betas, and weight decays. cur_epoch (float): the number of epoch of the current training stage. """ return lr_policy.get_lr_at_epoch(cfg, cur_epoch)
Sets the optimizer lr to the specified value.
Args:
    optimizer (optim): the optimizer used to optimize the current network.
    new_lr (float): the new learning rate to set.
def set_lr(optimizer, new_lr):
    """
    Sets the optimizer lr to the specified value.
    Args:
        optimizer (optim): the optimizer used to optimize the current network.
        new_lr (float): the new learning rate to set.
    """
    for param_group in optimizer.param_groups:
        param_group["lr"] = new_lr
Retrieves the transformation module by name.
def get_trans_func(name): """ Retrieves the transformation module by name. """ trans_funcs = { "bottleneck_transform": BottleneckTransform, "basic_transform": BasicTransform, "x3d_transform": X3DTransform, } assert ( name in trans_funcs.keys() ), "Transformation function '{}' not supported".format(name) return trans_funcs[name]
Retrieves the stem module by name.
def get_stem_func(name): """ Retrieves the stem module by name. """ trans_funcs = {"x3d_stem": X3DStem, "basic_stem": ResNetBasicStem} assert ( name in trans_funcs.keys() ), "Transformation function '{}' not supported".format(name) return trans_funcs[name]
convert patch embedding weight from manual patchify + linear proj to conv
def _conv_filter(state_dict, patch_size=16): """ convert patch embedding weight from manual patchify + linear proj to conv""" out_dict = {} for k, v in state_dict.items(): if 'patch_embed.proj.weight' in k: if v.shape[-1] != patch_size: patch_size = v.shape[-1] v = v.reshape((v.shape[0], 3, patch_size, patch_size)) out_dict[k] = v return out_dict
Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \leq \text{mean} \leq b`. Args: tensor: an n-dimensional `torch.Tensor` mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value Examples: >>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w)
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): # type: (Tensor, float, float, float, float) -> Tensor r"""Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \leq \text{mean} \leq b`. Args: tensor: an n-dimensional `torch.Tensor` mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value Examples: >>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w) """ return _no_grad_trunc_normal_(tensor, mean, std, a, b)
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument.
def drop_path(x, drop_prob: float = 0., training: bool = False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0. or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) random_tensor.floor_() # binarize output = x.div(keep_prob) * random_tensor return output
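A short sketch of stochastic depth at training time; the tensor shape and the drop probability are illustrative.

import torch

x = torch.rand(4, 16, 768)  # batch x tokens x dim
out = drop_path(x, drop_prob=0.1, training=True)
# Each sample in the batch is zeroed with probability 0.1 and the survivors
# are rescaled by 1 / 0.9, keeping the expected activation unchanged.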
Returns a unique identifier for a video id & timestamp.
def make_image_key(video_id, timestamp): """Returns a unique identifier for a video id & timestamp.""" return "%s,%04d" % (video_id, int(timestamp))
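A tiny example of the AVA image key format; the video id and timestamp are illustrative.

key = make_image_key("abcdefghijk", 904)
# key == "abcdefghijk,0904"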
Loads boxes and class labels from a CSV file in the AVA format.
CSV file format described at https://research.google.com/ava/download.html.
Args:
    csv_file: A file object.
    class_whitelist: If provided, boxes corresponding to (integer) class
    labels not in this set are skipped.
Returns:
    boxes: A dictionary mapping each unique image key (string) to a list of
    boxes, given as coordinates [y1, x1, y2, x2].
    labels: A dictionary mapping each unique image key (string) to a list
    of integer class labels, matching the corresponding box in `boxes`.
    scores: A dictionary mapping each unique image key (string) to a list
    of score values, matching the corresponding label in `labels`. If
    scores are not provided in the csv, then they will default to 1.0.
def read_csv(csv_file, class_whitelist=None, load_score=False): """Loads boxes and class labels from a CSV file in the AVA format. CSV file format described at https://research.google.com/ava/download.html. Args: csv_file: A file object. class_whitelist: If provided, boxes corresponding to (integer) class labels not in this set are skipped. Returns: boxes: A dictionary mapping each unique image key (string) to a list of boxes, given as coordinates [y1, x1, y2, x2]. labels: A dictionary mapping each unique image key (string) to a list of integer class lables, matching the corresponding box in `boxes`. scores: A dictionary mapping each unique image key (string) to a list of score values lables, matching the corresponding label in `labels`. If scores are not provided in the csv, then they will default to 1.0. """ boxes = defaultdict(list) labels = defaultdict(list) scores = defaultdict(list) with PathManager.open(csv_file, "r") as f: reader = csv.reader(f) for row in reader: assert len(row) in [7, 8], "Wrong number of columns: " + row image_key = make_image_key(row[0], row[1]) x1, y1, x2, y2 = [float(n) for n in row[2:6]] action_id = int(row[6]) if class_whitelist and action_id not in class_whitelist: continue score = 1.0 if load_score: score = float(row[7]) boxes[image_key].append([y1, x1, y2, x2]) labels[image_key].append(action_id) scores[image_key].append(score) return boxes, labels, scores
Reads a CSV file of excluded timestamps. Args: exclusions_file: A file object containing a csv of video-id,timestamp. Returns: A set of strings containing excluded image keys, e.g. "aaaaaaaaaaa,0904", or an empty set if exclusions file is None.
def read_exclusions(exclusions_file): """Reads a CSV file of excluded timestamps. Args: exclusions_file: A file object containing a csv of video-id,timestamp. Returns: A set of strings containing excluded image keys, e.g. "aaaaaaaaaaa,0904", or an empty set if exclusions file is None. """ excluded = set() if exclusions_file: with PathManager.open(exclusions_file, "r") as f: reader = csv.reader(f) for row in reader: assert len(row) == 2, "Expected only 2 columns, got: " + row excluded.add(make_image_key(row[0], row[1])) return excluded
Read label map and class ids.
def read_labelmap(labelmap_file): """Read label map and class ids.""" labelmap = [] class_ids = set() name = "" class_id = "" with PathManager.open(labelmap_file, "r") as f: for line in f: if line.startswith(" name:"): name = line.split('"')[1] elif line.startswith(" id:") or line.startswith(" label_id:"): class_id = int(line.strip().split(" ")[-1]) labelmap.append({"id": class_id, "name": name}) class_ids.add(class_id) return labelmap, class_ids
Run AVA evaluation given annotation/prediction files.
def evaluate_ava_from_files(labelmap, groundtruth, detections, exclusions): """Run AVA evaluation given annotation/prediction files.""" categories, class_whitelist = read_labelmap(labelmap) excluded_keys = read_exclusions(exclusions) groundtruth = read_csv(groundtruth, class_whitelist, load_score=False) detections = read_csv(detections, class_whitelist, load_score=True) run_evaluation(categories, groundtruth, detections, excluded_keys)
Run AVA evaluation given numpy arrays.
def evaluate_ava( preds, original_boxes, metadata, excluded_keys, class_whitelist, categories, groundtruth=None, video_idx_to_name=None, name="latest", ): """Run AVA evaluation given numpy arrays.""" eval_start = time.time() detections = get_ava_eval_data( preds, original_boxes, metadata, class_whitelist, video_idx_to_name=video_idx_to_name, ) logger.info("Evaluating with %d unique GT frames." % len(groundtruth[0])) logger.info( "Evaluating with %d unique detection frames" % len(detections[0]) ) write_results(detections, "detections_%s.csv" % name) write_results(groundtruth, "groundtruth_%s.csv" % name) results = run_evaluation(categories, groundtruth, detections, excluded_keys) logger.info("AVA eval done in %f seconds." % (time.time() - eval_start)) return results["PascalBoxes_Precision/[email protected]"]
AVA evaluation main logic.
def run_evaluation( categories, groundtruth, detections, excluded_keys, verbose=True ): """AVA evaluation main logic.""" pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator( categories ) boxes, labels, _ = groundtruth gt_keys = [] pred_keys = [] for image_key in boxes: if image_key in excluded_keys: logging.info( ( "Found excluded timestamp in ground truth: %s. " "It will be ignored." ), image_key, ) continue pascal_evaluator.add_single_ground_truth_image_info( image_key, { standard_fields.InputDataFields.groundtruth_boxes: np.array( boxes[image_key], dtype=float ), standard_fields.InputDataFields.groundtruth_classes: np.array( labels[image_key], dtype=int ), standard_fields.InputDataFields.groundtruth_difficult: np.zeros( len(boxes[image_key]), dtype=bool ), }, ) gt_keys.append(image_key) boxes, labels, scores = detections for image_key in boxes: if image_key in excluded_keys: logging.info( ( "Found excluded timestamp in detections: %s. " "It will be ignored." ), image_key, ) continue pascal_evaluator.add_single_detected_image_info( image_key, { standard_fields.DetectionResultFields.detection_boxes: np.array( boxes[image_key], dtype=float ), standard_fields.DetectionResultFields.detection_classes: np.array( labels[image_key], dtype=int ), standard_fields.DetectionResultFields.detection_scores: np.array( scores[image_key], dtype=float ), }, ) pred_keys.append(image_key) metrics = pascal_evaluator.evaluate() if du.is_master_proc(): pprint.pprint(metrics, indent=2) return metrics
Convert our data format into the data format used in official AVA evaluation.
def get_ava_eval_data( scores, boxes, metadata, class_whitelist, verbose=False, video_idx_to_name=None, ): """ Convert our data format into the data format used in official AVA evaluation. """ out_scores = defaultdict(list) out_labels = defaultdict(list) out_boxes = defaultdict(list) count = 0 for i in range(scores.shape[0]): video_idx = int(np.round(metadata[i][0])) sec = int(np.round(metadata[i][1])) video = video_idx_to_name[video_idx] key = video + "," + "%04d" % (sec) batch_box = boxes[i].tolist() # The first is batch idx. batch_box = [batch_box[j] for j in [0, 2, 1, 4, 3]] one_scores = scores[i].tolist() for cls_idx, score in enumerate(one_scores): if cls_idx + 1 in class_whitelist: out_scores[key].append(score) out_labels[key].append(cls_idx + 1) out_boxes[key].append(batch_box[1:]) count += 1 return out_boxes, out_labels, out_scores
Write prediction results into official formats.
def write_results(detections, filename): """Write prediction results into official formats.""" start = time.time() boxes, labels, scores = detections with PathManager.open(filename, "w") as f: for key in boxes.keys(): for box, label, score in zip(boxes[key], labels[key], scores[key]): f.write( "%s,%.03f,%.03f,%.03f,%.03f,%d,%.04f\n" % (key, box[1], box[0], box[3], box[2], label, score) ) logger.info("AVA results wrote to %s" % filename) logger.info("\ttook %d seconds." % (time.time() - start))
Benchmark the speed of data loading in PySlowFast. Args: cfg (CfgNode): configs. Details can be found in lib/config/defaults.py
def benchmark_data_loading(cfg): """ Benchmark the speed of data loading in PySlowFast. Args: cfg (CfgNode): configs. Details can be found in lib/config/defaults.py """ # Set up environment. setup_environment() # Set random seed from configs. np.random.seed(cfg.RNG_SEED) torch.manual_seed(cfg.RNG_SEED) # Setup logging format. logging.setup_logging(cfg.OUTPUT_DIR) # Print config. logger.info("Benchmark data loading with config:") logger.info(pprint.pformat(cfg)) timer = Timer() dataloader = loader.construct_loader(cfg, "train") logger.info( "Initialize loader using {:.2f} seconds.".format(timer.seconds()) ) # Total batch size across different machines. batch_size = cfg.TRAIN.BATCH_SIZE * cfg.NUM_SHARDS log_period = cfg.BENCHMARK.LOG_PERIOD epoch_times = [] # Test for a few epochs. for cur_epoch in range(cfg.BENCHMARK.NUM_EPOCHS): timer = Timer() timer_epoch = Timer() iter_times = [] if cfg.BENCHMARK.SHUFFLE: loader.shuffle_dataset(dataloader, cur_epoch) for cur_iter, _ in enumerate(tqdm.tqdm(dataloader)): if cur_iter > 0 and cur_iter % log_period == 0: iter_times.append(timer.seconds()) ram_usage, ram_total = misc.cpu_mem_usage() logger.info( "Epoch {}: {} iters ({} videos) in {:.2f} seconds. " "RAM Usage: {:.2f}/{:.2f} GB.".format( cur_epoch, log_period, log_period * batch_size, iter_times[-1], ram_usage, ram_total, ) ) timer.reset() epoch_times.append(timer_epoch.seconds()) ram_usage, ram_total = misc.cpu_mem_usage() logger.info( "Epoch {}: in total {} iters ({} videos) in {:.2f} seconds. " "RAM Usage: {:.2f}/{:.2f} GB.".format( cur_epoch, len(dataloader), len(dataloader) * batch_size, epoch_times[-1], ram_usage, ram_total, ) ) logger.info( "Epoch {}: on average every {} iters ({} videos) take {:.2f}/{:.2f} " "(avg/std) seconds.".format( cur_epoch, log_period, log_period * batch_size, np.mean(iter_times), np.std(iter_times), ) ) logger.info( "On average every epoch ({} videos) takes {:.2f}/{:.2f} " "(avg/std) seconds.".format( len(dataloader) * batch_size, np.mean(epoch_times), np.std(epoch_times), ) )
Compute and update the batch norm stats to make them more precise. During
training both the bn stats and the weights change after every iteration, so
the bn stats do not precisely reflect the latest state of the model. Here
the bn stats are recomputed without changing the weights, to make the
running mean and running var more precise.
Args:
    model (model): the model used to compute and update the bn stats.
    data_loader (dataloader): dataloader used to provide inputs.
    num_batches (int): number of iterations used to compute the stats.
def compute_and_update_bn_stats(model, data_loader, num_batches=200): """ Compute and update the batch norm stats to make it more precise. During training both bn stats and the weight are changing after every iteration, so the bn can not precisely reflect the latest stats of the current model. Here the bn stats is recomputed without change of weights, to make the running mean and running var more precise. Args: model (model): the model using to compute and update the bn stats. data_loader (dataloader): dataloader using to provide inputs. num_batches (int): running iterations using to compute the stats. """ # Prepares all the bn layers. bn_layers = [ m for m in model.modules() if any( ( isinstance(m, bn_type) for bn_type in ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, ) ) ) ] # In order to make the running stats only reflect the current batch, the # momentum is disabled. # bn.running_mean = (1 - momentum) * bn.running_mean + momentum * batch_mean # Setting the momentum to 1.0 to compute the stats without momentum. momentum_actual = [bn.momentum for bn in bn_layers] for bn in bn_layers: bn.momentum = 1.0 # Calculates the running iterations for precise stats computation. running_mean = [torch.zeros_like(bn.running_mean) for bn in bn_layers] running_square_mean = [torch.zeros_like(bn.running_var) for bn in bn_layers] for ind, (inputs, _, _) in enumerate( itertools.islice(data_loader, num_batches) ): # Forwards the model to update the bn stats. if isinstance(inputs, (list,)): for i in range(len(inputs)): inputs[i] = inputs[i].float().cuda(non_blocking=True) else: inputs = inputs.cuda(non_blocking=True) model(inputs) for i, bn in enumerate(bn_layers): # Accumulates the bn stats. running_mean[i] += (bn.running_mean - running_mean[i]) / (ind + 1) # $E(x^2) = Var(x) + E(x)^2$. cur_square_mean = bn.running_var + bn.running_mean ** 2 running_square_mean[i] += ( cur_square_mean - running_square_mean[i] ) / (ind + 1) for i, bn in enumerate(bn_layers): bn.running_mean = running_mean[i] # Var(x) = $E(x^2) - E(x)^2$. bn.running_var = running_square_mean[i] - bn.running_mean ** 2 # Sets the precise bn stats. bn.momentum = momentum_actual[i]
Get the function to convert Caffe2 layer names to PyTorch layer names. Returns: (func): function to convert parameter name from Caffe2 format to PyTorch format.
def get_name_convert_func():
    """
    Get the function to convert Caffe2 layer names to PyTorch layer names.
    Returns:
        (func): function to convert parameter name from Caffe2 format to
        PyTorch format.
    """
    pairs = [
        # ------------------------------------------------------------
        # 'nonlocal_conv3_1_theta_w' -> 's3.pathway0_nonlocal1.conv_theta.weight'
        [
            r"^nonlocal_conv([0-9]+)_([0-9]+)_(.*)",
            r"s\1.pathway0_nonlocal\2_\3",
        ],
        # 'theta' -> 'conv_theta'
        [r"^(.*)_nonlocal([0-9]+)_(theta)(.*)", r"\1_nonlocal\2.conv_\3\4"],
        # 'g' -> 'conv_g'
        [r"^(.*)_nonlocal([0-9]+)_(g)(.*)", r"\1_nonlocal\2.conv_\3\4"],
        # 'phi' -> 'conv_phi'
        [r"^(.*)_nonlocal([0-9]+)_(phi)(.*)", r"\1_nonlocal\2.conv_\3\4"],
        # 'out' -> 'conv_out'
        [r"^(.*)_nonlocal([0-9]+)_(out)(.*)", r"\1_nonlocal\2.conv_\3\4"],
        # 'nonlocal_conv4_5_bn_s' -> 's4.pathway0_nonlocal5.bn.weight'
        [r"^(.*)_nonlocal([0-9]+)_(bn)_(.*)", r"\1_nonlocal\2.\3.\4"],
        # ------------------------------------------------------------
        # 't_pool1_subsample_bn_rm' -> 's1_fuse.bn.running_mean'
        [r"^t_pool1_subsample_bn_(.*)", r"s1_fuse.bn.\1"],
        # 't_pool1_subsample' -> 's1_fuse.conv_f2s'
        [r"^t_pool1_subsample_(.*)", r"s1_fuse.conv_f2s.\1"],
        # 't_res4_5_branch2c_bn_subsample_bn_rm' -> 's4_fuse.bn.running_mean'
        [
            r"^t_res([0-9]+)_([0-9]+)_branch2c_bn_subsample_bn_(.*)",
            r"s\1_fuse.bn.\3",
        ],
        # 't_res4_5_branch2c_bn_subsample_w' -> 's4_fuse.conv_f2s.weight'
        [
            r"^t_res([0-9]+)_([0-9]+)_branch2c_bn_subsample_(.*)",
            r"s\1_fuse.conv_f2s.\3",
        ],
        # ------------------------------------------------------------
        # 'res4_4_branch2c_bn_b' -> 's4.pathway0_res4.branch2.c_bn_b'
        [
            r"^res([0-9]+)_([0-9]+)_branch([0-9]+)([a-z])_(.*)",
            r"s\1.pathway0_res\2.branch\3.\4_\5",
        ],
        # 'res_conv1_bn_' -> 's1.pathway0_stem.bn.'
        [r"^res_conv1_bn_(.*)", r"s1.pathway0_stem.bn.\1"],
        # 'conv1_xy_w_momentum' -> 's1.pathway0_stem.conv_xy_w_momentum'
        [r"^conv1_xy(.*)", r"s1.pathway0_stem.conv_xy\1"],
        # 'conv1_w_momentum' -> 's1.pathway0_stem.conv.w_momentum'
        [r"^conv1_(.*)", r"s1.pathway0_stem.conv.\1"],
        # 'res4_0_branch1_w' -> 's4.pathway0_res0.branch1.weight'
        [
            r"^res([0-9]+)_([0-9]+)_branch([0-9]+)_(.*)",
            r"s\1.pathway0_res\2.branch\3_\4",
        ],
        # 'res_conv1_' -> 's1.pathway0_stem.conv.'
        [r"^res_conv1_(.*)", r"s1.pathway0_stem.conv.\1"],
        # ------------------------------------------------------------
        # 't_res4_4_branch2c_bn_b' -> 's4.pathway1_res4.branch2.c_bn_b'
        [
            r"^t_res([0-9]+)_([0-9]+)_branch([0-9]+)([a-z])_(.*)",
            r"s\1.pathway1_res\2.branch\3.\4_\5",
        ],
        # 't_res_conv1_bn_' -> 's1.pathway1_stem.bn.'
        [r"^t_res_conv1_bn_(.*)", r"s1.pathway1_stem.bn.\1"],
        # 't_conv1_w_momentum' -> 's1.pathway1_stem.conv.w_momentum'
        [r"^t_conv1_(.*)", r"s1.pathway1_stem.conv.\1"],
        # 't_res4_0_branch1_w' -> 's4.pathway1_res0.branch1.weight'
        [
            r"^t_res([0-9]+)_([0-9]+)_branch([0-9]+)_(.*)",
            r"s\1.pathway1_res\2.branch\3_\4",
        ],
        # 't_res_conv1_' -> 's1.pathway1_stem.conv.'
        [r"^t_res_conv1_(.*)", r"s1.pathway1_stem.conv.\1"],
        # ------------------------------------------------------------
        # 'pred_' -> 'head.projection.'
        [r"pred_(.*)", r"head.projection.\1"],
        # '.b_bn_fc' -> '.se.fc'
        [r"(.*)b_bn_fc(.*)", r"\1se.fc\2"],
        # 'conv_5' -> 'head.conv_5'
        [r"conv_5(.*)", r"head.conv_5\1"],
        # 'lin_5' -> 'head.lin_5'
        [r"lin_5(.*)", r"head.lin_5\1"],
        # 'bn_b' -> 'bn.bias'
        [r"(.*)bn.b\Z", r"\1bn.bias"],
        # 'bn_s' -> 'bn.weight'
        [r"(.*)bn.s\Z", r"\1bn.weight"],
        # 'bn_rm' -> 'bn.running_mean'
        [r"(.*)bn.rm\Z", r"\1bn.running_mean"],
        # 'bn_riv' -> 'bn.running_var'
        [r"(.*)bn.riv\Z", r"\1bn.running_var"],
        # '_b' -> '.bias'
        [r"(.*)[\._]b\Z", r"\1.bias"],
        # '_w' -> '.weight'
        [r"(.*)[\._]w\Z", r"\1.weight"],
    ]

    def convert_caffe2_name_to_pytorch(caffe2_layer_name):
        """
        Convert the caffe2_layer_name to pytorch format by applying the list
        of regular expressions.
        Args:
            caffe2_layer_name (str): caffe2 layer name.
        Returns:
            (str): pytorch layer name.
        """
        for source, dest in pairs:
            caffe2_layer_name = re.sub(source, dest, caffe2_layer_name)
        return caffe2_layer_name

    return convert_caffe2_name_to_pytorch
Creates the checkpoint directory (if not present already). Args: path_to_job (string): the path to the folder of the current job.
def make_checkpoint_dir(path_to_job):
    """
    Creates the checkpoint directory (if not present already).
    Args:
        path_to_job (string): the path to the folder of the current job.
    """
    checkpoint_dir = os.path.join(path_to_job, "checkpoints")
    # Create the checkpoint dir from the master process.
    if du.is_master_proc() and not PathManager.exists(checkpoint_dir):
        try:
            PathManager.mkdirs(checkpoint_dir)
        except Exception:
            pass
    return checkpoint_dir
Get path for storing checkpoints. Args: path_to_job (string): the path to the folder of the current job.
def get_checkpoint_dir(path_to_job):
    """
    Get path for storing checkpoints.
    Args:
        path_to_job (string): the path to the folder of the current job.
    """
    return os.path.join(path_to_job, "checkpoints")
Get the full path to a checkpoint file. Args: path_to_job (string): the path to the folder of the current job. epoch (int): the epoch number of the checkpoint.
def get_path_to_checkpoint(path_to_job, epoch):
    """
    Get the full path to a checkpoint file.
    Args:
        path_to_job (string): the path to the folder of the current job.
        epoch (int): the epoch number of the checkpoint.
    """
    name = "checkpoint_epoch_{:05d}.pyth".format(epoch)
    return os.path.join(get_checkpoint_dir(path_to_job), name)
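For reference, a small sketch of the resulting path, with '/tmp/my_job' as a hypothetical job folder; the epoch number is zero-padded to five digits so that plain string sorting orders checkpoint files chronologically:

path = get_path_to_checkpoint("/tmp/my_job", epoch=7)
# path == '/tmp/my_job/checkpoints/checkpoint_epoch_00007.pyth'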
Get the last checkpoint from the checkpointing folder. Args: path_to_job (string): the path to the folder of the current job.
def get_last_checkpoint(path_to_job):
    """
    Get the last checkpoint from the checkpointing folder.
    Args:
        path_to_job (string): the path to the folder of the current job.
    """
    d = get_checkpoint_dir(path_to_job)
    names = PathManager.ls(d) if PathManager.exists(d) else []
    names = [f for f in names if "checkpoint" in f]
    assert len(names), "No checkpoints found in '{}'.".format(d)
    # Sort the checkpoints by epoch.
    name = sorted(names)[-1]
    return os.path.join(d, name)
Determines if the given directory contains a checkpoint. Args: path_to_job (string): the path to the folder of the current job.
def has_checkpoint(path_to_job):
    """
    Determines if the given directory contains a checkpoint.
    Args:
        path_to_job (string): the path to the folder of the current job.
    """
    d = get_checkpoint_dir(path_to_job)
    files = PathManager.ls(d) if PathManager.exists(d) else []
    return any("checkpoint" in f for f in files)
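A minimal resume sketch combining the two helpers above; `path_to_job` is a hypothetical job folder and the actual state loading is elided:

path_to_job = "/tmp/my_job"  # hypothetical job folder
if has_checkpoint(path_to_job):
    last_checkpoint = get_last_checkpoint(path_to_job)
    # load model/optimizer state from `last_checkpoint` here

Note that get_last_checkpoint relies on the zero-padded epoch in the file name, so lexicographic sorting of the directory listing is enough to find the latest checkpoint.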
Determine if a checkpoint should be saved on current epoch. Args: cfg (CfgNode): configs to save. cur_epoch (int): the current epoch of the model. multigrid_schedule (List): schedule for multigrid training.
def is_checkpoint_epoch(cfg, cur_epoch, multigrid_schedule=None):
    """
    Determine if a checkpoint should be saved on current epoch.
    Args:
        cfg (CfgNode): configs to save.
        cur_epoch (int): the current epoch of the model.
        multigrid_schedule (List): schedule for multigrid training.
    """
    if cur_epoch + 1 == cfg.SOLVER.MAX_EPOCH:
        return True
    if multigrid_schedule is not None:
        prev_epoch = 0
        for s in multigrid_schedule:
            if cur_epoch < s[-1]:
                period = max(
                    (s[-1] - prev_epoch) // cfg.MULTIGRID.EVAL_FREQ + 1, 1
                )
                return (s[-1] - 1 - cur_epoch) % period == 0
            prev_epoch = s[-1]
    return (cur_epoch + 1) % cfg.TRAIN.CHECKPOINT_PERIOD == 0
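A hedged sketch of how the helper is typically consulted in a training loop; `cfg` is assumed to be the loaded CfgNode (for example, with SOLVER.MAX_EPOCH = 100 and TRAIN.CHECKPOINT_PERIOD = 10, checkpoints are written after the 10th, 20th, ..., 100th epoch because `cur_epoch` is 0-indexed), and `train_epoch` / `save_checkpoint` stand in for the real training and saving calls:

for cur_epoch in range(cfg.SOLVER.MAX_EPOCH):
    train_epoch(...)  # hypothetical per-epoch training step
    if is_checkpoint_epoch(cfg, cur_epoch):
        save_checkpoint(...)  # hypothetical checkpoint-saving call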